diff --git a/LICENSE b/LICENSE deleted file mode 100644 index d20dc59fb4..0000000000 --- a/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Different components of OpenEmbedded are under different licenses (a mix -of MIT and GPLv2). See LICENSE.GPL-2.0-only and LICENSE.MIT for further -details of the individual licenses. - -All metadata is MIT licensed unless otherwise stated. Source code -included in tree for individual recipes (e.g. patches) are under -the LICENSE stated in the associated recipe (.bb file) unless -otherwise stated. - -License information for any other files is either explicitly stated -or defaults to GPL version 2 only. - -Individual files contain the following style tags instead of the full license -text to identify their license: - - SPDX-License-Identifier: GPL-2.0-only - SPDX-License-Identifier: MIT - -This enables machine processing of license information based on the SPDX -License Identifiers that are here available: http://spdx.org/licenses/ diff --git a/LICENSE.GPL-2.0-only b/LICENSE.GPL-2.0-only deleted file mode 100644 index 5db3c0a21c..0000000000 --- a/LICENSE.GPL-2.0-only +++ /dev/null @@ -1,288 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. 
If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. 
(Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. 
Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. 
The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - -Note: -Individual files contain the following tag instead of the full license text. - - SPDX-License-Identifier: GPL-2.0-only - -This enables machine processing of license information based on the SPDX -License Identifiers that are here available: http://spdx.org/licenses/ diff --git a/LICENSE.MIT b/LICENSE.MIT deleted file mode 100644 index a6919eb7e1..0000000000 --- a/LICENSE.MIT +++ /dev/null @@ -1,25 +0,0 @@ -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -Note: -Individual files contain the following tag instead of the full license text. - - SPDX-License-Identifier: MIT - -This enables machine processing of license information based on the SPDX -License Identifiers that are here available: http://spdx.org/licenses/ diff --git a/MAINTAINERS.md b/MAINTAINERS.md deleted file mode 100644 index d943ed38f1..0000000000 --- a/MAINTAINERS.md +++ /dev/null @@ -1,69 +0,0 @@ -OpenEmbedded-Core and Yocto Project Maintainer Information -========================================================== - -OpenEmbedded and Yocto Project work jointly together to maintain the metadata, -layers, tools and sub-projects that make up their ecosystems. - -The projects operate through collaborative development. This currently takes -place on mailing lists for many components as the "pull request on github" -workflow works well for single or small numbers of maintainers but we have -a large number, all with different specialisms and benefit from the mailing -list review process. Changes therefore undergo peer review through mailing -lists in many cases. - -This file aims to acknowledge people with specific skills/knowledge/interest -both to recognise their contributions but also empower them to help lead and -curate those components. Where we have people with specialist knowledge in -particular areas, during review patches/feedback from these people in these -areas would generally carry weight. - -This file is maintained in OE-Core but may refer to components that are separate -to it if that makes sense in the context of maintainership. The README of specific -layers and components should ultimately be definitive about the patch process and -maintainership for the component. - -Recipe Maintainers ------------------- - -See meta/conf/distro/include/maintainers.inc - -Component/Subsystem Maintainers -------------------------------- - -* Kernel (inc. 
linux-yocto, perf): Bruce Ashfield
-* Reproducible Builds: Joshua Watt
-* Toaster: David Reyna
-* Hash-Equivalence: Joshua Watt
-* Recipe upgrade infrastructure: Alex Kanavin
-* Toolchain: Khem Raj
-* ptest-runner: Aníbal Limón
-* opkg: Alex Stewart
-* devtool: Saul Wold
-* eSDK: Saul Wold
-* overlayfs: Vyacheslav Yurkov
-* Patchtest: Trevor Gamblin
-
-Maintainers needed
-------------------
-
-* Pseudo
-* Layer Index
-* recipetool
-* QA framework/automated testing
-* error reporting system/web UI
-* wic
-* Patchwork
-* Matchbox
-* Sato
-* Autobuilder
-
-Layer Maintainers needed
-------------------------
-
-* meta-gplv2 (ideally new strategy but active maintainer welcome)
-
-Shadow maintainers/development needed
--------------------------------------
-
-* toaster
-* bitbake
diff --git a/MEMORIAM b/MEMORIAM
deleted file mode 100644
index 0b3ce4670f..0000000000
--- a/MEMORIAM
+++ /dev/null
@@ -1,5 +0,0 @@
-Some project contributors who are sadly no longer with us:
-
-Greg Gilbert (treke) - Ahead of his time with licensing
-Thomas Wood (thos) - Creator of the original sato
-Scott Rifenbark (scottrif) - Our long standing techwriter whose words live on
diff --git a/README b/README
new file mode 100644
index 0000000000..1d36bde727
--- /dev/null
+++ b/README
@@ -0,0 +1,111 @@
+The poky repository master branch is no longer being updated.
+
+You can either:
+
+a) switch to individual clones of bitbake, openembedded-core, meta-yocto and yocto-docs
+
+b) use the new bitbake-setup
+
+You can find information about either approach in our documentation:
+https://docs.yoctoproject.org/
+
+Note that "poky" the distro setting is still available in meta-yocto as
+before and we continue to use and maintain that.
+
+Long live Poky!
+
+
+
+
+Some further information on the background of this change follows. The
+details are taken from:
+https://lists.openembedded.org/g/openembedded-architecture/message/2179
+
+TLDR: People have complained about the combo-layer-built poky
+repository for years. It was meant to be a temporary thing; we now have
+an alternative and I'm therefore doing what I promised I'd do. Change
+is tough, things may break, but this is the right point to at least try
+it.
+
+I'd like to note that:
+ * setting up builds with a separate oe-core and bitbake clone
+   works as it always has done
+ * you can change your CI just to use those two repos instead of poky
+ * bitbake-setup isn't mandatory, it will just be what the yocto-
+   docs presents to users
+ * we don't have to stop maintaining the poky repository
+   however nobody will test the new approach/code unless we do
+ * we are optionally exposing sstate mirrors in the new config
+ * we are also exposing config fragments to users
+ * poky as a DISTRO in meta-yocto remains
+
+A bit more about the history and background for those who are
+interested and then some FAQs:
+
+Back around 2010 when we split up openembedded-classic and started
+developing layers, we made the artificial "poky" repository construct
+as a way to let people easily and quickly get started with the project
+without cloning and managing multiple repositories. Layers were a new
+idea with lots of rough edges. kas didn't exist; I think repo was only
+just created and it was a different world. For us, it meant hacking up
+a quick tool, "combo-layer", and it was really a temporary solution to
+fill a gap and it was at least as functional as repo of the era. It was
+assumed we'd work it out properly in the future.
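As an illustration of option (a) from the new README, here is a minimal sketch of the multi-clone setup. The repository URLs and the layout (bitbake nested inside the openembedded-core checkout) are assumptions based on current OpenEmbedded/Yocto git hosting conventions, not part of this diff:

```
# Clone the repositories that the combined poky repository used to bundle.
git clone git://git.openembedded.org/openembedded-core
git clone git://git.openembedded.org/bitbake openembedded-core/bitbake
git clone git://git.yoctoproject.org/meta-yocto openembedded-core/meta-yocto
git clone git://git.yoctoproject.org/yocto-docs

# Initialise a build directory as before; oe-init-build-env picks up the
# bitbake checkout nested inside openembedded-core.
cd openembedded-core
. ./oe-init-build-env build

# The "poky" distro setting still lives in meta-yocto (meta-poky):
bitbake-layers add-layer ../meta-yocto/meta-poky
```

Option (b), bitbake-setup, is covered by the documentation linked above and is not sketched here.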
+
+At developer meetings there are inevitable questions about why
+poky/combo-layer exist and few seem to actually like/support them. There
+are continual questions about why a tool doesn't exist or why we don't
+adopt one too.
+
+15 years later, a bit longer than we might have thought, we are finally
+in a position where there may be a viable way forward to change.
+
+It has taken us a bit of time to get to this point. I wrote the
+original description of something like bitbake-setup about 7-8 years
+ago. I shared it privately with a few people, but the review feedback
+stopped me from pushing it further as I simply did not have the
+bandwidth. We were fortunate to get funding from the Sovereign Tech
+Fund to start the work, and whilst I'd probably prefer to avoid the
+issue, the time had come to start. Since then, Alexander Kanavin has
+put a lot of work into getting it to the point where it would be
+possible to switch. A huge thanks to him for getting this to the
+current point.
+
+Why not use kas/submodules/repo?
+
+This topic has been discussed in depth several times. Very roughly,
+these are either difficult to focus on our use cases or have specific
+designs and intent which we as a project would struggle to influence.
+We are taking significant influence from some of them but also trying
+to build something where we can benefit from tight direct integration
+with bitbake and the metadata. For example, fragment support is generic
+and hopefully something other approaches can also benefit from. We want
+to provide something we can switch the project's docs and autobuilder
+to, which we can control and develop as we need it to. We are not
+aiming to force anyone to switch; you can use whichever tool you want.
+
+Can we not keep poky [repository master branch] around?
+
+If we do that, nobody will use the new tooling and it will be a
+disaster as issues won't get resolved. We need our CI to use the same
+thing we promote to our new and experienced users. We need this new
+tooling to be usable by our experienced developers too. We have tried
+for months to get people to try it and they simply don't. Making a
+release with it won't change much either. It needs people using it and,
+for that, poky has to stop being updated.
+
+What happens to poky [repository]?
+
+The LTS branches continue their lifetime as planned. For master, I'll
+probably put a final commit in changing to just a README which points
+people at the bitbake-setup changes and explains what happened.
+
+What are the timelines? Why now?
+
+If we're going to make a change, we really want this in the next LTS
+release, which is April 2026. We only have one release before that,
+which is now, October 2025. We therefore need to switch now, and then
+give ourselves time to update docs, fix issues that arise and so on,
+and have it in a release cycle. Whilst it means delaying the Oct 2025
+release slightly, that is the right thing to do in the context of the
+bigger picture.
+
diff --git a/README.OE-Core.md b/README.OE-Core.md
deleted file mode 100644
index 7187fb94be..0000000000
--- a/README.OE-Core.md
+++ /dev/null
@@ -1,32 +0,0 @@
-OpenEmbedded-Core
-=================
-
-OpenEmbedded-Core is a layer containing the core metadata for current versions
-of OpenEmbedded. It is distro-less (can build a functional image with
-DISTRO = "nodistro") and contains only emulated machine support.
- -For information about OpenEmbedded, see the OpenEmbedded website: - - -The Yocto Project has extensive documentation about OE including a reference manual -which can be found at: - - -Contributing ------------- - -Please refer to our contributor guide here: -for full details on how to submit changes. - -As a quick guide, patches should be sent to openembedded-core@lists.openembedded.org -The git command to do that would be: - -``` -git send-email -M -1 --to openembedded-core@lists.openembedded.org -``` - -Mailing list: - - -Source code: - diff --git a/README.hardware.md b/README.hardware.md deleted file mode 120000 index 0d0745f46d..0000000000 --- a/README.hardware.md +++ /dev/null @@ -1 +0,0 @@ -meta-yocto-bsp/README.hardware.md \ No newline at end of file diff --git a/README.md b/README.md deleted file mode 120000 index 6d6b2976fc..0000000000 --- a/README.md +++ /dev/null @@ -1 +0,0 @@ -README.poky.md \ No newline at end of file diff --git a/README.poky.md b/README.poky.md deleted file mode 120000 index 15b27a322a..0000000000 --- a/README.poky.md +++ /dev/null @@ -1 +0,0 @@ -meta-poky/README.poky.md \ No newline at end of file diff --git a/README.qemu.md b/README.qemu.md deleted file mode 100644 index cff0f07914..0000000000 --- a/README.qemu.md +++ /dev/null @@ -1,15 +0,0 @@ -QEMU Emulation Targets -====================== - -To simplify development, the build system supports building images to -work with the QEMU emulator in system emulation mode. Several architectures -are currently supported in 32 and 64 bit variants: - -* ARM (qemuarm + qemuarm64) -* x86 (qemux86 + qemux86-64) -* PowerPC (qemuppc only) -* MIPS (qemumips + qemumips64) - -Use of the QEMU images is covered in the Yocto Project Reference Manual. -The appropriate MACHINE variable value corresponding to the target is given -in brackets. diff --git a/SECURITY.md b/SECURITY.md deleted file mode 100644 index a83e09bb0f..0000000000 --- a/SECURITY.md +++ /dev/null @@ -1,22 +0,0 @@ -How to Report a Potential Vulnerability -======================================= - -If you would like to report a public issue (for example, one with a released -CVE number), please report it using the -[Security Bugzilla](https://bugzilla.yoctoproject.org/enter_bug.cgi?product=Security) - -If you are dealing with a not-yet released or urgent issue, please send a -message to security AT yoctoproject DOT org, including as many details as -possible: the layer or software module affected, the recipe and its version, -and any example code, if available. - -Branches maintained with security fixes ---------------------------------------- - -See [Stable release and LTS](https://wiki.yoctoproject.org/wiki/Stable_Release_and_LTS) -for detailed info regarding the policies and maintenance of Stable branches. - -The [Release page](https://wiki.yoctoproject.org/wiki/Releases) contains -a list of all releases of the Yocto Project. Versions in grey are no longer -actively maintained with security patches, but well-tested patches may still -be accepted for them for significant issues. 
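The deleted README.qemu.md above maps each emulated target to a MACHINE value (qemuarm, qemuarm64, qemux86, qemux86-64, qemuppc, qemumips, qemumips64), and README.OE-Core.md notes that a functional image can be built with DISTRO = "nodistro". A minimal sketch tying the two together; only those two variable values come from the deleted READMEs, while the image target and runqemu step are conventional OE-Core usage, not part of this diff:

```
# After sourcing oe-init-build-env, append the two values documented in
# the deleted READMEs to the build's local configuration:
cat >> conf/local.conf <<'EOF'
MACHINE = "qemux86-64"
DISTRO = "nodistro"
EOF

# Build an image and boot it under QEMU system emulation:
bitbake core-image-minimal
runqemu qemux86-64
```

Setting DISTRO = "poky" instead would select the distro configuration that remains in meta-yocto, as the new README notes.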
diff --git a/bitbake/.b4-config b/bitbake/.b4-config deleted file mode 100644 index 047f0b94a4..0000000000 --- a/bitbake/.b4-config +++ /dev/null @@ -1,4 +0,0 @@ -[b4] - send-series-to = bitbake-devel@lists.openembedded.org - send-auto-cc-cmd = ./contrib/b4-wrapper-bitbake.py send-auto-cc-cmd - prep-pre-flight-checks = disable-needs-checking diff --git a/bitbake/.gitattributes b/bitbake/.gitattributes deleted file mode 100644 index e4f8f62fc7..0000000000 --- a/bitbake/.gitattributes +++ /dev/null @@ -1,2 +0,0 @@ -*min.js binary -*min.css binary diff --git a/bitbake/AUTHORS b/bitbake/AUTHORS deleted file mode 100644 index 91fd78fd25..0000000000 --- a/bitbake/AUTHORS +++ /dev/null @@ -1,10 +0,0 @@ -Tim Ansell -Phil Blundell -Seb Frankengul -Holger Freyther -Marcin Juszkiewicz -Chris Larson -Ulrich Luckas -Mickey Lauer -Richard Purdie -Holger Schurig diff --git a/bitbake/ChangeLog b/bitbake/ChangeLog deleted file mode 100644 index 4ac2a64462..0000000000 --- a/bitbake/ChangeLog +++ /dev/null @@ -1,317 +0,0 @@ -Changes in Bitbake 1.9.x: - - Add PE (Package Epoch) support from Philipp Zabel (pH5) - - Treat python functions the same as shell functions for logging - - Use TMPDIR/anonfunc as a __anonfunc temp directory (T) - - Catch truncated cache file errors - - Allow operations other than assignment on flag variables - - Add code to handle inter-task dependencies - - Fix cache errors when generation dotGraphs - - Make sure __inherit_cache is updated before calling include() (from Michael Krelin) - - Fix bug when target was in ASSUME_PROVIDED (#2236) - - Raise ParseError for filenames with multiple underscores instead of infinitely looping (#2062) - - Fix invalid regexp in BBMASK error handling (missing import) (#1124) - - Promote certain warnings from debug to note 2 level - - Update manual - - Correctly redirect stdin when forking - - If parsing errors are found, exit, too many users miss the errors - - Remove supriours PREFERRED_PROVIDER warnings - - svn fetcher: Add _buildsvncommand function - - Improve certain error messages - - Rewrite svn fetcher to make adding extra operations easier - as part of future SRCDATE="now" fixes - (requires new FETCHCMD_svn definition in bitbake.conf) - - Change SVNDIR layout to be more unique (fixes #2644 and #2624) - - Add ConfigParsed Event after configuration parsing is complete - - Add SRCREV support for svn fetcher - - data.emit_var() - only call getVar if we need the variable - - Stop generating the A variable (seems to be legacy code) - - Make sure intertask depends get processed correcting in recursive depends - - Add pn-PN to overrides when evaluating PREFERRED_VERSION - - Improve the progress indicator by skipping tasks that have - already run before starting the build rather than during it - - Add profiling option (-P) - - Add BB_SRCREV_POLICY variable (clear or cache) to control SRCREV cache - - Add SRCREV_FORMAT support - - Fix local fetcher's localpath return values - - Apply OVERRIDES before performing immediate expansions - - Allow the -b -e option combination to take regular expressions - - Fix handling of variables with expansion in the name using _append/_prepend - e.g. 
RRECOMMENDS_${PN}_append_xyz = "abc" - - Add plain message function to bb.msg - - Sort the list of providers before processing so dependency problems are - reproducible rather than effectively random - - Fix/improve bitbake -s output - - Add locking for fetchers so only one tries to fetch a given file at a given time - - Fix int(0)/None confusion in runqueue.py which causes random gaps in dependency chains - - Expand data in addtasks - - Print the list of missing DEPENDS,RDEPENDS for the "No buildable providers available for required...." - error message. - - Rework add_task to be more efficient (6% speedup, 7% number of function calls reduction) - - Sort digraph output to make builds more reproducible - - Split expandKeys into two for loops to benefit from the expand_cache (12% speedup) - - runqueue.py: Fix idepends handling to avoid dependency errors - - Clear the terminal TOSTOP flag if set (and warn the user) - - Fix regression from r653 and make SRCDATE/CVSDATE work for packages again - - Fix a bug in bb.decodeurl where http://some.where.com/somefile.tgz decoded to host="" (#1530) - - Warn about malformed PREFERRED_PROVIDERS (#1072) - - Add support for BB_NICE_LEVEL option (#1627) - - Psyco is used only on x86 as there is no support for other architectures. - - Sort initial providers list by default preference (#1145, #2024) - - Improve provider sorting so prefered versions have preference over latest versions (#768) - - Detect builds of tasks with overlapping providers and warn (will become a fatal error) (#1359) - - Add MULTI_PROVIDER_WHITELIST variable to allow known safe multiple providers to be listed - - Handle paths in svn fetcher module parameter - - Support the syntax "export VARIABLE" - - Add bzr fetcher - - Add support for cleaning directories before a task in the form: - do_taskname[cleandirs] = "dir" - - bzr fetcher tweaks from Robert Schuster (#2913) - - Add mercurial (hg) fetcher from Robert Schuster (#2913) - - Don't add duplicates to BBPATH - - Fix preferred_version return values (providers.py) - - Fix 'depends' flag splitting - - Fix unexport handling (#3135) - - Add bb.copyfile function similar to bb.movefile (and improve movefile error reporting) - - Allow multiple options for deptask flag - - Use git-fetch instead of git-pull removing any need for merges when - fetching (we don't care about the index). Fixes fetch errors. - - Add BB_GENERATE_MIRROR_TARBALLS option, set to 0 to make git fetches - faster at the expense of not creating mirror tarballs. 
- - SRCREV handling updates, improvements and fixes from Poky - - Add bb.utils.lockfile() and bb.utils.unlockfile() from Poky - - Add support for task selfstamp and lockfiles flags - - Disable task number acceleration since it can allow the tasks to run - out of sequence - - Improve runqueue code comments - - Add task scheduler abstraction and some example schedulers - - Improve circular dependency chain debugging code and user feedback - - Don't give a stacktrace for invalid tasks, have a user friendly message (#3431) - - Add support for "-e target" (#3432) - - Fix shell showdata command (#3259) - - Fix shell data updating problems (#1880) - - Properly raise errors for invalid source URI protocols - - Change the wget fetcher failure handling to avoid lockfile problems - - Add support for branches in git fetcher (Otavio Salvador, Michael Lauer) - - Make taskdata and runqueue errors more user friendly - - Add norecurse and fullpath options to cvs fetcher - - Fix exit code for build failures in --continue mode - - Fix git branch tags fetching - - Change parseConfigurationFile so it works on real data, not a copy - - Handle 'base' inherit and all other INHERITs from parseConfigurationFile - instead of BBHandler - - Fix getVarFlags bug in data_smart - - Optmise cache handling by more quickly detecting an invalid cache, only - saving the cache when its changed, moving the cache validity check into - the parsing loop and factoring some getVar calls outside a for loop - - Cooker: Remove a debug message from the parsing loop to lower overhead - - Convert build.py exec_task to use getVarFlags - - Update shell to use cooker.buildFile - - Add StampUpdate event - - Convert -b option to use taskdata/runqueue - - Remove digraph and switch to new stamp checking code. exec_task no longer - honours dependencies - - Make fetcher timestamp updating non-fatal when permissions don't allow - updates - - Add BB_SCHEDULER variable/option ("completion" or "speed") controlling - the way bitbake schedules tasks - - Add BB_STAMP_POLICY variable/option ("perfile" or "full") controlling - how extensively stamps are looked at for validity - - When handling build target failures make sure idepends are checked and - failed where needed. Fixes --continue mode crashes. - - Fix -f (force) in conjunction with -b - - Fix problems with recrdeptask handling where some idepends weren't handled - correctly. - - Handle exit codes correctly (from pH5) - - Work around refs/HEAD issues with git over http (#3410) - - Add proxy support to the CVS fetcher (from Cyril Chemparathy) - - Improve runfetchcmd so errors are seen and various GIT variables are exported - - Add ability to fetchers to check URL validity without downloading - - Improve runtime PREFERRED_PROVIDERS warning message - - Add BB_STAMP_WHITELIST option which contains a list of stamps to ignore when - checking stamp dependencies and using a BB_STAMP_POLICY of "whitelist" - - No longer weight providers on the basis of a package being "already staged". This - leads to builds being non-deterministic. - - Flush stdout/stderr before forking to fix duplicate console output - - Make sure recrdeps tasks include all inter-task dependencies of a given fn - - Add bb.runqueue.check_stamp_fn() for use by packaged-staging - - Add PERSISTENT_DIR to store the PersistData in a persistent - directory != the cache dir. 
- - Add md5 and sha256 checksum generation functions to utils.py - - Correctly handle '-' characters in class names (#2958) - - Make sure expandKeys has been called on the data dictionary before running tasks - - Correctly add a task override in the form task-TASKNAME. - - Revert the '-' character fix in class names since it breaks things - - When a regexp fails to compile for PACKAGES_DYNAMIC, print a more useful error (#4444) - - Allow to checkout CVS by Date and Time. Just add HHmm to the SRCDATE. - - Move prunedir function to utils.py and add explode_dep_versions function - - Raise an exception if SRCREV == 'INVALID' - - Fix hg fetcher username/password handling and fix crash - - Fix PACKAGES_DYNAMIC handling of packages with '++' in the name - - Rename __depends to __base_depends after configuration parsing so we don't - recheck the validity of the config files time after time - - Add better environmental variable handling. By default it will now only pass certain - whitelisted variables into the data store. If BB_PRESERVE_ENV is set bitbake will use - all variable from the environment. If BB_ENV_WHITELIST is set, that whitelist will be - used instead of the internal bitbake one. Alternatively, BB_ENV_EXTRAWHITE can be used - to extend the internal whitelist. - - Perforce fetcher fix to use commandline options instead of being overriden by the environment - - bb.utils.prunedir can cope with symlinks to directoriees without exceptions - - use @rev when doing a svn checkout - - Add osc fetcher (from Joshua Lock in Poky) - - When SRCREV autorevisioning for a recipe is in use, don't cache the recipe - - Add tryaltconfigs option to control whether bitbake trys using alternative providers - to fulfil failed dependencies. It defaults to off, changing the default since this - behaviour confuses many users and isn't often useful. - - Improve lock file function error handling - - Add username handling to the git fetcher (Robert Bragg) - - Add support for HTTP_PROXY and HTTP_PROXY_IGNORE variables to the wget fetcher - - Export more variables to the fetcher commands to allow ssh checkouts and checkouts through - proxies to work better. (from Poky) - - Also allow user and pswd options in SRC_URIs globally (from Poky) - - Improve proxy handling when using mirrors (from Poky) - - Add bb.utils.prune_suffix function - - Fix hg checkouts of specific revisions (from Poky) - - Fix wget fetching of urls with parameters specified (from Poky) - - Add username handling to git fetcher (from Poky) - - Set HOME environmental variable when running fetcher commands (from Poky) - - Make sure allowed variables inherited from the environment are exported again (from Poky) - - When running a stage task in bbshell, run populate_staging, not the stage task (from Poky) - - Fix + character escaping from PACKAGES_DYNAMIC (thanks Otavio Salvador) - - Addition of BBCLASSEXTEND support for allowing one recipe to provide multiple targets (from Poky) - -Changes in Bitbake 1.8.0: - - Release 1.7.x as a stable series - -Changes in BitBake 1.7.x: - - Major updates of the dependency handling and execution - of tasks. 
Code from bin/bitbake replaced with runqueue.py - and taskdata.py - - New task execution code supports multithreading with a simplistic - threading algorithm controlled by BB_NUMBER_THREADS - - Change of the SVN Fetcher to keep the checkout around - courtsey of Paul Sokolovsky (#1367) - - PATH fix to bbimage (#1108) - - Allow debug domains to be specified on the commandline (-l) - - Allow 'interactive' tasks - - Logging message improvements - - Drop now uneeded BUILD_ALL_DEPS variable - - Add support for wildcards to -b option - - Major overhaul of the fetchers making a large amount of code common - including mirroring code - - Fetchers now touch md5 stamps upon access (to show activity) - - Fix -f force option when used without -b (long standing bug) - - Add expand_cache to data_cache.py, caching expanded data (speedup) - - Allow version field in DEPENDS (ignored for now) - - Add abort flag support to the shell - - Make inherit fail if the class doesn't exist (#1478) - - Fix data.emit_env() to expand keynames as well as values - - Add ssh fetcher - - Add perforce fetcher - - Make PREFERRED_PROVIDER_foobar defaults to foobar if available - - Share the parser's mtime_cache, reducing the number of stat syscalls - - Compile all anonfuncs at once! - *** Anonfuncs must now use common spacing format *** - - Memorise the list of handlers in __BBHANDLERS and tasks in __BBTASKS - This removes 2 million function calls resulting in a 5-10% speedup - - Add manpage - - Update generateDotGraph to use taskData/runQueue improving accuracy - and also adding a task dependency graph - - Fix/standardise on GPLv2 licence - - Move most functionality from bin/bitbake to cooker.py and split into - separate funcitons - - CVS fetcher: Added support for non-default port - - Add BBINCLUDELOGS_LINES, the number of lines to read from any logfile - - Drop shebangs from lib/bb scripts - -Changes in Bitbake 1.6.0: - - Better msg handling - - COW dict implementation from Tim Ansell (mithro) leading - to better performance - - Speed up of -s - -Changes in Bitbake 1.4.4: - - SRCDATE now handling courtsey Justin Patrin - - #1017 fix to work with rm_work - -Changes in BitBake 1.4.2: - - Send logs to oe.pastebin.com instead of pastebin.com - fixes #856 - - Copy the internal bitbake data before building the - dependency graph. This fixes nano not having a - virtual/libc dependency - - Allow multiple TARBALL_STASH entries - - Cache, check if the directory exists before changing - into it - - git speedup cloning by not doing a checkout - - allow to have spaces in filenames (.conf, .bb, .bbclass) - -Changes in BitBake 1.4.0: - - Fix to check both RDEPENDS and RDEPENDS_${PN} - - Fix a RDEPENDS parsing bug in utils:explode_deps() - - Update git fetcher behaviour to match git changes - - ASSUME_PROVIDED allowed to include runtime packages - - git fetcher cleanup and efficency improvements - - Change the format of the cache - - Update usermanual to document the Fetchers - - Major changes to caching with a new strategy - giving a major performance increase when reparsing - with few data changes - -Changes in BitBake 1.3.3: - - Create a new Fetcher module to ease the - development of new Fetchers. - Issue #438 fixed by rpurdie@openedhand.com - - Make the Subversion fetcher honor the SRC Date - (CVSDATE). 
- Issue #555 fixed by chris@openedhand.com - - Expand PREFERRED_PROVIDER properly - Issue #436 fixed by rprudie@openedhand.com - - Typo fix for Issue #531 by Philipp Zabel for the - BitBake Shell - - Introduce a new special variable SRCDATE as - a generic naming to replace CVSDATE. - - Introduce a new keyword 'required'. In contrast - to 'include' parsing will fail if a to be included - file can not be found. - - Remove hardcoding of the STAMP directory. Patch - courtsey pHilipp Zabel - - Track the RDEPENDS of each package (rpurdie@openedhand.com) - - Introduce BUILD_ALL_DEPS to build all RDEPENDS. E.g - this is used by the OpenEmbedded Meta Packages. - (rpurdie@openedhand.com). - -Changes in BitBake 1.3.2: - - reintegration of make.py into BitBake - - bbread is gone, use bitbake -e - - lots of shell updates and bugfixes - - Introduction of the .= and =. operator - - Sort variables, keys and groups in bitdoc - - Fix regression in the handling of BBCOLLECTIONS - - Update the bitbake usermanual - -Changes in BitBake 1.3.0: - - add bitbake interactive shell (bitbake -i) - - refactor bitbake utility in OO style - - kill default arguments in methods in the bb.data module - - kill default arguments in methods in the bb.fetch module - - the http/https/ftp fetcher will fail if the to be - downloaded file was not found in DL_DIR (this is needed - to avoid unpacking the sourceforge mirror page) - - Switch to a cow like data instance for persistent and non - persisting mode (called data_smart.py) - - Changed the callback of bb.make.collect_bbfiles to carry - additional parameters - - Drastically reduced the amount of needed RAM by not holding - each data instance in memory when using a cache/persistent - storage - -Changes in BitBake 1.2.1: - The 1.2.1 release is meant as a intermediate release to lay the - ground for more radical changes. The most notable changes are: - - - Do not hardcode {}, use bb.data.init() instead if you want to - get a instance of a data class - - bb.data.init() is a factory and the old bb.data methods are delegates - - Do not use deepcopy use bb.data.createCopy() instead. - - Removed default arguments in bb.fetch - diff --git a/bitbake/LICENSE b/bitbake/LICENSE deleted file mode 100644 index 8458042303..0000000000 --- a/bitbake/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -BitBake is licensed under the GNU General Public License version 2.0. See -LICENSE.GPL-2.0-only for further details. - -Individual files contain the following style tags instead of the full license text: - - SPDX-License-Identifier: GPL-2.0-only - -This enables machine processing of license information based on the SPDX -License Identifiers that are here available: http://spdx.org/licenses/ - - -The following external components are distributed with this software: - -* The Toaster Simple UI application is based upon the Django project template, the files of which are covered by the BSD license and are copyright (c) Django Software -Foundation and individual contributors. - -* Twitter Bootstrap (including Glyphicons), redistributed under the MIT license -* jQuery is redistributed under the MIT license. - -* Twitter typeahead.js redistributed under the MIT license. Note that the JS source has one small modification, so the full unminified file is currently included to make it obvious where this is. - -* jsrender is redistributed under the MIT license. - -* QUnit is redistributed under the MIT license. 
- -* Font Awesome fonts redistributed under the SIL Open Font License 1.1 - -* simplediff is distributed under the zlib license. - diff --git a/bitbake/LICENSE.GPL-2.0-only b/bitbake/LICENSE.GPL-2.0-only deleted file mode 100644 index 5db3c0a21c..0000000000 --- a/bitbake/LICENSE.GPL-2.0-only +++ /dev/null @@ -1,288 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. 
The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. 
- -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. 
Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. 
If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - -Note: -Individual files contain the following tag instead of the full license text. - - SPDX-License-Identifier: GPL-2.0-only - -This enables machine processing of license information based on the SPDX -License Identifiers that are here available: http://spdx.org/licenses/ diff --git a/bitbake/LICENSE.MIT b/bitbake/LICENSE.MIT deleted file mode 100644 index a6919eb7e1..0000000000 --- a/bitbake/LICENSE.MIT +++ /dev/null @@ -1,25 +0,0 @@ -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -Note: -Individual files contain the following tag instead of the full license text. 
-
- SPDX-License-Identifier: MIT
-
-This enables machine processing of license information based on the SPDX
-License Identifiers that are here available: http://spdx.org/licenses/
diff --git a/bitbake/README b/bitbake/README
deleted file mode 100644
index e9f4c858ee..0000000000
--- a/bitbake/README
+++ /dev/null
@@ -1,63 +0,0 @@
-Bitbake
-=======
-
-BitBake is a generic task execution engine that allows shell and Python tasks to be run
-efficiently and in parallel while working within complex inter-task dependency constraints.
-One of BitBake's main users, OpenEmbedded, takes this core and builds embedded Linux software
-stacks using a task-oriented approach.
-
-For information about BitBake, see the OpenEmbedded website:
-    https://www.openembedded.org/
-
-BitBake's documentation can be found in plain text under the doc directory, or as
-rendered HTML at the Yocto Project website:
-    https://docs.yoctoproject.org
-
-BitBake requires Python version 3.8 or newer.
-
-Contributing
------------
-
-Please refer to our contributor guide here: https://docs.yoctoproject.org/contributor-guide/
-for full details on how to submit changes.
-
-As a quick guide, patches should be sent to bitbake-devel@lists.openembedded.org.
-The git command to do that would be:
-
-    git send-email -M -1 --to bitbake-devel@lists.openembedded.org
-
-If you're sending a patch related to the BitBake manual, make sure you copy
-the Yocto Project documentation mailing list:
-
-    git send-email -M -1 --to bitbake-devel@lists.openembedded.org --cc docs@lists.yoctoproject.org
-
-Mailing list:
-
-    https://lists.openembedded.org/g/bitbake-devel
-
-Source code:
-
-    https://git.openembedded.org/bitbake/
-
-Testing
-------
-
-BitBake has a testsuite located in lib/bb/tests/ which aims to prevent regressions.
-You can run this with "bitbake-selftest". In particular the fetcher is well covered since
-it has so many corner cases. The datastore has many tests too. Testing with the testsuite is
-recommended before submitting patches, particularly to the fetcher and datastore. We also
-appreciate new test cases and may require them for more obscure issues.
-
-To run the tests, "zstd" and "git" must be installed.
-
-The testsuite assumes that it is run from an initialized OpenEmbedded build
-environment (i.e. `source oe-init-build-env` is used). If this is not the case, run the
-testsuite as follows:
-
-    export PATH=$(pwd)/bin:$PATH
-    bin/bitbake-selftest
-
-The testsuite can alternatively be executed using pytest, e.g. obtained from PyPI (in this
-case, the PATH is configured automatically):
-
-    pytest
diff --git a/bitbake/SECURITY.md b/bitbake/SECURITY.md
deleted file mode 100644
index 7d2ce1f631..0000000000
--- a/bitbake/SECURITY.md
+++ /dev/null
@@ -1,24 +0,0 @@
-How to Report a Potential Vulnerability?
-========================================
-
-If you would like to report a public issue (for example, one with a released
-CVE number), please report it using the
-[https://bugzilla.yoctoproject.org/enter_bug.cgi?product=Security Security Bugzilla].
-If you have a patch ready, submit it following the same procedure as any other
-patch as described in README.md.
-
-If you are dealing with a not-yet-released or urgent issue, please send a
-message to security AT yoctoproject DOT org, including as many details as
-possible: the layer or software module affected, the recipe and its version,
-and any example code, if available.
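-
-As an illustrative sketch (the layer, recipe, version and issue below are all
-hypothetical), a useful report might look like:
-
-    Affected: meta-example layer, recipe libfoo, version 1.2.3
-    Impact: crash triggered by a crafted input file
-    Reproducer: example code or steps demonstrating the issue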
- -Branches maintained with security fixes ---------------------------------------- - -See [https://wiki.yoctoproject.org/wiki/Stable_Release_and_LTS Stable release and LTS] -for detailed info regarding the policies and maintenance of Stable branches. - -The [https://wiki.yoctoproject.org/wiki/Releases Release page] contains a list of all -releases of the Yocto Project. Versions in grey are no longer actively maintained with -security patches, but well-tested patches may still be accepted for them for -significant issues. diff --git a/bitbake/bin/bitbake b/bitbake/bin/bitbake deleted file mode 100755 index 3acf53229b..0000000000 --- a/bitbake/bin/bitbake +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) 2003, 2004 Chris Larson -# Copyright (C) 2003, 2004 Phil Blundell -# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer -# Copyright (C) 2005 Holger Hans Peter Freyther -# Copyright (C) 2005 ROAD GmbH -# Copyright (C) 2006 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import os -import sys -import warnings -warnings.simplefilter("default") - -sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), - 'lib')) -try: - import bb -except RuntimeError as exc: - sys.exit(str(exc)) - -from bb import cookerdata -from bb.main import bitbake_main, BitBakeConfigParameters, BBMainException - -bb.utils.check_system_locale() - -__version__ = "2.15.2" - -if __name__ == "__main__": - if __version__ != bb.__version__: - sys.exit("Bitbake core version and program version mismatch!") - try: - sys.exit(bitbake_main(BitBakeConfigParameters(sys.argv), - cookerdata.CookerConfiguration())) - except BBMainException as err: - sys.exit(err) - except bb.BBHandledException: - sys.exit(1) - except Exception: - import traceback - traceback.print_exc() - sys.exit(1) diff --git a/bitbake/bin/bitbake-config-build b/bitbake/bin/bitbake-config-build deleted file mode 120000 index 11e6df80c4..0000000000 --- a/bitbake/bin/bitbake-config-build +++ /dev/null @@ -1 +0,0 @@ -bitbake-layers \ No newline at end of file diff --git a/bitbake/bin/bitbake-diffsigs b/bitbake/bin/bitbake-diffsigs deleted file mode 100755 index 9d6cb8c944..0000000000 --- a/bitbake/bin/bitbake-diffsigs +++ /dev/null @@ -1,208 +0,0 @@ -#!/usr/bin/env python3 - -# bitbake-diffsigs / bitbake-dumpsig -# BitBake task signature data dump and comparison utility -# -# Copyright (C) 2012-2013, 2017 Intel Corporation -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import os -import sys -import warnings - -warnings.simplefilter("default") -import argparse -import logging -import pickle - -sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib')) - -import bb.tinfoil -import bb.siggen -import bb.msg - -myname = os.path.basename(sys.argv[0]) -logger = bb.msg.logger_create(myname) - -is_dump = myname == 'bitbake-dumpsig' - - -def find_siginfo(tinfoil, pn, taskname, sigs=None): - result = None - tinfoil.set_event_mask(['bb.event.FindSigInfoResult', - 'logging.LogRecord', - 'bb.command.CommandCompleted', - 'bb.command.CommandFailed']) - ret = tinfoil.run_command('findSigInfo', pn, taskname, sigs) - if ret: - while True: - event = tinfoil.wait_event(1) - if event: - if isinstance(event, bb.command.CommandCompleted): - break - elif isinstance(event, bb.command.CommandFailed): - logger.error(str(event)) - sys.exit(2) - elif isinstance(event, bb.event.FindSigInfoResult): - result = event.result - elif isinstance(event, logging.LogRecord): - logger.handle(event) - else: - logger.error('No 
result returned from findSigInfo command') - sys.exit(2) - return result - - -def find_siginfo_task(bbhandler, pn, taskname, sig1=None, sig2=None): - """ Find the most recent signature files for the specified PN/task """ - - if not taskname.startswith('do_'): - taskname = 'do_%s' % taskname - - if sig1 and sig2: - sigfiles = find_siginfo(bbhandler, pn, taskname, [sig1, sig2]) - if not sigfiles: - logger.error('No sigdata files found matching %s %s matching either %s or %s' % (pn, taskname, sig1, sig2)) - sys.exit(1) - elif sig1 not in sigfiles: - logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig1)) - sys.exit(1) - elif sig2 not in sigfiles: - logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig2)) - sys.exit(1) - - latestfiles = [sigfiles[sig1]['path'], sigfiles[sig2]['path']] - else: - sigfiles = find_siginfo(bbhandler, pn, taskname) - latestsigs = sorted(sigfiles.keys(), key=lambda h: sigfiles[h]['time'])[-2:] - if not latestsigs: - logger.error('No sigdata files found matching %s %s' % (pn, taskname)) - sys.exit(1) - latestfiles = [sigfiles[latestsigs[0]]['path']] - if len(latestsigs) > 1: - latestfiles.append(sigfiles[latestsigs[1]]['path']) - - return latestfiles - - -# Define recursion callback -def recursecb(key, hash1, hash2): - hashes = [hash1, hash2] - hashfiles = find_siginfo(tinfoil, key, None, hashes) - - recout = [] - if not hashfiles: - recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2)) - elif hash1 not in hashfiles: - recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash1)) - elif hash2 not in hashfiles: - recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash2)) - else: - out2 = bb.siggen.compare_sigfiles(hashfiles[hash1]['path'], hashfiles[hash2]['path'], recursecb, color=color) - for change in out2: - for line in change.splitlines(): - recout.append(' ' + line) - - return recout - - -parser = argparse.ArgumentParser( - description=("Dumps" if is_dump else "Compares") + " siginfo/sigdata files written out by BitBake") - -parser.add_argument('-D', '--debug', - help='Enable debug output', - action='store_true') - -if is_dump: - parser.add_argument("-t", "--task", - help="find the signature data file for the last run of the specified task", - action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname')) - - parser.add_argument("sigdatafile1", - help="Signature file to dump. Not used when using -t/--task.", - action="store", nargs='?', metavar="sigdatafile") -else: - parser.add_argument('-c', '--color', - help='Colorize the output (where %(metavar)s is %(choices)s)', - choices=['auto', 'always', 'never'], default='auto', metavar='color') - - parser.add_argument('-d', '--dump', - help='Dump the last signature data instead of comparing (equivalent to using bitbake-dumpsig)', - action='store_true') - - parser.add_argument("-t", "--task", - help="find the signature data files for the last two runs of the specified task and compare them", - action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname')) - - parser.add_argument("-s", "--signature", - help="With -t/--task, specify the signatures to look for instead of taking the last two", - action="store", dest="sigargs", nargs=2, metavar=('fromsig', 'tosig')) - - parser.add_argument("sigdatafile1", - help="First signature file to compare (or signature file to dump, if second not specified). 
Not used when using -t/--task.", - action="store", nargs='?') - - parser.add_argument("sigdatafile2", - help="Second signature file to compare", - action="store", nargs='?') - -options = parser.parse_args() -if is_dump: - options.color = 'never' - options.dump = True - options.sigdatafile2 = None - options.sigargs = None - -if options.debug: - logger.setLevel(logging.DEBUG) - -color = (options.color == 'always' or (options.color == 'auto' and sys.stdout.isatty())) - -if options.taskargs: - with bb.tinfoil.Tinfoil() as tinfoil: - tinfoil.prepare(config_only=True) - if not options.dump and options.sigargs: - files = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1], options.sigargs[0], - options.sigargs[1]) - else: - files = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1]) - - if options.dump: - logger.debug("Signature file: %s" % files[-1]) - output = bb.siggen.dump_sigfile(files[-1]) - else: - if len(files) < 2: - logger.error('Only one matching sigdata file found for the specified task (%s %s)' % ( - options.taskargs[0], options.taskargs[1])) - sys.exit(1) - - # Recurse into signature comparison - logger.debug("Signature file (previous): %s" % files[-2]) - logger.debug("Signature file (latest): %s" % files[-1]) - output = bb.siggen.compare_sigfiles(files[-2], files[-1], recursecb, color=color) -else: - if options.sigargs: - logger.error('-s/--signature can only be used together with -t/--task') - sys.exit(1) - try: - if not options.dump and options.sigdatafile1 and options.sigdatafile2: - with bb.tinfoil.Tinfoil() as tinfoil: - tinfoil.prepare(config_only=True) - output = bb.siggen.compare_sigfiles(options.sigdatafile1, options.sigdatafile2, recursecb, color=color) - elif options.sigdatafile1: - output = bb.siggen.dump_sigfile(options.sigdatafile1) - else: - logger.error('Must specify signature file(s) or -t/--task') - parser.print_help() - sys.exit(1) - except IOError as e: - logger.error(str(e)) - sys.exit(1) - except (pickle.UnpicklingError, EOFError): - logger.error('Invalid signature data - ensure you are specifying sigdata/siginfo files') - sys.exit(1) - -if output: - print('\n'.join(output)) diff --git a/bitbake/bin/bitbake-dumpsig b/bitbake/bin/bitbake-dumpsig deleted file mode 120000 index b1e8489b45..0000000000 --- a/bitbake/bin/bitbake-dumpsig +++ /dev/null @@ -1 +0,0 @@ -bitbake-diffsigs \ No newline at end of file diff --git a/bitbake/bin/bitbake-getvar b/bitbake/bin/bitbake-getvar deleted file mode 100755 index 378fb13572..0000000000 --- a/bitbake/bin/bitbake-getvar +++ /dev/null @@ -1,71 +0,0 @@ -#! 
/usr/bin/env python3 -# -# Copyright (C) 2021 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import argparse -import io -import os -import sys -import warnings -import logging -warnings.simplefilter("default") - -bindir = os.path.dirname(__file__) -topdir = os.path.dirname(bindir) -sys.path[0:0] = [os.path.join(topdir, 'lib')] - -import bb.providers -import bb.tinfoil - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Bitbake Query Variable") - parser.add_argument("variable", help="variable name to query") - parser.add_argument("-r", "--recipe", help="Recipe name to query", default=None, required=False) - parser.add_argument('-u', '--unexpand', help='Do not expand the value (with --value)', action="store_true") - parser.add_argument('-f', '--flag', help='Specify a variable flag to query (with --value)', default=None) - parser.add_argument('--value', help='Only report the value, no history and no variable name', action="store_true") - parser.add_argument('-q', '--quiet', help='Silence bitbake server logging', action="store_true") - parser.add_argument('--ignore-undefined', help='Suppress any errors related to undefined variables', action="store_true") - args = parser.parse_args() - - if not args.value: - if args.unexpand: - sys.exit("--unexpand only makes sense with --value") - - if args.flag: - sys.exit("--flag only makes sense with --value") - - quiet = args.quiet or args.value - if quiet: - logger = logging.getLogger("BitBake") - logger.setLevel(logging.WARNING) - - with bb.tinfoil.Tinfoil(tracking=True, setup_logging=not quiet) as tinfoil: - if args.recipe: - tinfoil.prepare(quiet=3 if quiet else 2) - try: - d = tinfoil.parse_recipe(args.recipe) - except bb.providers.NoProvider as e: - sys.exit(str(e)) - else: - tinfoil.prepare(quiet=2, config_only=True) - # Expand keys and run anonymous functions to get identical result to - # "bitbake -e" - d = tinfoil.finalizeData() - - value = None - if args.flag: - value = d.getVarFlag(args.variable, args.flag, expand=not args.unexpand) - if value is None and not args.ignore_undefined: - sys.exit(f"The flag '{args.flag}' is not defined for variable '{args.variable}'") - else: - value = d.getVar(args.variable, expand=not args.unexpand) - if value is None and not args.ignore_undefined: - sys.exit(f"The variable '{args.variable}' is not defined") - if args.value: - print(str(value if value is not None else "")) - else: - bb.data.emit_var(args.variable, d=d, all=True) diff --git a/bitbake/bin/bitbake-hashclient b/bitbake/bin/bitbake-hashclient deleted file mode 100755 index b8755c5797..0000000000 --- a/bitbake/bin/bitbake-hashclient +++ /dev/null @@ -1,462 +0,0 @@ -#! /usr/bin/env python3 -# -# Copyright (C) 2019 Garmin Ltd. 
-# -# SPDX-License-Identifier: GPL-2.0-only -# - -import argparse -import hashlib -import logging -import os -import pprint -import sys -import threading -import time -import warnings -import netrc -import json -import statistics -import textwrap -warnings.simplefilter("default") - -try: - import tqdm - ProgressBar = tqdm.tqdm -except ImportError: - class ProgressBar(object): - def __init__(self, *args, **kwargs): - pass - - def __enter__(self): - return self - - def __exit__(self, *args, **kwargs): - pass - - def update(self): - pass - -sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), 'lib')) - -import hashserv -import bb.asyncrpc - -DEFAULT_ADDRESS = 'unix://./hashserve.sock' -METHOD = 'stress.test.method' - -def print_user(u): - print(f"Username: {u['username']}") - if "permissions" in u: - print("Permissions: " + " ".join(u["permissions"])) - if "token" in u: - print(f"Token: {u['token']}") - - -def main(): - def handle_get(args, client): - result = client.get_taskhash(args.method, args.taskhash, all_properties=True) - if not result: - return 0 - - print(json.dumps(result, sort_keys=True, indent=4)) - return 0 - - def handle_get_outhash(args, client): - result = client.get_outhash(args.method, args.outhash, args.taskhash) - if not result: - return 0 - - print(json.dumps(result, sort_keys=True, indent=4)) - return 0 - - def handle_stats(args, client): - if args.reset: - s = client.reset_stats() - else: - s = client.get_stats() - print(json.dumps(s, sort_keys=True, indent=4)) - return 0 - - def handle_stress(args, client): - def thread_main(pbar, lock): - nonlocal found_hashes - nonlocal missed_hashes - nonlocal max_time - nonlocal times - - with hashserv.create_client(args.address) as client: - for i in range(args.requests): - taskhash = hashlib.sha256() - taskhash.update(args.taskhash_seed.encode('utf-8')) - taskhash.update(str(i).encode('utf-8')) - - start_time = time.perf_counter() - l = client.get_unihash(METHOD, taskhash.hexdigest()) - elapsed = time.perf_counter() - start_time - - with lock: - if l: - found_hashes += 1 - else: - missed_hashes += 1 - - times.append(elapsed) - pbar.update() - - max_time = 0 - found_hashes = 0 - missed_hashes = 0 - lock = threading.Lock() - times = [] - start_time = time.perf_counter() - with ProgressBar(total=args.clients * args.requests) as pbar: - threads = [threading.Thread(target=thread_main, args=(pbar, lock), daemon=False) for _ in range(args.clients)] - for t in threads: - t.start() - - for t in threads: - t.join() - total_elapsed = time.perf_counter() - start_time - - with lock: - mean = statistics.mean(times) - median = statistics.median(times) - stddev = statistics.pstdev(times) - - print(f"Number of clients: {args.clients}") - print(f"Requests per client: {args.requests}") - print(f"Number of requests: {len(times)}") - print(f"Total elapsed time: {total_elapsed:.3f}s") - print(f"Total request rate: {len(times)/total_elapsed:.3f} req/s") - print(f"Average request time: {mean:.3f}s") - print(f"Median request time: {median:.3f}s") - print(f"Request time std dev: {stddev:.3f}s") - print(f"Maximum request time: {max(times):.3f}s") - print(f"Minimum request time: {min(times):.3f}s") - print(f"Hashes found: {found_hashes}") - print(f"Hashes missed: {missed_hashes}") - - if args.report: - with ProgressBar(total=args.requests) as pbar: - for i in range(args.requests): - taskhash = hashlib.sha256() - taskhash.update(args.taskhash_seed.encode('utf-8')) - taskhash.update(str(i).encode('utf-8')) - - outhash = 
hashlib.sha256() - outhash.update(args.outhash_seed.encode('utf-8')) - outhash.update(str(i).encode('utf-8')) - - client.report_unihash(taskhash.hexdigest(), METHOD, outhash.hexdigest(), taskhash.hexdigest()) - - with lock: - pbar.update() - - def handle_remove(args, client): - where = {k: v for k, v in args.where} - if where: - result = client.remove(where) - print("Removed %d row(s)" % (result["count"])) - else: - print("No query specified") - - def handle_clean_unused(args, client): - result = client.clean_unused(args.max_age) - print("Removed %d rows" % (result["count"])) - return 0 - - def handle_refresh_token(args, client): - r = client.refresh_token(args.username) - print_user(r) - - def handle_set_user_permissions(args, client): - r = client.set_user_perms(args.username, args.permissions) - print_user(r) - - def handle_get_user(args, client): - r = client.get_user(args.username) - print_user(r) - - def handle_get_all_users(args, client): - users = client.get_all_users() - print("{username:20}| {permissions}".format(username="Username", permissions="Permissions")) - print(("-" * 20) + "+" + ("-" * 20)) - for u in users: - print("{username:20}| {permissions}".format(username=u["username"], permissions=" ".join(u["permissions"]))) - - def handle_new_user(args, client): - r = client.new_user(args.username, args.permissions) - print_user(r) - - def handle_delete_user(args, client): - r = client.delete_user(args.username) - print_user(r) - - def handle_get_db_usage(args, client): - usage = client.get_db_usage() - print(usage) - tables = sorted(usage.keys()) - print("{name:20}| {rows:20}".format(name="Table name", rows="Rows")) - print(("-" * 20) + "+" + ("-" * 20)) - for t in tables: - print("{name:20}| {rows:<20}".format(name=t, rows=usage[t]["rows"])) - print() - - total_rows = sum(t["rows"] for t in usage.values()) - print(f"Total rows: {total_rows}") - - def handle_get_db_query_columns(args, client): - columns = client.get_db_query_columns() - print("\n".join(sorted(columns))) - - def handle_gc_status(args, client): - result = client.gc_status() - if not result["mark"]: - print("No Garbage collection in progress") - return 0 - - print("Current Mark: %s" % result["mark"]) - print("Total hashes to keep: %d" % result["keep"]) - print("Total hashes to remove: %s" % result["remove"]) - return 0 - - def handle_gc_mark(args, client): - where = {k: v for k, v in args.where} - result = client.gc_mark(args.mark, where) - print("New hashes marked: %d" % result["count"]) - return 0 - - def handle_gc_mark_stream(args, client): - stdin = (l.strip() for l in sys.stdin) - marked_hashes = 0 - - try: - result = client.gc_mark_stream(args.mark, stdin) - marked_hashes = result["count"] - except ConnectionError: - logger.warning( - "Server doesn't seem to support `gc-mark-stream`. Sending " - "hashes sequentially using `gc-mark` API." 
- ) - for line in stdin: - pairs = line.split() - condition = dict(zip(pairs[::2], pairs[1::2])) - result = client.gc_mark(args.mark, condition) - marked_hashes += result["count"] - - print("New hashes marked: %d" % marked_hashes) - return 0 - - def handle_gc_sweep(args, client): - result = client.gc_sweep(args.mark) - print("Removed %d rows" % result["count"]) - return 0 - - def handle_unihash_exists(args, client): - result = client.unihash_exists(args.unihash) - if args.quiet: - return 0 if result else 1 - - print("true" if result else "false") - return 0 - - def handle_ping(args, client): - times = [] - for i in range(1, args.count + 1): - if not args.quiet: - print(f"Ping {i} of {args.count}... ", end="") - start_time = time.perf_counter() - client.ping() - elapsed = time.perf_counter() - start_time - times.append(elapsed) - if not args.quiet: - print(f"{elapsed:.3f}s") - - mean = statistics.mean(times) - median = statistics.median(times) - std_dev = statistics.pstdev(times) - - if not args.quiet: - print("------------------------") - print(f"Number of pings: {len(times)}") - print(f"Average round trip time: {mean:.3f}s") - print(f"Median round trip time: {median:.3f}s") - print(f"Round trip time std dev: {std_dev:.3f}s") - print(f"Min time is: {min(times):.3f}s") - print(f"Max time is: {max(times):.3f}s") - return 0 - - parser = argparse.ArgumentParser( - formatter_class=argparse.RawDescriptionHelpFormatter, - description='Hash Equivalence Client', - epilog=textwrap.dedent( - """ - Possible ADDRESS options are: - unix://PATH Connect to UNIX domain socket at PATH - ws://HOST[:PORT] Connect to websocket at HOST:PORT (default port is 80) - wss://HOST[:PORT] Connect to secure websocket at HOST:PORT (default port is 443) - HOST:PORT Connect to TCP server at HOST:PORT - """ - ), - ) - parser.add_argument('--address', default=DEFAULT_ADDRESS, help='Server address (default "%(default)s")') - parser.add_argument('--log', default='WARNING', help='Set logging level') - parser.add_argument('--login', '-l', metavar="USERNAME", help="Authenticate as USERNAME") - parser.add_argument('--password', '-p', metavar="TOKEN", help="Authenticate using token TOKEN") - parser.add_argument('--become', '-b', metavar="USERNAME", help="Impersonate user USERNAME (if allowed) when performing actions") - parser.add_argument('--no-netrc', '-n', action="store_false", dest="netrc", help="Do not use .netrc") - - subparsers = parser.add_subparsers() - - get_parser = subparsers.add_parser('get', help="Get the unihash for a taskhash") - get_parser.add_argument("method", help="Method to query") - get_parser.add_argument("taskhash", help="Task hash to query") - get_parser.set_defaults(func=handle_get) - - get_outhash_parser = subparsers.add_parser('get-outhash', help="Get output hash information") - get_outhash_parser.add_argument("method", help="Method to query") - get_outhash_parser.add_argument("outhash", help="Output hash to query") - get_outhash_parser.add_argument("taskhash", help="Task hash to query") - get_outhash_parser.set_defaults(func=handle_get_outhash) - - stats_parser = subparsers.add_parser('stats', help='Show server stats') - stats_parser.add_argument('--reset', action='store_true', - help='Reset server stats') - stats_parser.set_defaults(func=handle_stats) - - stress_parser = subparsers.add_parser('stress', help='Run stress test') - stress_parser.add_argument('--clients', type=int, default=10, - help='Number of simultaneous clients') - stress_parser.add_argument('--requests', type=int, default=1000, - 
help='Number of requests each client will perform') - stress_parser.add_argument('--report', action='store_true', - help='Report new hashes') - stress_parser.add_argument('--taskhash-seed', default='', - help='Include string in taskhash') - stress_parser.add_argument('--outhash-seed', default='', - help='Include string in outhash') - stress_parser.set_defaults(func=handle_stress) - - remove_parser = subparsers.add_parser('remove', help="Remove hash entries") - remove_parser.add_argument("--where", "-w", metavar="KEY VALUE", nargs=2, action="append", default=[], - help="Remove entries from table where KEY == VALUE") - remove_parser.set_defaults(func=handle_remove) - - clean_unused_parser = subparsers.add_parser('clean-unused', help="Remove unused database entries") - clean_unused_parser.add_argument("max_age", metavar="SECONDS", type=int, help="Remove unused entries older than SECONDS old") - clean_unused_parser.set_defaults(func=handle_clean_unused) - - refresh_token_parser = subparsers.add_parser('refresh-token', help="Refresh auth token") - refresh_token_parser.add_argument("--username", "-u", help="Refresh the token for another user (if authorized)") - refresh_token_parser.set_defaults(func=handle_refresh_token) - - set_user_perms_parser = subparsers.add_parser('set-user-perms', help="Set new permissions for user") - set_user_perms_parser.add_argument("--username", "-u", help="Username", required=True) - set_user_perms_parser.add_argument("permissions", metavar="PERM", nargs="*", default=[], help="New permissions") - set_user_perms_parser.set_defaults(func=handle_set_user_permissions) - - get_user_parser = subparsers.add_parser('get-user', help="Get user") - get_user_parser.add_argument("--username", "-u", help="Username") - get_user_parser.set_defaults(func=handle_get_user) - - get_all_users_parser = subparsers.add_parser('get-all-users', help="List all users") - get_all_users_parser.set_defaults(func=handle_get_all_users) - - new_user_parser = subparsers.add_parser('new-user', help="Create new user") - new_user_parser.add_argument("--username", "-u", help="Username", required=True) - new_user_parser.add_argument("permissions", metavar="PERM", nargs="*", default=[], help="New permissions") - new_user_parser.set_defaults(func=handle_new_user) - - delete_user_parser = subparsers.add_parser('delete-user', help="Delete user") - delete_user_parser.add_argument("--username", "-u", help="Username", required=True) - delete_user_parser.set_defaults(func=handle_delete_user) - - db_usage_parser = subparsers.add_parser('get-db-usage', help="Database Usage") - db_usage_parser.set_defaults(func=handle_get_db_usage) - - db_query_columns_parser = subparsers.add_parser('get-db-query-columns', help="Show columns that can be used in database queries") - db_query_columns_parser.set_defaults(func=handle_get_db_query_columns) - - gc_status_parser = subparsers.add_parser("gc-status", help="Show garbage collection status") - gc_status_parser.set_defaults(func=handle_gc_status) - - gc_mark_parser = subparsers.add_parser('gc-mark', help="Mark hashes to be kept for garbage collection") - gc_mark_parser.add_argument("mark", help="Mark for this garbage collection operation") - gc_mark_parser.add_argument("--where", "-w", metavar="KEY VALUE", nargs=2, action="append", default=[], - help="Keep entries in table where KEY == VALUE") - gc_mark_parser.set_defaults(func=handle_gc_mark) - - gc_mark_parser_stream = subparsers.add_parser( - 'gc-mark-stream', - help=( - "Mark multiple hashes to be retained for garbage 
collection. Input should be provided via stdin, " - "with each line formatted as key-value pairs separated by spaces, for example 'column1 foo column2 bar'." - ) - ) - gc_mark_parser_stream.add_argument("mark", help="Mark for this garbage collection operation") - gc_mark_parser_stream.set_defaults(func=handle_gc_mark_stream) - - gc_sweep_parser = subparsers.add_parser('gc-sweep', help="Perform garbage collection and delete any entries that are not marked") - gc_sweep_parser.add_argument("mark", help="Mark for this garbage collection operation") - gc_sweep_parser.set_defaults(func=handle_gc_sweep) - - unihash_exists_parser = subparsers.add_parser('unihash-exists', help="Check if a unihash is known to the server") - unihash_exists_parser.add_argument("--quiet", action="store_true", help="Don't print status. Instead, exit with 0 if unihash exists and 1 if it does not") - unihash_exists_parser.add_argument("unihash", help="Unihash to check") - unihash_exists_parser.set_defaults(func=handle_unihash_exists) - - ping_parser = subparsers.add_parser('ping', help="Ping server") - ping_parser.add_argument("-n", "--count", type=int, help="Number of pings. Default is %(default)s", default=10) - ping_parser.add_argument("-q", "--quiet", action="store_true", help="Don't print each ping; only print results") - ping_parser.set_defaults(func=handle_ping) - - args = parser.parse_args() - - logger = logging.getLogger('hashserv') - - level = getattr(logging, args.log.upper(), None) - if not isinstance(level, int): - raise ValueError('Invalid log level: %s' % args.log) - - logger.setLevel(level) - console = logging.StreamHandler() - console.setLevel(level) - logger.addHandler(console) - - login = args.login - password = args.password - - if login is None and args.netrc: - try: - n = netrc.netrc() - auth = n.authenticators(args.address) - if auth is not None: - login, _, password = auth - except FileNotFoundError: - pass - except netrc.NetrcParseError as e: - sys.stderr.write(f"Error parsing {e.filename}:{e.lineno}: {e.msg}\n") - - func = getattr(args, 'func', None) - if func: - try: - with hashserv.create_client(args.address, login, password) as client: - if args.become: - client.become_user(args.become) - return func(args, client) - except bb.asyncrpc.InvokeError as e: - print(f"ERROR: {e}") - return 1 - - return 0 - - -if __name__ == '__main__': - try: - ret = main() - except Exception: - ret = 1 - import traceback - traceback.print_exc() - sys.exit(ret) diff --git a/bitbake/bin/bitbake-hashserv b/bitbake/bin/bitbake-hashserv deleted file mode 100755 index 01503736b9..0000000000 --- a/bitbake/bin/bitbake-hashserv +++ /dev/null @@ -1,179 +0,0 @@ -#! /usr/bin/env python3 -# -# Copyright (C) 2018 Garmin Ltd. -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import os -import sys -import logging -import argparse -import sqlite3 -import warnings - -warnings.simplefilter("default") - -sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), "lib")) - -import hashserv -from hashserv.server import DEFAULT_ANON_PERMS - -VERSION = "1.0.0" - -DEFAULT_BIND = "unix://./hashserve.sock" - - -def main(): - parser = argparse.ArgumentParser( - description="Hash Equivalence Reference Server. 
Version=%s" % VERSION, - formatter_class=argparse.RawTextHelpFormatter, - epilog=""" -The bind address may take one of the following formats: - unix://PATH - Bind to unix domain socket at PATH - ws://ADDRESS:PORT - Bind to websocket on ADDRESS:PORT - ADDRESS:PORT - Bind to raw TCP socket on ADDRESS:PORT - -To bind to all addresses, leave the ADDRESS empty, e.g. "--bind :8686" or -"--bind ws://:8686". To bind to a specific IPv6 address, enclose the address in -"[]", e.g. "--bind [::1]:8686" or "--bind ws://[::1]:8686" - -Note that the default Anonymous permissions are designed to not break existing -server instances when upgrading, but are not particularly secure defaults. If -you want to use authentication, it is recommended that you use "--anon-perms -@read" to only give anonymous users read access, or "--anon-perms @none" to -give un-authenticated users no access at all. - -Setting "--anon-perms @all" or "--anon-perms @user-admin" is not allowed, since -this would allow anonymous users to manage all users accounts, which is a bad -idea. - -If you are using user authentication, you should run your server in websockets -mode with an SSL terminating load balancer in front of it (as this server does -not implement SSL). Otherwise all usernames and passwords will be transmitted -in the clear. When configured this way, clients can connect using a secure -websocket, as in "wss://SERVER:PORT" - -The following permissions are supported by the server: - - @none - No permissions - @read - The ability to read equivalent hashes from the server - @report - The ability to report equivalent hashes to the server - @db-admin - Manage the hash database(s). This includes cleaning the - database, removing hashes, etc. - @user-admin - The ability to manage user accounts. This includes, creating - users, deleting users, resetting login tokens, and assigning - permissions. 
- @all - All possible permissions, including any that may be added - in the future - """, - ) - - parser.add_argument( - "-b", - "--bind", - default=os.environ.get("HASHSERVER_BIND", DEFAULT_BIND), - help='Bind address (default $HASHSERVER_BIND, "%(default)s")', - ) - parser.add_argument( - "-d", - "--database", - default=os.environ.get("HASHSERVER_DB", "./hashserv.db"), - help='Database file (default $HASHSERVER_DB, "%(default)s")', - ) - parser.add_argument( - "-l", - "--log", - default=os.environ.get("HASHSERVER_LOG_LEVEL", "WARNING"), - help='Set logging level (default $HASHSERVER_LOG_LEVEL, "%(default)s")', - ) - parser.add_argument( - "-u", - "--upstream", - default=os.environ.get("HASHSERVER_UPSTREAM", None), - help="Upstream hashserv to pull hashes from ($HASHSERVER_UPSTREAM)", - ) - parser.add_argument( - "-r", - "--read-only", - action="store_true", - help="Disallow write operations from clients ($HASHSERVER_READ_ONLY)", - ) - parser.add_argument( - "--db-username", - default=os.environ.get("HASHSERVER_DB_USERNAME", None), - help="Database username ($HASHSERVER_DB_USERNAME)", - ) - parser.add_argument( - "--db-password", - default=os.environ.get("HASHSERVER_DB_PASSWORD", None), - help="Database password ($HASHSERVER_DB_PASSWORD)", - ) - parser.add_argument( - "--anon-perms", - metavar="PERM[,PERM[,...]]", - default=os.environ.get("HASHSERVER_ANON_PERMS", ",".join(DEFAULT_ANON_PERMS)), - help='Permissions to give anonymous users (default $HASHSERVER_ANON_PERMS, "%(default)s")', - ) - parser.add_argument( - "--admin-user", - default=os.environ.get("HASHSERVER_ADMIN_USER", None), - help="Create default admin user with name ADMIN_USER ($HASHSERVER_ADMIN_USER)", - ) - parser.add_argument( - "--admin-password", - default=os.environ.get("HASHSERVER_ADMIN_PASSWORD", None), - help="Create default admin user with password ADMIN_PASSWORD ($HASHSERVER_ADMIN_PASSWORD)", - ) - parser.add_argument( - "--reuseport", - action="store_true", - help="Enable SO_REUSEPORT, allowing multiple servers to bind to the same port for load balancing", - ) - - args = parser.parse_args() - - logger = logging.getLogger("hashserv") - - level = getattr(logging, args.log.upper(), None) - if not isinstance(level, int): - raise ValueError( - "Invalid log level: %s (Try ERROR/WARNING/INFO/DEBUG)" % args.log - ) - - logger.setLevel(level) - console = logging.StreamHandler() - console.setLevel(level) - logger.addHandler(console) - - read_only = (os.environ.get("HASHSERVER_READ_ONLY", "0") == "1") or args.read_only - if "," in args.anon_perms: - anon_perms = args.anon_perms.split(",") - else: - anon_perms = args.anon_perms.split() - - server = hashserv.create_server( - args.bind, - args.database, - upstream=args.upstream, - read_only=read_only, - db_username=args.db_username, - db_password=args.db_password, - anon_perms=anon_perms, - admin_username=args.admin_user, - admin_password=args.admin_password, - reuseport=args.reuseport, - ) - server.serve_forever() - return 0 - - -if __name__ == "__main__": - try: - ret = main() - except Exception: - ret = 1 - import traceback - - traceback.print_exc() - sys.exit(ret) diff --git a/bitbake/bin/bitbake-layers b/bitbake/bin/bitbake-layers deleted file mode 100755 index 341ecbcd97..0000000000 --- a/bitbake/bin/bitbake-layers +++ /dev/null @@ -1,106 +0,0 @@ -#!/usr/bin/env python3 - -# This script has subcommands which operate against your bitbake layers, either -# displaying useful information, or acting against them. -# See the help output for details on available commands. 
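-#
-# As a quick illustration (subcommands are provided by plugins, so the exact
-# set depends on the layers present; the layer path below is hypothetical):
-#
-#   bitbake-layers show-layers
-#   bitbake-layers add-layer ../meta-example
-#   bitbake-layers show-recipes busybox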
-
-# Copyright (C) 2011 Mentor Graphics Corporation
-# Copyright (C) 2011-2015 Intel Corporation
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-
-import logging
-import os
-import sys
-import argparse
-import warnings
-warnings.simplefilter("default")
-
-bindir = os.path.dirname(__file__)
-toolname = os.path.basename(__file__).split(".")[0]
-topdir = os.path.dirname(bindir)
-sys.path[0:0] = [os.path.join(topdir, 'lib')]
-
-import bb.tinfoil
-import bb.msg
-
-logger = bb.msg.logger_create(toolname, sys.stdout)
-
-def main():
-    parser = argparse.ArgumentParser(
-        description="BitBake layers utility",
-        epilog="Use %(prog)s --help to get help on a specific command",
-        add_help=False)
-    parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
-    parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true')
-    parser.add_argument('-F', '--force', help='Forced execution: can be specified multiple times. -F will force add without recipe parse verification and -FF will additionally force the run without layer parsing.', action='count', default=0)
-    parser.add_argument('--color', choices=['auto', 'always', 'never'], default='auto', help='Colorize output (where %(metavar)s is %(choices)s)', metavar='COLOR')
-
-    global_args, unparsed_args = parser.parse_known_args()
-
-    # Help is added here rather than via add_help=True, as we don't want it to
-    # be handled by parse_known_args()
-    parser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
-                        help='show this help message and exit')
-    subparsers = parser.add_subparsers(title='subcommands', metavar='')
-    subparsers.required = True
-
-    if global_args.debug:
-        logger.setLevel(logging.DEBUG)
-    elif global_args.quiet:
-        logger.setLevel(logging.ERROR)
-
-    # Need to re-run logger_create with color argument
-    # (will be the same logger since it has the same name)
-    bb.msg.logger_create('bitbake-layers', output=sys.stdout,
-                         color=global_args.color,
-                         level=logger.getEffectiveLevel())
-
-    plugins = []
-    with bb.tinfoil.Tinfoil(tracking=True) as tinfoil:
-        tinfoil.logger.setLevel(logger.getEffectiveLevel())
-
-        if global_args.force > 1:
-            bbpaths = []
-        else:
-            tinfoil.prepare(True)
-            bbpaths = tinfoil.config_data.getVar('BBPATH').split(':')
-
-        for path in ([topdir] + bbpaths):
-            pluginbasepath = {"bitbake-layers":'bblayers', 'bitbake-config-build':'bbconfigbuild'}[toolname]
-            pluginpath = os.path.join(path, 'lib', pluginbasepath)
-            bb.utils.load_plugins(logger, plugins, pluginpath)
-
-        registered = False
-        for plugin in plugins:
-            if hasattr(plugin, 'tinfoil_init') and global_args.force <= 1:
-                plugin.tinfoil_init(tinfoil)
-            if hasattr(plugin, 'register_commands'):
-                registered = True
-                plugin.register_commands(subparsers)
-
-        if not registered:
-            logger.error("No commands registered - missing plugins?")
-            sys.exit(1)
-
-        args = parser.parse_args(unparsed_args, namespace=global_args)
-
-        if getattr(args, 'parserecipes', False):
-            tinfoil.config_data.disableTracking()
-            tinfoil.parse_recipes()
-            tinfoil.config_data.enableTracking()
-
-        return args.func(args)
-
-
-if __name__ == "__main__":
-    try:
-        ret = main()
-    except bb.BBHandledException:
-        ret = 1
-    except Exception:
-        ret = 1
-        import traceback
-        traceback.print_exc()
-    sys.exit(ret)
diff --git a/bitbake/bin/bitbake-prserv b/bitbake/bin/bitbake-prserv
deleted file mode 100755
index 3992e84eab..0000000000
--- a/bitbake/bin/bitbake-prserv
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright BitBake Contributors
-#
-# 
SPDX-License-Identifier: GPL-2.0-only
-#
-
-import os
-import sys, logging
-import argparse
-import warnings
-warnings.simplefilter("default")
-
-sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), "lib"))
-
-import prserv
-import prserv.serv
-
-VERSION = "2.0.0"
-
-PRHOST_DEFAULT="0.0.0.0"
-PRPORT_DEFAULT=8585
-
-def init_logger(logfile, loglevel):
-    numeric_level = getattr(logging, loglevel.upper(), None)
-    if not isinstance(numeric_level, int):
-        raise ValueError("Invalid log level: %s" % loglevel)
-    FORMAT = "%(asctime)-15s %(message)s"
-    logging.basicConfig(level=numeric_level, filename=logfile, format=FORMAT)
-
-def main():
-    parser = argparse.ArgumentParser(
-        description="BitBake PR Server. Version=%s" % VERSION,
-        formatter_class=argparse.RawTextHelpFormatter)
-
-    parser.add_argument(
-        "-f",
-        "--file",
-        default="prserv.sqlite3",
-        help="database filename (default: prserv.sqlite3)",
-    )
-    parser.add_argument(
-        "-l",
-        "--log",
-        default="prserv.log",
-        help="log filename (default: prserv.log)",
-    )
-    parser.add_argument(
-        "--loglevel",
-        default="INFO",
-        help="logging level, e.g. CRITICAL, ERROR, WARNING, INFO, DEBUG",
-    )
-    parser.add_argument(
-        "--start",
-        action="store_true",
-        help="start daemon",
-    )
-    parser.add_argument(
-        "--stop",
-        action="store_true",
-        help="stop daemon",
-    )
-    parser.add_argument(
-        "--host",
-        help="ip address to bind",
-        default=PRHOST_DEFAULT,
-    )
-    parser.add_argument(
-        "--port",
-        type=int,
-        default=PRPORT_DEFAULT,
-        help="port number (default: 8585)",
-    )
-    parser.add_argument(
-        "-r",
-        "--read-only",
-        action="store_true",
-        help="open database in read-only mode",
-    )
-    parser.add_argument(
-        "-u",
-        "--upstream",
-        default=os.environ.get("PRSERV_UPSTREAM", None),
-        help="Upstream PR service (host:port)",
-    )
-
-    args = parser.parse_args()
-    init_logger(os.path.abspath(args.log), args.loglevel)
-
-    if args.start:
-        ret=prserv.serv.start_daemon(
-            args.file,
-            args.host,
-            args.port,
-            os.path.abspath(args.log),
-            args.read_only,
-            args.upstream
-        )
-    elif args.stop:
-        ret=prserv.serv.stop_daemon(args.host, args.port)
-    else:
-        ret=parser.print_help()
-    return ret
-
-if __name__ == "__main__":
-    try:
-        ret = main()
-    except Exception:
-        ret = 1
-        import traceback
-        traceback.print_exc()
-    sys.exit(ret)
-
diff --git a/bitbake/bin/bitbake-selftest b/bitbake/bin/bitbake-selftest
deleted file mode 100755
index fb7c57dd83..0000000000
--- a/bitbake/bin/bitbake-selftest
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2012 Richard Purdie
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-
-import os
-import sys, logging
-import warnings
-warnings.simplefilter("default")
-sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), 'lib'))
-
-import unittest
-try:
-    import bb
-    import hashserv
-    import prserv
-    import layerindexlib
-except RuntimeError as exc:
-    sys.exit(str(exc))
-
-tests = ["bb.tests.codeparser",
-         "bb.tests.color",
-         "bb.tests.cooker",
-         "bb.tests.cow",
-         "bb.tests.data",
-         "bb.tests.event",
-         "bb.tests.fetch",
-         "bb.tests.parse",
-         "bb.tests.runqueue",
-         "bb.tests.setup",
-         "bb.tests.siggen",
-         "bb.tests.utils",
-         "bb.tests.compression",
-         "bb.tests.filter",
-         "hashserv.tests",
-         "prserv.tests",
-         "layerindexlib.tests.layerindexobj",
-         "layerindexlib.tests.restapi",
-         "layerindexlib.tests.cooker"]
-
-for t in tests:
-    t = '.'.join(t.split('.')[:3])
-    __import__(t)
-
-
-# Set-up logging
-class StdoutStreamHandler(logging.StreamHandler):
-    """Special handler so that 
unittest is able to capture stdout"""
-    def __init__(self):
-        # Override __init__() because we don't want to set self.stream here
-        logging.Handler.__init__(self)
-
-    @property
-    def stream(self):
-        # We want to dynamically write wherever sys.stdout is pointing to
-        return sys.stdout
-
-
-handler = StdoutStreamHandler()
-bb.logger.addHandler(handler)
-bb.logger.setLevel(logging.DEBUG)
-
-
-ENV_HELP = """\
-Environment variables:
-  BB_SKIP_NETTESTS      set to 'yes' in order to skip tests using network
-                        connection
-  BB_TMPDIR_NOCLEAN     set to 'yes' to preserve test tmp directories
-"""
-
-class main(unittest.main):
-    def _print_help(self, *args, **kwargs):
-        super(main, self)._print_help(*args, **kwargs)
-        print(ENV_HELP)
-
-
-if __name__ == '__main__':
-    main(defaultTest=tests, buffer=True)
diff --git a/bitbake/bin/bitbake-server b/bitbake/bin/bitbake-server
deleted file mode 100755
index 01f83d982f..0000000000
--- a/bitbake/bin/bitbake-server
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python3
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Copyright (C) 2020 Richard Purdie
-#
-
-import os
-import sys
-import warnings
-warnings.simplefilter("default")
-warnings.filterwarnings("ignore", category=DeprecationWarning, message=".*use.of.fork.*may.lead.to.deadlocks.in.the.child.*")
-import logging
-sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))
-
-import bb
-
-bb.utils.check_system_locale()
-
-# Users shouldn't be running this code directly
-if len(sys.argv) != 11 or not sys.argv[1].startswith("decafbad"):
-    print("bitbake-server is meant for internal execution by bitbake itself, please don't use it standalone.")
-    sys.exit(1)
-
-import bb.server.process
-
-lockfd = int(sys.argv[2])
-readypipeinfd = int(sys.argv[3])
-logfile = sys.argv[4]
-lockname = sys.argv[5]
-sockname = sys.argv[6]
-timeout = float(sys.argv[7])
-profile = sys.argv[8]
-xmlrpcinterface = (sys.argv[9], int(sys.argv[10]))
-if xmlrpcinterface[0] == "None":
-    xmlrpcinterface = (None, xmlrpcinterface[1])
-
-# Replace standard fds with our own
-with open('/dev/null', 'r') as si:
-    os.dup2(si.fileno(), sys.stdin.fileno())
-
-with open(logfile, 'a+') as so:
-    os.dup2(so.fileno(), sys.stdout.fileno())
-    os.dup2(so.fileno(), sys.stderr.fileno())
-
-# Have stdout and stderr be the same so log output matches chronologically
-# and there aren't two separate buffers
-sys.stderr = sys.stdout
-
-logger = logging.getLogger("BitBake")
-# Ensure logging messages get sent to the UI as events
-handler = bb.event.LogHandler()
-logger.addHandler(handler)
-
-bb.server.process.execServer(lockfd, readypipeinfd, lockname, sockname, timeout, xmlrpcinterface, profile)
-
diff --git a/bitbake/bin/bitbake-setup b/bitbake/bin/bitbake-setup
deleted file mode 100755
index caec990c29..0000000000
--- a/bitbake/bin/bitbake-setup
+++ /dev/null
@@ -1,883 +0,0 @@
-#!/usr/bin/env python3
-
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-
-import logging
-import os
-import sys
-import argparse
-import warnings
-import json
-import shutil
-import time
-import stat
-import tempfile
-import configparser
-import datetime
-import glob
-import subprocess
-import copy
-
-default_registry = os.path.normpath(os.path.dirname(__file__) + "/../default-registry")
-
-bindir = os.path.abspath(os.path.dirname(__file__))
-sys.path[0:0] = [os.path.join(os.path.dirname(bindir), 'lib')]
-
-import bb.msg
-import bb.process
-
-logger = bb.msg.logger_create('bitbake-setup', sys.stdout)
-
-def cache_dir(top_dir):
-    return os.path.join(top_dir, 
'.bitbake-setup-cache') - -def init_bb_cache(top_dir, settings, args): - dldir = settings["default"]["dl-dir"] - bb_cachedir = os.path.join(cache_dir(top_dir), 'bitbake-cache') - - d = bb.data.init() - d.setVar("DL_DIR", dldir) - d.setVar("BB_CACHEDIR", bb_cachedir) - d.setVar("__BBSRCREV_SEEN", "1") - if args.no_network: - d.setVar("BB_SRCREV_POLICY", "cache") - bb.fetch.fetcher_init(d) - return d - -def save_bb_cache(): - bb.fetch2.fetcher_parse_save() - bb.fetch2.fetcher_parse_done() - -def get_config_name(config): - suffix = '.conf.json' - config_file = os.path.basename(config) - if config_file.endswith(suffix): - return config_file[:-len(suffix)] - else: - raise Exception("Config file {} does not end with {}, please rename the file.".format(config, suffix)) - -def write_upstream_config(config_dir, config_data): - with open(os.path.join(config_dir, "config-upstream.json"),'w') as s: - json.dump(config_data, s, sort_keys=True, indent=4) - -def write_sources_fixed_revisions(config_dir, config_data): - sources = {} - sources['sources'] = config_data - with open(os.path.join(config_dir, "sources-fixed-revisions.json"),'w') as s: - json.dump(sources, s, sort_keys=True, indent=4) - -def commit_config(config_dir): - bb.process.run("git -C {} add .".format(config_dir)) - bb.process.run("git -C {} commit --no-verify -a -m 'Configuration at {}'".format(config_dir, time.asctime())) - -def _write_layer_list(dest, repodirs): - layers = [] - for r in repodirs: - for root, dirs, files in os.walk(os.path.join(dest,r)): - if os.path.basename(root) == 'conf' and 'layer.conf' in files: - layers.append(os.path.relpath(os.path.dirname(root), dest)) - layers_f = os.path.join(dest, ".oe-layers.json") - with open(layers_f, 'w') as f: - json.dump({"version":"1.0","layers":layers}, f, sort_keys=True, indent=4) - -def checkout_layers(layers, layerdir, d): - layers_fixed_revisions = copy.deepcopy(layers) - repodirs = [] - oesetupbuild = None - print("Fetching layer/tool repositories into {}".format(layerdir)) - for r_name in layers: - r_data = layers[r_name] - repodir = r_data["path"] - repodirs.append(repodir) - - r_remote = r_data['git-remote'] - rev = r_remote['rev'] - branch = r_remote.get('branch', None) - remotes = r_remote['remotes'] - - for remote in remotes: - prot,host,path,user,pswd,params = bb.fetch.decodeurl(remotes[remote]["uri"]) - fetchuri = bb.fetch.encodeurl(('git',host,path,user,pswd,params)) - print(" {}".format(r_name)) - if branch: - src_uri = f"{fetchuri};protocol={prot};rev={rev};branch={branch};destsuffix={repodir}" - else: - src_uri = f"{fetchuri};protocol={prot};rev={rev};nobranch=1;destsuffix={repodir}" - fetcher = bb.fetch.Fetch([src_uri], d) - do_fetch(fetcher, layerdir) - urldata = fetcher.ud[src_uri] - revision = urldata.revision - layers_fixed_revisions[r_name]['git-remote']['rev'] = revision - - if os.path.exists(os.path.join(layerdir, repodir, 'scripts/oe-setup-build')): - oesetupbuild = os.path.join(layerdir, repodir, 'scripts/oe-setup-build') - oeinitbuildenvdir = os.path.join(layerdir, repodir) - - print(" ") - _write_layer_list(layerdir, repodirs) - - if oesetupbuild: - links = {'setup-build': oesetupbuild, 'oe-scripts': os.path.dirname(oesetupbuild), 'oe-init-build-env-dir': oeinitbuildenvdir} - for l,t in links.items(): - symlink = os.path.join(layerdir, l) - if os.path.lexists(symlink): - os.remove(symlink) - os.symlink(os.path.relpath(t,layerdir),symlink) - - return layers_fixed_revisions - -def setup_bitbake_build(bitbake_config, layerdir, setupdir, thisdir): - def 
_setup_build_conf(layers, filerelative_layers, build_conf_dir): - os.makedirs(build_conf_dir) - layers_s = [] - - for l in layers: - l = os.path.join(layerdir, l) - layers_s.append(" {} \\".format(l)) - - for l in filerelative_layers: - if thisdir: - l = os.path.join(thisdir, l) - else: - raise Exception("Configuration is using bb-layers-file-relative to specify " \ - "a layer path relative to itself. This can be done only " \ - "when the configuration is specified by its path on local " \ - "disk, not when it's in a registry or is fetched over http.") - layers_s.append(" {} \\".format(l)) - - layers_s = "\n".join(layers_s) - bblayers_conf = """BBLAYERS ?= " \\ -{} - " -""".format(layers_s) - with open(os.path.join(build_conf_dir, "bblayers.conf"), 'w') as f: - f.write(bblayers_conf) - - local_conf = """# -# This file is intended for local configuration tweaks. -# -# If you would like to publish and share changes made to this file, -# it is recommended to put them into a distro config, or to create -# layer fragments from changes made here. -# -""" - with open(os.path.join(build_conf_dir, "local.conf"), 'w') as f: - f.write(local_conf) - - with open(os.path.join(build_conf_dir, "templateconf.cfg"), 'w') as f: - f.write("") - - with open(os.path.join(build_conf_dir, "conf-summary.txt"), 'w') as f: - f.write(bitbake_config["description"] + "\n") - - with open(os.path.join(build_conf_dir, "conf-notes.txt"), 'w') as f: - f.write("") - - def _make_init_build_env(builddir, oeinitbuildenvdir): - builddir = os.path.realpath(builddir) - cmd = "cd {}\nset {}\n. ./oe-init-build-env\n".format(oeinitbuildenvdir, builddir) - initbuild_in_builddir = os.path.join(builddir, 'init-build-env') - - with open(initbuild_in_builddir, 'w') as f: - f.write("# init-build-env wrapper created by bitbake-setup\n") - f.write(cmd + '\n') - - def _prepend_passthrough_to_init_build_env(builddir): - env = bitbake_config.get("bb-env-passthrough-additions") - if not env: - return - - initbuild_in_builddir = os.path.join(builddir, 'init-build-env') - with open(initbuild_in_builddir) as f: - content = f.read() - - joined = " \\\n".join(env) - env = "export BB_ENV_PASSTHROUGH_ADDITIONS=\" \\\n" - env += "${BB_ENV_PASSTHROUGH_ADDITIONS} \\\n" - env += joined - env += '"' - - with open(initbuild_in_builddir, 'w') as f: - f.write("# environment passthrough added by bitbake-setup\n") - f.write(env + '\n') - f.write('\n') - f.write(content) - - bitbake_builddir = os.path.join(setupdir, "build") - print("Setting up bitbake configuration in\n {}\n".format(bitbake_builddir)) - - template = bitbake_config.get("oe-template") - layers = bitbake_config.get("bb-layers") - if not template and not layers: - print("Bitbake configuration does not contain a reference to an OpenEmbedded build template via 'oe-template' or a list of layers via 'bb-layers'; please use oe-setup-build, oe-init-build-env or another mechanism manually to complete the setup.") - return - oesetupbuild = os.path.join(layerdir, 'setup-build') - if template and not os.path.exists(oesetupbuild): - raise Exception("Cannot complete setting up a bitbake build directory from OpenEmbedded template '{}' as oe-setup-build was not found in any layers; please use oe-init-build-env manually.".format(template)) - - bitbake_confdir = os.path.join(bitbake_builddir, 'conf') - backup_bitbake_confdir = bitbake_confdir + "-backup.{}".format(time.strftime("%Y%m%d%H%M%S")) - if os.path.exists(bitbake_confdir): - os.rename(bitbake_confdir, backup_bitbake_confdir) - - if layers: - 
filerelative_layers = bitbake_config.get("bb-layers-file-relative") or [] - _setup_build_conf(layers, filerelative_layers, bitbake_confdir) - - if template: - bb.process.run("{} setup -c {} -b {} --no-shell".format(oesetupbuild, template, bitbake_builddir)) - else: - oeinitbuildenvdir = os.path.join(layerdir, 'oe-init-build-env-dir') - if not os.path.exists(os.path.join(oeinitbuildenvdir, "oe-init-build-env")): - print("Could not find oe-init-build-env in any of the layers; please use another mechanism to initialize the bitbake environment") - return - _make_init_build_env(bitbake_builddir, os.path.realpath(oeinitbuildenvdir)) - - _prepend_passthrough_to_init_build_env(bitbake_builddir) - - siteconf_symlink = os.path.join(bitbake_confdir, "site.conf") - siteconf = os.path.normpath(os.path.join(setupdir, '..', "site.conf")) - if os.path.lexists(siteconf_symlink): - os.remove(siteconf_symlink) - os.symlink(os.path.relpath(siteconf, bitbake_confdir), siteconf_symlink) - - - init_script = os.path.join(bitbake_builddir, "init-build-env") - shell = "bash" - fragments = bitbake_config.get("oe-fragments", []) + sorted(bitbake_config.get("oe-fragment-choices",{}).values()) - if fragments: - bb.process.run("{} -c '. {} && bitbake-config-build enable-fragment {}'".format(shell, init_script, " ".join(fragments))) - - if os.path.exists(backup_bitbake_confdir): - bitbake_config_diff = get_diff(backup_bitbake_confdir, bitbake_confdir) - if bitbake_config_diff: - print("Existing bitbake configuration directory renamed to {}".format(backup_bitbake_confdir)) - print("The bitbake configuration has changed:") - print(bitbake_config_diff) - else: - shutil.rmtree(backup_bitbake_confdir) - - print("This bitbake configuration provides:\n {}\n".format(bitbake_config["description"])) - - readme = """{}\n\nAdditional information is in {} and {}\n -Source the environment using '. {}' to run builds from the command line. -The bitbake configuration files (local.conf, bblayers.conf and more) can be found in {}/conf -""".format( - bitbake_config["description"], - os.path.join(bitbake_builddir,'conf/conf-summary.txt'), - os.path.join(bitbake_builddir,'conf/conf-notes.txt'), - init_script, - bitbake_builddir - ) - readme_file = os.path.join(bitbake_builddir, "README") - with open(readme_file, 'w') as f: - f.write(readme) - print("Usage instructions and additional information are in\n {}\n".format(readme_file)) - print("The bitbake configuration files (local.conf, bblayers.conf and more) can be found in\n {}/conf\n".format(bitbake_builddir)) - print("To run builds, source the environment using\n .
{}".format(init_script)) - -def get_registry_config(registry_path, id): - for root, dirs, files in os.walk(registry_path): - for f in files: - if f.endswith('.conf.json') and id == get_config_name(f): - return os.path.join(root, f) - raise Exception("Unable to find {} in available configurations; use 'list' sub-command to see what is available".format(id)) - -def update_build(config, confdir, setupdir, layerdir, d): - layer_config = copy.deepcopy(config["data"]["sources"]) - layer_overrides = config["source-overrides"]["sources"] - for k,v in layer_overrides.items(): - if k in layer_config: - layer_config[k]["git-remote"] = v["git-remote"] - sources_fixed_revisions = checkout_layers(layer_config, layerdir, d) - bitbake_config = config["bitbake-config"] - thisdir = os.path.dirname(config["path"]) if config["type"] == 'local' else None - setup_bitbake_build(bitbake_config, layerdir, setupdir, thisdir) - write_sources_fixed_revisions(confdir, sources_fixed_revisions) - -def int_input(allowed_values): - n = None - while n is None: - try: - n = int(input()) - except ValueError: - print('Not a valid number, please try again:') - continue - if n not in allowed_values: - print('Number {} not one of {}, please try again:'.format(n, allowed_values)) - n = None - return n - -def flatten_bitbake_configs(configs): - def merge_configs(c1,c2): - c_merged = {} - for k,v in c2.items(): - if k not in c1.keys(): - c_merged[k] = v - for k,v in c1.items(): - if k not in c2.keys(): - c_merged[k] = v - else: - c_merged[k] = c1[k] + c2[k] - del c_merged['configurations'] - return c_merged - - flattened_configs = [] - for c in configs: - if 'configurations' not in c: - flattened_configs.append(c) - else: - for sub_c in flatten_bitbake_configs(c['configurations']): - flattened_configs.append(merge_configs(c, sub_c)) - return flattened_configs - -def choose_bitbake_config(configs, parameters, non_interactive): - flattened_configs = flatten_bitbake_configs(configs) - configs_dict = {i["name"]:i for i in flattened_configs} - - if parameters: - config_id = parameters[0] - if config_id not in configs_dict: - raise Exception("Bitbake configuration {} not found; replace with one of {}".format(config_id, configs_dict)) - return configs_dict[config_id] - - enumerated_configs = list(enumerate(flattened_configs)) - if len(enumerated_configs) == 1: - only_config = flattened_configs[0] - print("\nSelecting the only available bitbake configuration {}".format(only_config["name"])) - return only_config - - if non_interactive: - raise Exception("Unable to choose from bitbake configurations in non-interactive mode: {}".format(configs_dict)) - - print("\nAvailable bitbake configurations:") - for n, config_data in enumerated_configs: - print("{}. 
{}\t{}".format(n, config_data["name"], config_data["description"])) - print("\nPlease select one of the above bitbake configurations by its number:") - config_n = int_input([i[0] for i in enumerated_configs]) - return flattened_configs[config_n] - -def choose_config(configs, non_interactive): - not_expired_configs = [k for k in configs.keys() if not has_expired(configs[k].get("expires", None))] - config_list = list(enumerate(not_expired_configs)) - if len(config_list) == 1: - only_config = config_list[0][1] - print("\nSelecting the only available configuration {}\n".format(only_config)) - return only_config - - if non_interactive: - raise Exception("Unable to choose from configurations in non-interactive mode: {}".format(not_expired_configs)) - - print("\nAvailable configurations:") - for n, config_name in config_list: - config_data = configs[config_name] - expiry_date = config_data.get("expires", None) - config_desc = config_data["description"] - if expiry_date: - print("{}. {}\t{} (supported until {})".format(n, config_name, config_desc, expiry_date)) - else: - print("{}. {}\t{}".format(n, config_name, config_desc)) - print("\nPlease select one of the above configurations by its number:") - config_n = int_input([i[0] for i in config_list]) - return config_list[config_n][1] - -def choose_fragments(possibilities, parameters, non_interactive, skip_selection): - choices = {} - for k,v in possibilities.items(): - if skip_selection and k in skip_selection: - print("Skipping a selection of {}, as requested on command line. The resulting bitbake configuration may require further manual adjustments.".format(k)) - continue - choice = [o for o in v["options"] if o in parameters] - if len(choice) > 1: - raise Exception("Options specified on command line do not allow a single selection from possibilities {}, please remove one or more from {}".format(v["options"], parameters)) - if len(choice) == 1: - choices[k] = choice[0] - continue - - if non_interactive: - raise Exception("Unable to choose from options in non-interactive mode: {}".format(v["options"])) - - print("\n" + v["description"] + ":") - options_enumerated = list(enumerate(v["options"])) - for n,o in options_enumerated: - print("{}. 
{}".format(n, o)) - print("\nPlease select one of the above options by its number:") - option_n = int_input([i[0] for i in options_enumerated]) - choices[k] = options_enumerated[option_n][1] - return choices - -def obtain_config(top_dir, settings, args, source_overrides, d): - if args.config: - config_id = args.config[0] - config_parameters = args.config[1:] - if os.path.exists(config_id): - print("Reading configuration from local file\n {}".format(config_id)) - upstream_config = {'type':'local', - 'path':os.path.abspath(config_id), - 'name':get_config_name(config_id), - 'data':json.load(open(config_id)) - } - elif config_id.startswith("http://") or config_id.startswith("https://"): - print("Reading configuration from network URI\n {}".format(config_id)) - import urllib.request - with urllib.request.urlopen(config_id) as f: - upstream_config = {'type':'network','uri':config_id,'name':get_config_name(config_id),'data':json.load(f)} - else: - print("Looking up config {} in configuration registry".format(config_id)) - registry_path = update_registry(settings["default"]["registry"], cache_dir(top_dir), d) - registry_configs = list_registry(registry_path, with_expired=True) - if config_id not in registry_configs: - raise Exception("Config {} not found in configuration registry, re-run 'init' without parameters to choose from available configurations.".format(config_id)) - upstream_config = {'type':'registry','registry':settings["default"]["registry"],'name':config_id,'data':json.load(open(get_registry_config(registry_path,config_id)))} - expiry_date = upstream_config['data'].get("expires", None) - if has_expired(expiry_date): - print("This configuration is no longer supported after {}. Please consider changing to a supported configuration.".format(expiry_date)) - else: - registry_path = update_registry(settings["default"]["registry"], cache_dir(top_dir), d) - registry_configs = list_registry(registry_path, with_expired=True) - config_id = choose_config(registry_configs, args.non_interactive) - config_parameters = [] - upstream_config = {'type':'registry','registry':settings["default"]["registry"],'name':config_id,'data':json.load(open(get_registry_config(registry_path,config_id)))} - - upstream_config['bitbake-config'] = choose_bitbake_config(upstream_config['data']['bitbake-setup']['configurations'], config_parameters, args.non_interactive) - upstream_config['bitbake-config']['oe-fragment-choices'] = choose_fragments(upstream_config['bitbake-config'].get('oe-fragments-one-of',{}), config_parameters[1:], args.non_interactive, args.skip_selection) - upstream_config['non-interactive-cmdline-options'] = [config_id, upstream_config['bitbake-config']['name']] + sorted(upstream_config['bitbake-config']['oe-fragment-choices'].values()) - upstream_config['source-overrides'] = source_overrides - upstream_config['skip-selection'] = args.skip_selection - return upstream_config - -def init_config(top_dir, settings, args, d): - stdout = sys.stdout - def handle_task_progress(event, d): - rate = event.rate if event.rate else '' - progress = event.progress if event.progress > 0 else 0 - print("{}% {} ".format(progress, rate), file=stdout, end='\r') - - create_siteconf(top_dir, args.non_interactive) - source_overrides = json.load(open(args.source_overrides)) if args.source_overrides else {'sources':{}} - upstream_config = obtain_config(top_dir, settings, args, source_overrides, d) - print("\nRun 'bitbake-setup init --non-interactive {}' to select this configuration non-interactively.\n".format(" 
".join(upstream_config['non-interactive-cmdline-options']))) - - setupdir = os.path.join(os.path.abspath(top_dir), args.setup_dir_name or "{}-{}".format(upstream_config['name']," ".join(upstream_config['non-interactive-cmdline-options'][1:]).replace(" ","-").replace("/","_"))) - if os.path.exists(os.path.join(setupdir, "layers")): - print(f"Setup already initialized in:\n {setupdir}\nUse 'bitbake-setup status' to check if it needs to be updated, or 'bitbake-setup update' to perform the update.\nIf you would like to start over and re-initialize in this directory, remove it, and run 'bitbake-setup init' again.") - return - - print("Initializing a setup directory in\n {}".format(setupdir)) - if not args.non_interactive: - y_or_n = input('Continue? (y/N): ') - if y_or_n != 'y': - exit() - print() - - os.makedirs(setupdir, exist_ok=True) - - confdir = os.path.join(setupdir, "config") - layerdir = os.path.join(setupdir, "layers") - - os.makedirs(confdir) - os.makedirs(layerdir) - - bb.process.run("git -C {} init -b main".format(confdir)) - # Make sure commiting doesn't fail if no default git user is configured on the machine - bb.process.run("git -C {} config user.name bitbake-setup".format(confdir)) - bb.process.run("git -C {} config user.email bitbake-setup@not.set".format(confdir)) - bb.process.run("git -C {} commit --no-verify --allow-empty -m 'Initial commit'".format(confdir)) - - bb.event.register("bb.build.TaskProgress", handle_task_progress, data=d) - - write_upstream_config(confdir, upstream_config) - update_build(upstream_config, confdir, setupdir, layerdir, d) - commit_config(confdir) - - bb.event.remove("bb.build.TaskProgress", None) - -def get_diff(file1, file2): - try: - bb.process.run('diff -uNr {} {}'.format(file1, file2)) - except bb.process.ExecutionError as e: - if e.exitcode == 1: - return e.stdout - else: - raise e - return None - -def are_layers_changed(layers, layerdir, d): - changed = False - for r_name in layers: - r_data = layers[r_name] - repodir = r_data["path"] - - r_remote = r_data['git-remote'] - rev = r_remote['rev'] - branch = r_remote.get('branch', None) - remotes = r_remote['remotes'] - - for remote in remotes: - type,host,path,user,pswd,params = bb.fetch.decodeurl(remotes[remote]["uri"]) - fetchuri = bb.fetch.encodeurl(('git',host,path,user,pswd,params)) - if branch: - fetcher = bb.fetch.FetchData("{};protocol={};rev={};branch={};destsuffix={}".format(fetchuri,type,rev,branch,repodir), d) - else: - fetcher = bb.fetch.FetchData("{};protocol={};rev={};nobranch=1;destsuffix={}".format(fetchuri,type,rev,repodir), d) - upstream_revision = fetcher.method.latest_revision(fetcher, d, 'default') - rev_parse_result = bb.process.run('git -C {} rev-parse HEAD'.format(os.path.join(layerdir, repodir))) - local_revision = rev_parse_result[0].strip() - if upstream_revision != local_revision: - changed = True - print('Layer repository {} checked out into {} updated revision {} from {} to {}'.format(remotes[remote]["uri"], os.path.join(layerdir, repodir), rev, local_revision, upstream_revision)) - - return changed - -def build_status(top_dir, settings, args, d, update=False): - setupdir = args.setup_dir - - confdir = os.path.join(setupdir, "config") - layerdir = os.path.join(setupdir, "layers") - - current_upstream_config = json.load(open(os.path.join(confdir, "config-upstream.json"))) - - args.config = current_upstream_config['non-interactive-cmdline-options'] - args.non_interactive = True - args.skip_selection = current_upstream_config['skip-selection'] - source_overrides = 
-def are_layers_changed(layers, layerdir, d): - changed = False - for r_name in layers: - r_data = layers[r_name] - repodir = r_data["path"] - - r_remote = r_data['git-remote'] - rev = r_remote['rev'] - branch = r_remote.get('branch', None) - remotes = r_remote['remotes'] - - for remote in remotes: - type,host,path,user,pswd,params = bb.fetch.decodeurl(remotes[remote]["uri"]) - fetchuri = bb.fetch.encodeurl(('git',host,path,user,pswd,params)) - if branch: - fetcher = bb.fetch.FetchData("{};protocol={};rev={};branch={};destsuffix={}".format(fetchuri,type,rev,branch,repodir), d) - else: - fetcher = bb.fetch.FetchData("{};protocol={};rev={};nobranch=1;destsuffix={}".format(fetchuri,type,rev,repodir), d) - upstream_revision = fetcher.method.latest_revision(fetcher, d, 'default') - rev_parse_result = bb.process.run('git -C {} rev-parse HEAD'.format(os.path.join(layerdir, repodir))) - local_revision = rev_parse_result[0].strip() - if upstream_revision != local_revision: - changed = True - print('Layer repository {} checked out into {}: revision {} updated from {} to {}'.format(remotes[remote]["uri"], os.path.join(layerdir, repodir), rev, local_revision, upstream_revision)) - - return changed - -def build_status(top_dir, settings, args, d, update=False): - setupdir = args.setup_dir - - confdir = os.path.join(setupdir, "config") - layerdir = os.path.join(setupdir, "layers") - - current_upstream_config = json.load(open(os.path.join(confdir, "config-upstream.json"))) - - args.config = current_upstream_config['non-interactive-cmdline-options'] - args.non_interactive = True - args.skip_selection = current_upstream_config['skip-selection'] - source_overrides = current_upstream_config["source-overrides"] - new_upstream_config = obtain_config(top_dir, settings, args, source_overrides, d) - - write_upstream_config(confdir, new_upstream_config) - config_diff = bb.process.run('git -C {} diff'.format(confdir))[0] - - if config_diff: - print('\nConfiguration in {} has changed:\n{}'.format(setupdir, config_diff)) - if update: - commit_config(confdir) - update_build(new_upstream_config, confdir, setupdir, layerdir, d) - else: - bb.process.run('git -C {} restore config-upstream.json'.format(confdir)) - return - - if are_layers_changed(current_upstream_config["data"]["sources"], layerdir, d): - if update: - update_build(current_upstream_config, confdir, setupdir, layerdir, d) - return - - print("\nConfiguration in {} has not changed.".format(setupdir)) - -def build_update(top_dir, settings, args, d): - build_status(top_dir, settings, args, d, update=True) - -def do_fetch(fetcher, dir): - # The git fetcher simply dumps git output to stdout; in a bitbake context that is redirected to temp/log.do_fetch - # and we need to set up something similar here - fetchlogdir = os.path.join(dir, 'logs') - os.makedirs(fetchlogdir, exist_ok=True) - fetchlog = os.path.join(fetchlogdir, 'fetch_log.{}'.format(datetime.datetime.now().strftime("%Y%m%d%H%M%S"))) - with open(fetchlog, 'a') as f: - oldstdout = sys.stdout - sys.stdout = f - fetcher.download() - fetcher.unpack(dir) - sys.stdout = oldstdout - -def update_registry(registry, cachedir, d): - registrydir = 'configurations' - if registry.startswith("."): - full_registrydir = os.path.join(os.getcwd(), registry, registrydir) - elif registry.startswith("/"): - full_registrydir = os.path.join(registry, registrydir) - else: - full_registrydir = os.path.join(cachedir, registrydir) - print("Fetching configuration registry\n {}\ninto\n {}".format(registry, full_registrydir)) - fetcher = bb.fetch.Fetch(["{};destsuffix={}".format(registry, registrydir)], d) - do_fetch(fetcher, cachedir) - return full_registrydir - -def has_expired(expiry_date): - if expiry_date: - return datetime.datetime.now() > datetime.datetime.fromisoformat(expiry_date) - return False
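has_expired() above compares the current time against a configuration's "expires" field, which it expects to be an ISO-8601 string. A small worked example (the date is illustrative, not taken from any real registry entry):

    import datetime

    expires = "2030-01-01"  # hypothetical "expires" value from a *.conf.json entry
    # A bare date parses to midnight of that day, so a configuration counts as
    # supported right up to the start of its expiry date.
    print(datetime.datetime.now() > datetime.datetime.fromisoformat(expires))  # False until 2030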
-def list_registry(registry_path, with_expired): - json_data = {} - - for root, dirs, files in os.walk(registry_path): - for f in files: - if f.endswith('.conf.json'): - config_name = get_config_name(f) - config_data = json.load(open(os.path.join(root, f))) - config_desc = config_data["description"] - expiry_date = config_data.get("expires", None) - if expiry_date: - if with_expired or not has_expired(expiry_date): - json_data[config_name] = {"description": config_desc, "expires": expiry_date} - else: - json_data[config_name] = {"description": config_desc} - return json_data - -def list_configs(top_dir, settings, args, d): - registry_path = update_registry(settings["default"]["registry"], cache_dir(top_dir), d) - json_data = list_registry(registry_path, args.with_expired) - print("\nAvailable configurations:") - for config_name, config_data in json_data.items(): - expiry_date = config_data.get("expires", None) - config_desc = config_data["description"] - if expiry_date: - if args.with_expired or not has_expired(expiry_date): - print("{}\t{} (supported until {})".format(config_name, config_desc, expiry_date)) - else: - print("{}\t{}".format(config_name, config_desc)) - print("\nRun 'init' with one of the above configuration identifiers to set up a build.") - - if args.write_json: - with open(args.write_json, 'w') as f: - json.dump(json_data, f, sort_keys=True, indent=4) - print("Available configurations written into {}".format(args.write_json)) - -def install_buildtools(top_dir, settings, args, d): - buildtools_install_dir = os.path.join(args.setup_dir, 'buildtools') - if os.path.exists(buildtools_install_dir): - if not args.force: - print("Buildtools are already installed in {}.".format(buildtools_install_dir)) - env_scripts = glob.glob(os.path.join(buildtools_install_dir, 'environment-setup-*')) - if env_scripts: - print("If you wish to use them, you need to source the environment setup script e.g.") - for s in env_scripts: - print("$ . {}".format(s)) - print("You can also re-run bitbake-setup install-buildtools with --force option to force a reinstallation.") - return - shutil.rmtree(buildtools_install_dir) - - install_buildtools = os.path.join(args.setup_dir, 'layers/oe-scripts/install-buildtools') - buildtools_download_dir = os.path.join(args.setup_dir, 'buildtools-downloads/{}'.format(time.strftime("%Y%m%d%H%M%S"))) - print("The buildtools archive will be downloaded into {} and its contents installed into {}".format(buildtools_download_dir, buildtools_install_dir)) - subprocess.check_call("{} -d {} --downloads-directory {}".format(install_buildtools, buildtools_install_dir, buildtools_download_dir), shell=True) - -def create_siteconf(top_dir, non_interactive=True): - siteconfpath = os.path.join(top_dir, 'site.conf') - print('A common site.conf file will be created, please edit or replace before running builds\n {}\n'.format(siteconfpath)) - if not non_interactive: - y_or_n = input('Proceed? (y/N): ') - if y_or_n != 'y': - exit() - - os.makedirs(os.path.dirname(siteconfpath), exist_ok=True) - if os.path.exists(siteconfpath): - backup_siteconf = siteconfpath + "-backup.{}".format(time.strftime("%Y%m%d%H%M%S")) - os.rename(siteconfpath, backup_siteconf) - print("Previous settings are in {}".format(backup_siteconf)) - with open(siteconfpath, 'w') as siteconffile: - siteconffile.write('# This file is intended for build host-specific bitbake settings\n') - -def topdir_settings_path(top_dir): - return os.path.join(top_dir, 'settings.conf') - -def global_settings_path(args): - return os.path.abspath(args.global_settings) if args.global_settings else os.path.join(os.path.expanduser('~'), '.config', 'bitbake-setup', 'settings.conf') - -def load_settings(settings_path): - settings = configparser.ConfigParser() - if os.path.exists(settings_path): - print('Loading settings from\n {}\n'.format(settings_path)) - settings.read_file(open(settings_path)) - return settings - -def change_setting(top_dir, args): - if vars(args)['global']: - settings_path = global_settings_path(args) - else: - settings_path = topdir_settings_path(top_dir) - settings = load_settings(settings_path) - - if args.subcommand == 'set': - if args.section not in settings.keys(): - settings[args.section] = {} - settings[args.section][args.setting] = args.value - print(f"From section '{args.section}' the setting '{args.setting}' was changed to '{args.value}'") - if args.subcommand == 'unset': - if args.section in settings.keys() and args.setting in settings[args.section].keys(): - del settings[args.section][args.setting] - print(f"From section '{args.section}' the setting '{args.setting}' has been removed") - - os.makedirs(os.path.dirname(settings_path), exist_ok=True) - with open(settings_path, 'w') as settingsfile: - settings.write(settingsfile) - print(f"Settings written to {settings_path}") - -def list_settings(all_settings): - for section, section_settings in all_settings.items(): - for key, value in section_settings.items():
- print("{} {} {}".format(section, key, value)) - -def settings_func(top_dir, all_settings, args): - if args.subcommand == 'list': - list_settings(all_settings) - elif args.subcommand == 'set' or args.subcommand == 'unset': - change_setting(top_dir, args) - -def get_setup_dir_via_bbpath(): - bbpath = os.environ.get('BBPATH') - if bbpath: - bitbake_dir = os.path.normpath(bbpath.split(':')[0]) - if os.path.exists(os.path.join(bitbake_dir,'init-build-env')): - setup_dir = os.path.dirname(bitbake_dir) - return setup_dir - return None - -def get_top_dir(args, settings): - setup_dir_via_bbpath = get_setup_dir_via_bbpath() - if setup_dir_via_bbpath: - top_dir = os.path.dirname(setup_dir_via_bbpath) - if os.path.exists(topdir_settings_path(top_dir)): - return top_dir - - if hasattr(args, 'setup_dir'): - top_dir = os.path.dirname(os.path.normpath(args.setup_dir)) - return top_dir - - top_dir_prefix = settings['default']['top-dir-prefix'] - top_dir_name = settings['default']['top-dir-name'] - return os.path.join(top_dir_prefix, top_dir_name) - -def merge_settings(builtin_settings, global_settings, topdir_settings, cmdline_settings): - all_settings = builtin_settings - - for s in (global_settings, topdir_settings): - for section, section_settings in s.items(): - for setting, value in section_settings.items(): - if section not in all_settings.keys(): - all_settings[section] = {} - all_settings[section][setting] = value - - for (section, setting, value) in cmdline_settings: - if section not in all_settings.keys(): - all_settings[section] = {} - all_settings[section][setting] = value - - return all_settings - -def main(): - def add_setup_dir_arg(parser): - setup_dir = get_setup_dir_via_bbpath() - if setup_dir: - parser.add_argument('--setup-dir', default=setup_dir, help="Path to the setup, default is %(default)s via BBPATH") - else: - parser.add_argument('--setup-dir', required=True, help="Path to the setup") - - parser = argparse.ArgumentParser( - description="BitBake setup utility. 
Run with 'init' argument to get started.", - epilog="Use %(prog)s --help to get help on a specific command" - ) - parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true') - parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true') - parser.add_argument('--color', choices=['auto', 'always', 'never'], default='auto', help='Colorize output (where %(metavar)s is %(choices)s)', metavar='COLOR') - parser.add_argument('--no-network', action='store_true', help='Do not check whether configuration repositories and layer repositories have been updated; use only the local cache.') - parser.add_argument('--global-settings', action='store', metavar='PATH', help='Path to the global settings file.') - parser.add_argument('--setting', default=[], action='append', dest='cmdline_settings', - nargs=3, metavar=('SECTION', 'SETTING', 'VALUE'), - help='Modify a setting (for this bitbake-setup invocation only), for example "--setting default top-dir-prefix /path/to/top/dir".') - - subparsers = parser.add_subparsers() - - parser_list = subparsers.add_parser('list', help='List available configurations') - parser_list.add_argument('--with-expired', action='store_true', help='List also configurations that are no longer supported due to reaching their end-of-life dates.') - parser_list.add_argument('--write-json', action='store', help='Write available configurations into a json file so they can be programmatically processed.') - parser_list.set_defaults(func=list_configs) - - parser_init = subparsers.add_parser('init', help='Select a configuration and initialize a setup from it') - parser_init.add_argument('config', nargs='*', help="path/URL/id to a configuration file (use 'list' command to get available ids), followed by configuration options. 
Bitbake-setup will ask to choose from available choices if command line doesn't completely specify them.") - parser_init.add_argument('--non-interactive', action='store_true', help='Do not ask to interactively choose from available options; if bitbake-setup cannot make a decision it will stop with a failure.') - parser_init.add_argument('--source-overrides', action='store', help='Override sources information (repositories/revisions) with values from a local json file.') - parser_init.add_argument('--setup-dir-name', action='store', help='A custom setup directory name under the top directory.') - parser_init.add_argument('--skip-selection', action='append', help='Do not select and set an option/fragment from available choices; the resulting bitbake configuration may be incomplete.') - parser_init.set_defaults(func=init_config) - - parser_status = subparsers.add_parser('status', help='Check if the setup needs to be synchronized with configuration') - add_setup_dir_arg(parser_status) - parser_status.set_defaults(func=build_status) - - parser_update = subparsers.add_parser('update', help='Update a setup to be in sync with configuration') - add_setup_dir_arg(parser_update) - parser_update.set_defaults(func=build_update) - - parser_install_buildtools = subparsers.add_parser('install-buildtools', help='Install buildtools which can help fulfil missing or incorrect dependencies on the host machine') - add_setup_dir_arg(parser_install_buildtools) - parser_install_buildtools.add_argument('--force', action='store_true', help='Force a reinstall of buildtools over the previous installation.') - parser_install_buildtools.set_defaults(func=install_buildtools) - - parser_settings_arg_global = argparse.ArgumentParser(add_help=False) - parser_settings_arg_global.add_argument('--global', action='store_true', help="Modify the setting in a global settings file, rather than one specific to a top directory") - - parser_settings = subparsers.add_parser('settings', - help='List current settings, or set or unset a setting in a settings file (e.g. the default prefix and name of the top directory, the location of the configuration registry, the downloads directory and other settings specific to a top directory)') - parser_settings.set_defaults(func=settings_func) - - subparser_settings = parser_settings.add_subparsers(dest="subcommand", required=True, help="The action to perform on the settings file") - - parser_settings_list = subparser_settings.add_parser('list', - help="List all settings with their values")
", help="Section in a settings file, typically 'default'") - parser_settings_set.add_argument("setting", metavar="", help="Name of a setting") - parser_settings_set.add_argument("value", metavar="", help="The setting value") - - parser_settings_unset = subparser_settings.add_parser('unset', parents=[parser_settings_arg_global], - help="Unset a setting, e.g. 'bitbake-setup settings unset default registry' would revert to the registry setting in a global settings file") - parser_settings_unset.add_argument("section", metavar="
", help="Section in a settings file, typically 'default'") - parser_settings_unset.add_argument("setting", metavar="", help="The setting to remove") - - args = parser.parse_args() - - logging.basicConfig(stream=sys.stdout) - if args.debug: - logger.setLevel(logging.DEBUG) - elif args.quiet: - logger.setLevel(logging.ERROR) - - # Need to re-run logger_create with color argument - # (will be the same logger since it has the same name) - bb.msg.logger_create('bitbake-setup', output=sys.stdout, - color=args.color, - level=logger.getEffectiveLevel()) - - if 'func' in args: - if hasattr(args, 'setup_dir'): - if not os.path.exists(os.path.join(args.setup_dir,'build', 'init-build-env')): - print("Not a valid setup directory: build/init-build-env does not exist in {}".format(args.setup_dir)) - return - - if not hasattr(args, 'non_interactive'): - args.non_interactive = True - - builtin_settings = {} - builtin_settings['default'] = { - 'top-dir-prefix':os.path.expanduser('~'), - 'top-dir-name':'bitbake-builds', - 'registry':default_registry, - } - - global_settings = load_settings(global_settings_path(args)) - top_dir = get_top_dir(args, merge_settings(builtin_settings, global_settings, {}, args.cmdline_settings)) - - # This cannot be set with the rest of the builtin settings as top_dir needs to be determined first - builtin_settings['default']['dl-dir'] = os.path.join(top_dir, '.bitbake-setup-downloads') - - topdir_settings = load_settings(topdir_settings_path(top_dir)) - all_settings = merge_settings(builtin_settings, global_settings, topdir_settings, args.cmdline_settings) - - if args.func == settings_func: - settings_func(top_dir, all_settings, args) - return - - print('Bitbake-setup is using {} as top directory ("bitbake-setup settings --help" shows how to change it).\n'.format(top_dir, global_settings_path(args))) - - d = init_bb_cache(top_dir, all_settings, args) - args.func(top_dir, all_settings, args, d) - save_bb_cache() - else: - from argparse import Namespace - parser.print_help() - -main() diff --git a/bitbake/bin/bitbake-worker b/bitbake/bin/bitbake-worker deleted file mode 100755 index d2b146a6a9..0000000000 --- a/bitbake/bin/bitbake-worker +++ /dev/null @@ -1,590 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright BitBake Contributors -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import os -import sys -import warnings -warnings.simplefilter("default") -warnings.filterwarnings("ignore", category=DeprecationWarning, message=".*use.of.fork.*may.lead.to.deadlocks.in.the.child.*") -sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib')) -from bb import fetch2 -import logging -import bb -import select -import errno -import signal -import pickle -import traceback -import queue -import shlex -import subprocess -import fcntl -from multiprocessing import Lock -from threading import Thread - -# Remove when we have a minimum of python 3.10 -if not hasattr(fcntl, 'F_SETPIPE_SZ'): - fcntl.F_SETPIPE_SZ = 1031 - -bb.utils.check_system_locale() - -# Users shouldn't be running this code directly -if len(sys.argv) != 2 or not sys.argv[1].startswith("decafbad"): - print("bitbake-worker is meant for internal execution by bitbake itself, please don't use it standalone.") - sys.exit(1) - -profiling = False -if sys.argv[1].startswith("decafbadbad"): - profiling = True - try: - import cProfile as profile - except: - import profile - -# Unbuffer stdout to avoid log truncation in the event -# of an unorderly exit as well as to provide timely -# updates to log files for use with 
-try: - if sys.stdout.name == '<stdout>': - fl = fcntl.fcntl(sys.stdout.fileno(), fcntl.F_GETFL) - fl |= os.O_SYNC - fcntl.fcntl(sys.stdout.fileno(), fcntl.F_SETFL, fl) - #sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) -except: - pass - -logger = logging.getLogger("BitBake") - -worker_pipe = sys.stdout.fileno() -bb.utils.nonblockingfd(worker_pipe) -# Try to make the pipe buffers larger as it is much more efficient. If we can't -# e.g. out of buffer space (/proc/sys/fs/pipe-user-pages-soft) then just pass over. -try: - fcntl.fcntl(worker_pipe, fcntl.F_SETPIPE_SZ, 512 * 1024) -except: - pass -# Need to guard against multiprocessing being used in child processes -# and multiple processes trying to write to the parent at the same time -worker_pipe_lock = None - -handler = bb.event.LogHandler() -logger.addHandler(handler) - -if 0: - # Code to write out a log file of all events passing through the worker - logfilename = "/tmp/workerlogfile" - format_str = "%(levelname)s: %(message)s" - conlogformat = bb.msg.BBLogFormatter(format_str) - consolelog = logging.FileHandler(logfilename) - consolelog.setFormatter(conlogformat) - logger.addHandler(consolelog) - -worker_queue = queue.Queue() - -def worker_fire(event, d): - data = b"<event>" + pickle.dumps(event) + b"</event>" - worker_fire_prepickled(data) - -def worker_fire_prepickled(event): - global worker_queue - - worker_queue.put(event) - -# -# We can end up with write contention with the cooker, it can be trying to send commands -# and we can be trying to send event data back. Therefore use a separate thread for writing -# back data to cooker. -# -worker_thread_exit = False - -def worker_flush(worker_queue): - worker_queue_int = bytearray() - global worker_pipe, worker_thread_exit - - while True: - try: - worker_queue_int.extend(worker_queue.get(True, 1)) - except queue.Empty: - pass - while (worker_queue_int or not worker_queue.empty()): - try: - (_, ready, _) = select.select([], [worker_pipe], [], 1) - if not worker_queue.empty(): - worker_queue_int.extend(worker_queue.get()) - written = os.write(worker_pipe, worker_queue_int) - del worker_queue_int[0:written] - except (IOError, OSError) as e: - if e.errno != errno.EAGAIN and e.errno != errno.EPIPE: - raise - if worker_thread_exit and worker_queue.empty() and not worker_queue_int: - return - -worker_thread = Thread(target=worker_flush, args=(worker_queue,)) -worker_thread.start() - -def worker_child_fire(event, d): - global worker_pipe - global worker_pipe_lock - - data = b"<event>" + pickle.dumps(event) + b"</event>" - try: - with bb.utils.lock_timeout(worker_pipe_lock): - while(len(data)): - written = worker_pipe.write(data) - data = data[written:] - except IOError: - sigterm_handler(None, None) - raise - -bb.event.worker_fire = worker_fire - -lf = None -#lf = open("/tmp/workercommandlog", "w+") -def workerlog_write(msg): - if lf: - lf.write(msg) - lf.flush() - -def sigterm_handler(signum, frame): - signal.signal(signal.SIGTERM, signal.SIG_DFL) - os.killpg(0, signal.SIGTERM) - sys.exit()
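worker_fire() and worker_child_fire() above wrap each pickled event in literal <event>...</event> byte tags before it is streamed back to the cooker; the reader then scans for the closing tag rather than using a length prefix. A minimal sketch of that framing and of the scan loop used by runQueueWorkerPipe.read() further below (helper names are illustrative; the tag bytes follow the code above):

    import pickle

    def frame_event(event) -> bytes:
        # Tag-delimited pickle, no length header, as in worker_fire() above.
        return b"<event>" + pickle.dumps(event) + b"</event>"

    def split_events(buf: bytearray):
        # Peel complete <event>...</event> messages off the front of the buffer,
        # leaving any partial tail behind for the next read.
        end = buf.find(b"</event>")
        while end != -1:
            cut = end + len(b"</event>")
            msg, buf[:] = bytes(buf[:cut]), buf[cut:]
            yield msg
            end = buf.find(b"</event>")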
-def fork_off_task(cfg, data, databuilder, workerdata, extraconfigdata, runtask): - - fn = runtask['fn'] - task = runtask['task'] - taskname = runtask['taskname'] - taskhash = runtask['taskhash'] - unihash = runtask['unihash'] - appends = runtask['appends'] - layername = runtask['layername'] - taskdepdata = runtask['taskdepdata'] - quieterrors = runtask['quieterrors'] - # We need to setup the environment BEFORE the fork, since - # a fork() or exec*() activates PSEUDO... - - envbackup = {} - fakeroot = False - fakeenv = {} - umask = None - - uid = os.getuid() - gid = os.getgid() - - taskdep = runtask['taskdep'] - if 'umask' in taskdep and taskname in taskdep['umask']: - umask = taskdep['umask'][taskname] - elif workerdata["umask"]: - umask = workerdata["umask"] - if umask: - # Convert to a python numeric value as it could be a string - umask = bb.utils.to_filemode(umask) - - dry_run = cfg.dry_run or runtask['dry_run'] - - # We can't use the fakeroot environment in a dry run as it possibly hasn't been built - if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not dry_run: - fakeroot = True - envvars = (runtask['fakerootenv'] or "").split() - for key, value in (var.split('=',1) for var in envvars): - envbackup[key] = os.environ.get(key) - os.environ[key] = value - fakeenv[key] = value - - fakedirs = (runtask['fakerootdirs'] or "").split() - for p in fakedirs: - bb.utils.mkdirhier(p) - logger.debug2('Running %s:%s under fakeroot, fakedirs: %s' % - (fn, taskname, ', '.join(fakedirs))) - else: - envvars = (runtask['fakerootnoenv'] or "").split() - for key, value in (var.split('=',1) for var in envvars): - envbackup[key] = os.environ.get(key) - os.environ[key] = value - fakeenv[key] = value - - sys.stdout.flush() - sys.stderr.flush() - - try: - pipein, pipeout = os.pipe() - pipein = os.fdopen(pipein, 'rb', 4096) - pipeout = os.fdopen(pipeout, 'wb', 0) - pid = os.fork() - except OSError as e: - logger.critical("fork failed: %d (%s)" % (e.errno, e.strerror)) - sys.exit(1) - - if pid == 0: - def child(): - global worker_pipe - global worker_pipe_lock - pipein.close() - - bb.utils.signal_on_parent_exit("SIGTERM") - - # Save out the PID so that events - # can include it - bb.event.worker_pid = os.getpid() - bb.event.worker_fire = worker_child_fire - worker_pipe = pipeout - worker_pipe_lock = Lock() - - # Make the child the process group leader and ensure no - # child process will be controlled by the current terminal - # This ensures signals sent to the controlling terminal like Ctrl+C - # don't stop the child processes. - os.setsid() - - signal.signal(signal.SIGTERM, sigterm_handler) - # Let SIGHUP exit as SIGTERM - signal.signal(signal.SIGHUP, sigterm_handler) - - # No stdin & stdout - # stdout is used as a status report channel and must not be used by child processes.
- dumbio = os.open(os.devnull, os.O_RDWR) - os.dup2(dumbio, sys.stdin.fileno()) - os.dup2(dumbio, sys.stdout.fileno()) - - if umask is not None: - os.umask(umask) - - try: - (realfn, virtual, mc) = bb.cache.virtualfn2realfn(fn) - the_data = databuilder.mcdata[mc] - the_data.setVar("BB_WORKERCONTEXT", "1") - the_data.setVar("BB_TASKDEPDATA", taskdepdata) - the_data.setVar('BB_CURRENTTASK', taskname.replace("do_", "")) - if cfg.limited_deps: - the_data.setVar("BB_LIMITEDDEPS", "1") - the_data.setVar("BUILDNAME", workerdata["buildname"]) - the_data.setVar("DATE", workerdata["date"]) - the_data.setVar("TIME", workerdata["time"]) - for varname, value in extraconfigdata.items(): - the_data.setVar(varname, value) - - bb.parse.siggen.set_taskdata(workerdata["sigdata"]) - if "newhashes" in workerdata: - bb.parse.siggen.set_taskhashes(workerdata["newhashes"]) - ret = 0 - - the_data = databuilder.parseRecipe(fn, appends, layername) - the_data.setVar('BB_TASKHASH', taskhash) - the_data.setVar('BB_UNIHASH', unihash) - bb.parse.siggen.setup_datacache_from_datastore(fn, the_data) - - bb.utils.set_process_name("%s:%s" % (the_data.getVar("PN"), taskname.replace("do_", ""))) - - if not bb.utils.to_boolean(the_data.getVarFlag(taskname, 'network')): - if bb.utils.is_local_uid(uid): - logger.debug("Attempting to disable network for %s" % taskname) - bb.utils.disable_network(uid, gid) - else: - logger.debug("Skipping disable network for %s since %s is not a local uid." % (taskname, uid)) - - # exported_vars() returns a generator which *cannot* be passed to os.environ.update() - # successfully. We also need to unset anything from the environment which shouldn't be there - exports = bb.data.exported_vars(the_data) - - bb.utils.empty_environment() - for e, v in exports: - os.environ[e] = v - - for e in fakeenv: - os.environ[e] = fakeenv[e] - the_data.setVar(e, fakeenv[e]) - the_data.setVarFlag(e, 'export', "1") - - task_exports = the_data.getVarFlag(taskname, 'exports') - if task_exports: - for e in task_exports.split(): - the_data.setVarFlag(e, 'export', '1') - v = the_data.getVar(e) - if v is not None: - os.environ[e] = v - - if quieterrors: - the_data.setVarFlag(taskname, "quieterrors", "1") - - except Exception: - if not quieterrors: - logger.critical(traceback.format_exc()) - os._exit(1) - - sys.stdout.flush() - sys.stderr.flush() - - try: - if dry_run: - return 0 - try: - ret = bb.build.exec_task(fn, taskname, the_data, cfg.profile) - finally: - if fakeroot: - fakerootcmd = shlex.split(the_data.getVar("FAKEROOTCMD")) - subprocess.run(fakerootcmd + ['-S'], check=True, stdout=subprocess.PIPE) - return ret - except: - os._exit(1) - if not profiling: - os._exit(child()) - else: - profname = "profile-%s.log" % (fn.replace("/", "-") + "-" + taskname) - prof = profile.Profile() - try: - ret = profile.Profile.runcall(prof, child) - finally: - prof.dump_stats(profname) - bb.utils.process_profilelog(profname) - os._exit(ret) - else: - for key, value in iter(envbackup.items()): - if value is None: - del os.environ[key] - else: - os.environ[key] = value - - return pid, pipein, pipeout - -class runQueueWorkerPipe(): - """ - Abstraction for a pipe between a worker thread and the worker server - """ - def __init__(self, pipein, pipeout): - self.input = pipein - if pipeout: - pipeout.close() - bb.utils.nonblockingfd(self.input) - self.queue = bytearray() - - def read(self): - start = len(self.queue) - try: - self.queue.extend(self.input.read(512*1024) or b"") - except (OSError, IOError) as e: - if e.errno != errno.EAGAIN: - 
raise - - end = len(self.queue) - index = self.queue.find(b"</event>") - while index != -1: - msg = self.queue[:index+8] - assert msg.startswith(b"<event>") and msg.count(b"<event>") == 1 - worker_fire_prepickled(msg) - self.queue = self.queue[index+8:] - index = self.queue.find(b"</event>") - return (end > start) - - def close(self): - while self.read(): - continue - if len(self.queue) > 0: - print("Warning, worker child left partial message: %s" % self.queue) - self.input.close() - -normalexit = False - -class BitbakeWorker(object): - def __init__(self, din): - self.input = din - bb.utils.nonblockingfd(self.input) - self.queue = bytearray() - self.cookercfg = None - self.databuilder = None - self.data = None - self.extraconfigdata = None - self.build_pids = {} - self.build_pipes = {} - - signal.signal(signal.SIGTERM, self.sigterm_exception) - # Let SIGHUP exit as SIGTERM - signal.signal(signal.SIGHUP, self.sigterm_exception) - if "beef" in sys.argv[1]: - bb.utils.set_process_name("Worker (Fakeroot)") - else: - bb.utils.set_process_name("Worker") - - def sigterm_exception(self, signum, stackframe): - if signum == signal.SIGTERM: - bb.warn("Worker received SIGTERM, shutting down...") - elif signum == signal.SIGHUP: - bb.warn("Worker received SIGHUP, shutting down...") - self.handle_finishnow(None) - signal.signal(signal.SIGTERM, signal.SIG_DFL) - os.kill(os.getpid(), signal.SIGTERM) - - def serve(self): - while True: - (ready, _, _) = select.select([self.input] + [i.input for i in self.build_pipes.values()], [] , [], 1) - if self.input in ready: - try: - r = self.input.read() - if len(r) == 0: - # EOF on pipe, server must have terminated - self.sigterm_exception(signal.SIGTERM, None) - self.queue.extend(r) - except (OSError, IOError): - pass - if len(self.queue): - self.handle_item(b"cookerconfig", self.handle_cookercfg) - self.handle_item(b"extraconfigdata", self.handle_extraconfigdata) - self.handle_item(b"workerdata", self.handle_workerdata) - self.handle_item(b"newtaskhashes", self.handle_newtaskhashes) - self.handle_item(b"runtask", self.handle_runtask) - self.handle_item(b"finishnow", self.handle_finishnow) - self.handle_item(b"ping", self.handle_ping) - self.handle_item(b"quit", self.handle_quit) - - for pipe in self.build_pipes: - if self.build_pipes[pipe].input in ready: - self.build_pipes[pipe].read() - if len(self.build_pids): - while self.process_waitpid(): - continue - - def handle_item(self, item, func): - opening_tag = b"<" + item + b">" - if not self.queue.startswith(opening_tag): - return - - tag_len = len(opening_tag) - if len(self.queue) < tag_len + 4: - # we need to receive more data - return - header = self.queue[tag_len:tag_len + 4] - payload_len = int.from_bytes(header, 'big') - # closing tag has length (tag_len + 1) - if len(self.queue) < tag_len * 2 + 1 + payload_len: - # we need to receive more data - return - - index = self.queue.find(b"</" + item + b">") - if index != -1: - try: - func(self.queue[(tag_len + 4):index]) - except pickle.UnpicklingError: - workerlog_write("Unable to unpickle data: %s\n" % ":".join("{:02x}".format(c) for c in self.queue)) - raise - self.queue = self.queue[(index + len(b"</" + item + b">")):] - - def handle_cookercfg(self, data): - self.cookercfg = pickle.loads(data) - self.databuilder = bb.cookerdata.CookerDataBuilder(self.cookercfg, worker=True) - self.databuilder.parseBaseConfiguration(worker=True) - self.data = self.databuilder.data - - def handle_extraconfigdata(self, data): - self.extraconfigdata = pickle.loads(data)
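handle_item() above defines the cooker-to-worker wire format: an opening <item> tag, a 4-byte big-endian payload length, the pickled payload, and a closing </item> tag. A hedged sketch of what the matching sender side has to produce (the helper name is hypothetical; the framing simply mirrors the parser above):

    import pickle

    def frame_command(item: bytes, obj) -> bytes:
        # <item> + 4-byte big-endian length + pickled payload + </item>,
        # exactly the shape BitbakeWorker.handle_item() unpacks above.
        payload = pickle.dumps(obj)
        return (b"<" + item + b">" + len(payload).to_bytes(4, 'big')
                + payload + b"</" + item + b">")

    # e.g. frame_command(b"ping", None) yields a message handle_ping() would receive.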
- def handle_workerdata(self, data): - self.workerdata = pickle.loads(data) - bb.build.verboseShellLogging = self.workerdata["build_verbose_shell"] - bb.build.verboseStdoutLogging = self.workerdata["build_verbose_stdout"] - bb.msg.loggerDefaultLogLevel = self.workerdata["logdefaultlevel"] - bb.msg.loggerDefaultDomains = self.workerdata["logdefaultdomain"] - for mc in self.databuilder.mcdata: - self.databuilder.mcdata[mc].setVar("PRSERV_HOST", self.workerdata["prhost"]) - self.databuilder.mcdata[mc].setVar("BB_HASHSERVE", self.workerdata["hashservaddr"]) - self.databuilder.mcdata[mc].setVar("__bbclasstype", "recipe") - - def handle_newtaskhashes(self, data): - self.workerdata["newhashes"] = pickle.loads(data) - - def handle_ping(self, _): - workerlog_write("Handling ping\n") - - logger.warning("Pong from bitbake-worker!") - - def handle_quit(self, data): - workerlog_write("Handling quit\n") - - global normalexit - normalexit = True - sys.exit(0) - - def handle_runtask(self, data): - runtask = pickle.loads(data) - - fn = runtask['fn'] - task = runtask['task'] - taskname = runtask['taskname'] - - workerlog_write("Handling runtask %s %s %s\n" % (task, fn, taskname)) - - pid, pipein, pipeout = fork_off_task(self.cookercfg, self.data, self.databuilder, self.workerdata, self.extraconfigdata, runtask) - self.build_pids[pid] = task - self.build_pipes[pid] = runQueueWorkerPipe(pipein, pipeout) - - def process_waitpid(self): - """ - Return False if there are no processes awaiting result collection, otherwise - collect the process exit codes and close the information pipe. - """ - try: - pid, status = os.waitpid(-1, os.WNOHANG) - if pid == 0 or os.WIFSTOPPED(status): - return False - except OSError: - return False - - workerlog_write("Exit code of %s for pid %s\n" % (status, pid)) - - if os.WIFEXITED(status): - status = os.WEXITSTATUS(status) - elif os.WIFSIGNALED(status): - # Per shell conventions for $?, when a process exits due to - # a signal, we return an exit code of 128 + SIGNUM - status = 128 + os.WTERMSIG(status) - - task = self.build_pids[pid] - del self.build_pids[pid] - - self.build_pipes[pid].close() - del self.build_pipes[pid] - - worker_fire_prepickled(b"<exitcode>" + pickle.dumps((task, status)) + b"</exitcode>") - - return True - - def handle_finishnow(self, _): - if self.build_pids: - logger.info("Sending SIGTERM to remaining %s tasks", len(self.build_pids)) - for k, v in iter(self.build_pids.items()): - try: - os.kill(-k, signal.SIGTERM) - os.waitpid(-1, 0) - except: - pass - for pipe in self.build_pipes: - self.build_pipes[pipe].read() - -try: - worker = BitbakeWorker(os.fdopen(sys.stdin.fileno(), 'rb')) - if not profiling: - worker.serve() - else: - profname = "profile-worker.log" - prof = profile.Profile() - try: - profile.Profile.runcall(prof, worker.serve) - finally: - prof.dump_stats(profname) - bb.utils.process_profilelog(profname) -except BaseException as e: - if not normalexit: - import traceback - sys.stderr.write(traceback.format_exc()) - sys.stderr.write(str(e)) -finally: - worker_thread_exit = True - worker_thread.join() - -workerlog_write("exiting") -if not normalexit: - sys.exit(1) -sys.exit(0) diff --git a/bitbake/bin/git-make-shallow b/bitbake/bin/git-make-shallow deleted file mode 100755 index e6c180b4d6..0000000000 --- a/bitbake/bin/git-make-shallow +++ /dev/null @@ -1,175 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright BitBake Contributors -# -# SPDX-License-Identifier: GPL-2.0-only -# - -"""git-make-shallow: make the current git repository shallow - -Remove the history of the specified revisions, then optionally filter the -available refs
to those specified. -""" - -import argparse -import collections -import errno -import itertools -import os -import subprocess -import sys -import warnings -warnings.simplefilter("default") - -version = 1.0 - - -git_cmd = ['git', '-c', 'safe.bareRepository=all'] - -def main(): - if sys.version_info < (3, 4, 0): - sys.exit('Python 3.4 or greater is required') - - git_dir = check_output(git_cmd + ['rev-parse', '--git-dir']).rstrip() - shallow_file = os.path.join(git_dir, 'shallow') - if os.path.exists(shallow_file): - try: - check_output(git_cmd + ['fetch', '--unshallow']) - except subprocess.CalledProcessError: - try: - os.unlink(shallow_file) - except OSError as exc: - if exc.errno != errno.ENOENT: - raise - - args = process_args() - revs = check_output(git_cmd + ['rev-list'] + args.revisions).splitlines() - - make_shallow(shallow_file, args.revisions, args.refs) - - ref_revs = check_output(git_cmd + ['rev-list'] + args.refs).splitlines() - remaining_history = set(revs) & set(ref_revs) - for rev in remaining_history: - if check_output(git_cmd + ['rev-parse', '{}^@'.format(rev)]): - sys.exit('Error: %s was not made shallow' % rev) - - filter_refs(args.refs) - - if args.shrink: - shrink_repo(git_dir) - subprocess.check_call(git_cmd + ['fsck', '--unreachable']) - - -def process_args(): - # TODO: add argument to automatically keep local-only refs, since they - # can't be easily restored with a git fetch. - parser = argparse.ArgumentParser(description='Remove the history of the specified revisions, then optionally filter the available refs to those specified.') - parser.add_argument('--ref', '-r', metavar='REF', action='append', dest='refs', help='remove all but the specified refs (cumulative)') - parser.add_argument('--shrink', '-s', action='store_true', help='shrink the git repository by repacking and pruning') - parser.add_argument('revisions', metavar='REVISION', nargs='+', help='a git revision/commit') - if len(sys.argv) < 2: - parser.print_help() - sys.exit(2) - - args = parser.parse_args() - - if args.refs: - args.refs = check_output(git_cmd + ['rev-parse', '--symbolic-full-name'] + args.refs).splitlines() - else: - args.refs = get_all_refs(lambda r, t, tt: t == 'commit' or tt == 'commit') - - args.refs = list(filter(lambda r: not r.endswith('/HEAD'), args.refs)) - args.revisions = check_output(git_cmd + ['rev-parse'] + ['%s^{}' % i for i in args.revisions]).splitlines() - return args - - -def check_output(cmd, input=None): - return subprocess.check_output(cmd, universal_newlines=True, input=input) - - -def make_shallow(shallow_file, revisions, refs): - """Remove the history of the specified revisions.""" - for rev in follow_history_intersections(revisions, refs): - print("Processing %s" % rev) - with open(shallow_file, 'a') as f: - f.write(rev + '\n') - - -def get_all_refs(ref_filter=None): - """Return all the existing refs in this repository, optionally filtering the refs.""" - ref_output = check_output(git_cmd + ['for-each-ref', '--format=%(refname)\t%(objecttype)\t%(*objecttype)']) - ref_split = [tuple(iter_extend(l.rsplit('\t'), 3)) for l in ref_output.splitlines()] - if ref_filter: - ref_split = (e for e in ref_split if ref_filter(*e)) - refs = [r[0] for r in ref_split] - return refs - - -def iter_extend(iterable, length, obj=None): - """Ensure that iterable is the specified length by extending with obj.""" - return itertools.islice(itertools.chain(iterable, itertools.repeat(obj)), length) - - -def filter_refs(refs): - """Remove all but the specified refs from the git repository.""" 
- all_refs = get_all_refs() - to_remove = set(all_refs) - set(refs) - if to_remove: - check_output(git_cmd + ['update-ref', '--no-deref', '--stdin', '-z'], - input=''.join('delete ' + l + '\0\0' for l in to_remove)) - - -def follow_history_intersections(revisions, refs): - """Determine all the points where the history of the specified revisions intersects the specified refs.""" - queue = collections.deque(revisions) - seen = set() - - for rev in iter_except(queue.popleft, IndexError): - if rev in seen: - continue - - parents = check_output(git_cmd + ['rev-parse', '%s^@' % rev]).splitlines() - - yield rev - seen.add(rev) - - if not parents: - continue - - check_refs = check_output(git_cmd + ['merge-base', '--independent'] + sorted(refs)).splitlines() - for parent in parents: - for ref in check_refs: - print("Checking %s vs %s" % (parent, ref)) - try: - merge_base = check_output(git_cmd + ['merge-base', parent, ref]).rstrip() - except subprocess.CalledProcessError: - continue - else: - queue.append(merge_base) - - -def iter_except(func, exception, start=None): - """Yield a function repeatedly until it raises an exception.""" - try: - if start is not None: - yield start() - while True: - yield func() - except exception: - pass - - -def shrink_repo(git_dir): - """Shrink the newly shallow repository, removing the unreachable objects.""" - subprocess.check_call(git_cmd + ['reflog', 'expire', '--expire-unreachable=now', '--all']) - subprocess.check_call(git_cmd + ['repack', '-ad']) - try: - os.unlink(os.path.join(git_dir, 'objects', 'info', 'alternates')) - except OSError as exc: - if exc.errno != errno.ENOENT: - raise - subprocess.check_call(git_cmd + ['prune', '--expire', 'now']) - - -if __name__ == '__main__': - main()
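iter_except() above is the driver for follow_history_intersections(): it keeps calling a function until a chosen exception signals exhaustion, which turns the deque into a simple work queue. A tiny usage sketch (values illustrative, reusing the helper defined above):

    import collections

    queue = collections.deque(['a', 'b', 'c'])
    # Drain the deque until popleft() raises IndexError, as the BFS loop above does.
    print(list(iter_except(queue.popleft, IndexError)))  # -> ['a', 'b', 'c']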
-" - -custom_extention() -{ - custom_extension=$BBBASEDIR/lib/toaster/orm/fixtures/custom_toaster_append.sh - if [ -f $custom_extension ] ; then - $custom_extension $* - fi -} - -databaseCheck() -{ - retval=0 - # you can always add a superuser later via - # ../bitbake/lib/toaster/manage.py createsuperuser --username= - $MANAGE migrate --noinput || retval=1 - - if [ $retval -eq 1 ]; then - echo "Failed migrations, halting system start" 1>&2 - return $retval - fi - # Make sure that checksettings can pick up any value for TEMPLATECONF - export TEMPLATECONF - $MANAGE checksettings --traceback || retval=1 - - if [ $retval -eq 1 ]; then - printf "\nError while checking settings; exiting\n" - return $retval - fi - - return $retval -} - -webserverKillAll() -{ - local pidfile - if [ -f ${BUILDDIR}/.toastermain.pid ] ; then - custom_extention web_stop_postpend - else - custom_extention noweb_stop_postpend - fi - for pidfile in ${BUILDDIR}/.toastermain.pid ${BUILDDIR}/.runbuilds.pid; do - if [ -f ${pidfile} ]; then - pid=`cat ${pidfile}` - while kill -0 $pid 2>/dev/null; do - kill -SIGTERM $pid 2>/dev/null - sleep 1 - done - rm ${pidfile} - fi - done -} - -webserverStartAll() -{ - # do not start if toastermain points to a valid process - if ! cat "${BUILDDIR}/.toastermain.pid" 2>/dev/null | xargs -I{} kill -0 {} ; then - retval=1 - rm "${BUILDDIR}/.toastermain.pid" - fi - - retval=0 - - # check the database - databaseCheck || return 1 - - echo "Starting webserver..." - - $MANAGE runserver --noreload "$ADDR_PORT" \ - >${TOASTER_LOGS_DIR}/web.log 2>&1 \ - & echo $! >${BUILDDIR}/.toastermain.pid - - sleep 1 - - if ! cat "${BUILDDIR}/.toastermain.pid" | xargs -I{} kill -0 {} ; then - retval=1 - rm "${BUILDDIR}/.toastermain.pid" - else - echo "Toaster development webserver started at http://$ADDR_PORT" - echo -e "\nYou can now run 'bitbake ' on the command line and monitor your build in Toaster.\nYou can also use a Toaster project to configure and run a build.\n" - custom_extention web_start_postpend $ADDR_PORT - fi - - return $retval -} - -INSTOPSYSTEM=0 - -# define the stop command -stop_system() -{ - # prevent reentry - if [ $INSTOPSYSTEM -eq 1 ]; then return; fi - INSTOPSYSTEM=1 - webserverKillAll - # unset exported variables - unset TOASTER_DIR - unset BITBAKE_UI - unset BBBASEDIR - trap - SIGHUP - #trap - SIGCHLD - INSTOPSYSTEM=0 -} - -verify_prereq() { - # Verify Django version - reqfile=$(python3 -c "import os; print(os.path.realpath('$BBBASEDIR/toaster-requirements.txt'))") - exp='s/Django\([><=]\+\)\([^,]\+\),\([><=]\+\)\(.\+\)/' - # expand version parts to 2 digits to support 1.10.x > 1.8 - # (note:helper functions hard to insert in-line) - exp=$exp'import sys,django;' - exp=$exp'version=["%02d" % int(n) for n in django.get_version().split(".")];' - exp=$exp'vmin=["%02d" % int(n) for n in "\2".split(".")];' - exp=$exp'vmax=["%02d" % int(n) for n in "\4".split(".")];' - exp=$exp'sys.exit(not (version \1 vmin and version \3 vmax))' - exp=$exp'/p' - if ! sed -n "$exp" $reqfile | python3 - ; then - req=`grep ^Django $reqfile` - echo "This program needs $req" - echo "Please install with pip3 install -r $reqfile" - return 2 - fi - - return 0 -} - -# read command line parameters -if [ -n "$BASH_SOURCE" ] ; then - TOASTER=${BASH_SOURCE} -elif [ -n "$ZSH_NAME" ] ; then - TOASTER=${(%):-%x} -else - TOASTER=$0 -fi - -export BBBASEDIR=`dirname $TOASTER`/.. -MANAGE="python3 $BBBASEDIR/lib/toaster/manage.py" -if [ -z "$OE_ROOT" ]; then - OE_ROOT=`dirname $TOASTER`/../.. 
-fi - -# this is the configuraton file we are using for toaster -# we are using the same logic that oe-setup-builddir uses -# (based on TEMPLATECONF and .templateconf) to determine -# which toasterconf.json to use. -# note: There are a number of relative path assumptions -# in the local layers that currently make using an arbitrary -# toasterconf.json difficult. - -# BBSETUP no longer supports .templateconf -if [ -f $OE_ROOT/.templateconf ] ; then - . $OE_ROOT/.templateconf - if [ -n "$TEMPLATECONF" ]; then - if [ ! -d "$TEMPLATECONF" ]; then - # Allow TEMPLATECONF=meta-xyz/conf as a shortcut - if [ -d "$OE_ROOT/$TEMPLATECONF" ]; then - TEMPLATECONF="$OE_ROOT/$TEMPLATECONF" - fi - fi - fi -else - # Force poky - TEMPLATECONF="layers/meta-yocto/meta-poky/conf/templates/default" -fi - -unset OE_ROOT - - -WEBSERVER=1 -export TOASTER_BUILDSERVER=1 -ADDR_PORT="localhost:8000" -TOASTERDIR=`dirname $BUILDDIR` -# ${BUILDDIR}/toaster_logs/ became the default location for toaster logs -# This is needed for implemented django-log-viewer: https://pypi.org/project/django-log-viewer/ -# If the directory does not exist, create it. -TOASTER_LOGS_DIR="${BUILDDIR}/toaster_logs/" -if [ ! -d $TOASTER_LOGS_DIR ] -then - mkdir $TOASTER_LOGS_DIR -fi -unset CMD -for param in $*; do - case $param in - noweb ) - WEBSERVER=0 - ;; - nobuild ) - TOASTER_BUILDSERVER=0 - ;; - start ) - CMD=$param - ;; - stop ) - CMD=$param - ;; - webport=*) - ADDR_PORT="${param#*=}" - # Split the addr:port string - ADDR=`echo $ADDR_PORT | cut -f 1 -d ':'` - PORT=`echo $ADDR_PORT | cut -f 2 -d ':'` - # If only a port has been speified then set address to localhost. - if [ $ADDR = $PORT ] ; then - ADDR_PORT="localhost:$PORT" - fi - ;; - toasterdir=*) - TOASTERDIR="${param#*=}" - ;; - manage ) - CMD=$param - manage_cmd="" - ;; - --help) - echo "$HELP" - return 0 - ;; - *) - if [ "manage" == "$CMD" ] ; then - manage_cmd="$manage_cmd $param" - else - echo "$HELP" - exit 1 - fi - ;; - - esac -done - -if [ `basename \"$0\"` = `basename \"${TOASTER}\"` ]; then - echo "Error: This script needs to be sourced. Please run as . $TOASTER" - return 1 -fi - -verify_prereq || return 1 - -# We make sure we're running in the current shell and in a good environment -if [ -z "$BUILDDIR" ] || ! which bitbake >/dev/null 2>&1 ; then - echo "Error: Build environment is not setup or bitbake is not in path." 1>&2 - return 2 -fi - -# this defines the dir toaster will use for -# 1) clones of layers (in _toaster_clones ) -# 2) the build dir (in build) -# 3) the sqlite db if that is being used. -# 4) pid's we need to clean up on exit/shutdown -# By default we move this all into the Toaster's parent build directory -export TOASTER_DIR=$BUILDDIR -export BB_ENV_PASSTHROUGH_ADDITIONS="$BB_ENV_PASSTHROUGH_ADDITIONS TOASTER_DIR" - -# Determine the action. If specified by arguments, fine, if not, toggle it -if [ "$CMD" = "start" ] ; then - if [ -n "$BBSERVER" ]; then - echo " Toaster is already running. Exiting..." - return 1 -fi -elif [ "$CMD" = "" ]; then - echo "No command specified" - echo "$HELP" - return 1 -fi - -echo "The system will $CMD." 
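-# CMD has been validated above and is one of: start, stop or manage.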
- -# Execute the commands -custom_extention toaster_prepend $CMD $ADDR_PORT - -case $CMD in - start ) - # check if addr:port is not in use - if [ "$CMD" == 'start' ]; then - if [ $WEBSERVER -gt 0 ]; then - $MANAGE checksocket "$ADDR_PORT" || return 1 - fi - fi - - # Create configuration file - conf=${BUILDDIR}/conf/local.conf - line='INHERIT+="toaster buildhistory"' - grep -q "$line" $conf || echo $line >> $conf - - if [ $WEBSERVER -eq 0 ] ; then - # Do not update the database for "noweb" unless - # it does not yet exist - if [ ! -f "$TOASTER_DIR/toaster.sqlite" ] ; then - if ! databaseCheck; then - echo "Failed ${CMD}." - return 4 - fi - fi - custom_extention noweb_start_postpend $ADDR_PORT - fi - if [ $WEBSERVER -gt 0 ] && ! webserverStartAll; then - echo "Failed ${CMD}." - return 4 - fi - export BITBAKE_UI='toasterui' - if [ $TOASTER_BUILDSERVER -eq 1 ] ; then - $MANAGE runbuilds \ - >${TOASTER_LOGS_DIR}/toaster_runbuilds.log 2>&1 \ - & echo $! >${BUILDDIR}/.runbuilds.pid - else - echo "Toaster build server not started." - fi - - # set fail safe stop system on terminal exit - trap stop_system SIGHUP - echo "Successful ${CMD}." - custom_extention toaster_postpend $CMD $ADDR_PORT - return 0 - ;; - stop ) - stop_system - echo "Successful ${CMD}." - ;; - manage ) - cd $BBBASEDIR/lib/toaster - $MANAGE $manage_cmd - ;; -esac -custom_extention toaster_postpend $CMD $ADDR_PORT - diff --git a/bitbake/bin/toaster-eventreplay b/bitbake/bin/toaster-eventreplay deleted file mode 100755 index 74a319320e..0000000000 --- a/bitbake/bin/toaster-eventreplay +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) 2014 Alex Damian -# -# SPDX-License-Identifier: GPL-2.0-only -# -# This file re-uses code spread throughout other Bitbake source files. -# As such, all other copyrights belong to their own right holders. -# - -""" -This command takes a filename as a single parameter. The filename is read -as a build eventlog, and the ToasterUI is used to process events in the file -and log data in the database -""" - -import os -import sys -import json -import pickle -import codecs -import warnings -warnings.simplefilter("default") - -from collections import namedtuple - -# mangle syspath to allow easy import of modules -from os.path import join, dirname, abspath -sys.path.insert(0, join(dirname(dirname(abspath(__file__))), 'lib')) - -import bb.cooker -from bb.ui import toasterui -from bb.ui import eventreplay - -def main(argv): - with open(argv[-1]) as eventfile: - # load variables from the first line - variables = None - while line := eventfile.readline().strip(): - try: - variables = json.loads(line)['allvariables'] - break - except (KeyError, json.JSONDecodeError): - continue - if not variables: - sys.exit("Cannot find allvariables entry in event log file %s" % argv[-1]) - eventfile.seek(0) - params = namedtuple('ConfigParams', ['observe_only'])(True) - player = eventreplay.EventPlayer(eventfile, variables) - - return toasterui.main(player, player, params) - -# run toaster ui on our mock bitbake class -if __name__ == "__main__": - if len(sys.argv) != 2: - print("Usage: %s " % os.path.basename(sys.argv[0])) - sys.exit(1) - - sys.exit(main(sys.argv)) diff --git a/bitbake/contrib/README b/bitbake/contrib/README deleted file mode 100644 index 25e5156619..0000000000 --- a/bitbake/contrib/README +++ /dev/null @@ -1 +0,0 @@ -This directory is for additional contributed files which may be useful. 
diff --git a/bitbake/contrib/autobuilderlog.json b/bitbake/contrib/autobuilderlog.json deleted file mode 100644 index 193a675a1f..0000000000 --- a/bitbake/contrib/autobuilderlog.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "version": 1, - "loggers": { - "BitBake.SigGen.HashEquiv": { - "level": "VERBOSE", - "handlers": ["BitBake.verbconsole"] - }, - "BitBake.RunQueue.HashEquiv": { - "level": "VERBOSE", - "handlers": ["BitBake.verbconsole"] - } - } -} diff --git a/bitbake/contrib/b4-wrapper-bitbake.py b/bitbake/contrib/b4-wrapper-bitbake.py deleted file mode 100755 index 87dff2c3a7..0000000000 --- a/bitbake/contrib/b4-wrapper-bitbake.py +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright OpenEmbedded Contributors -# -# SPDX-License-Identifier: MIT -# -# This script is to be called by b4: -# - through b4.send-auto-cc-cmd with "send-auto-cc-cmd" as first argument, -# -# When send-auto-cc-cmd is passed: -# -# This returns the list of Cc recipients for a patch. -# -# This script takes as stdin a patch. - -import subprocess -import sys - -cmd = sys.argv[1] -if cmd != "send-auto-cc-cmd": - sys.exit(-1) - -patch = sys.stdin.read() - -if subprocess.call(["which", "lsdiff"], stdout=subprocess.DEVNULL) != 0: - print("lsdiff missing from host, please install patchutils") - sys.exit(-1) - -files = subprocess.check_output(["lsdiff", "--strip-match=1", "--strip=1", "--include=doc/*"], - input=patch, text=True) -if len(files): - print("docs@lists.yoctoproject.org") -else: -# Handle patches made with --no-prefix - files = subprocess.check_output(["lsdiff", "--include=doc/*"], - input=patch, text=True) - if len(files): - print("docs@lists.yoctoproject.org") - -sys.exit(0) diff --git a/bitbake/contrib/bbdev.sh b/bitbake/contrib/bbdev.sh deleted file mode 100644 index 33a78531e1..0000000000 --- a/bitbake/contrib/bbdev.sh +++ /dev/null @@ -1,31 +0,0 @@ -# This is a shell function to be sourced into your shell or placed in your .profile, -# which makes setting things up for BitBake a bit easier. -# -# The author disclaims copyright to the contents of this file and places it in the -# public domain. - -bbdev () { - local BBDIR PKGDIR BUILDDIR - if test x"$1" = "x--help"; then echo >&2 "syntax: bbdev [bbdir [pkgdir [builddir]]]"; return 1; fi - if test x"$1" = x; then BBDIR=`pwd`; else BBDIR=$1; fi - if test x"$2" = x; then PKGDIR=`pwd`; else PKGDIR=$2; fi - if test x"$3" = x; then BUILDDIR=`pwd`; else BUILDDIR=$3; fi - - BBDIR=`readlink -f $BBDIR` - PKGDIR=`readlink -f $PKGDIR` - BUILDDIR=`readlink -f $BUILDDIR` - if ! (test -d $BBDIR && test -d $PKGDIR && test -d $BUILDDIR); then - echo >&2 "syntax: bbdev [bbdir [pkgdir [builddir]]]" - return 1 - fi - - PATH=$BBDIR/bin:$PATH - BBPATH=$BBDIR - if test x"$BBDIR" != x"$PKGDIR"; then - BBPATH=$PKGDIR:$BBPATH - fi - if test x"$PKGDIR" != x"$BUILDDIR"; then - BBPATH=$BUILDDIR:$BBPATH - fi - export BBPATH -} diff --git a/bitbake/contrib/bbparse-torture.py b/bitbake/contrib/bbparse-torture.py deleted file mode 100755 index c25d547bb8..0000000000 --- a/bitbake/contrib/bbparse-torture.py +++ /dev/null @@ -1,89 +0,0 @@ -#! 
/usr/bin/env python3 -# -# Copyright (C) 2020 Joshua Watt -# -# SPDX-License-Identifier: MIT - -import argparse -import os -import random -import shutil -import signal -import subprocess -import sys -import time - - -def try_unlink(path): - try: - os.unlink(path) - except: - pass - - -def main(): - def cleanup(): - shutil.rmtree("tmp/cache", ignore_errors=True) - try_unlink("bitbake-cookerdaemon.log") - try_unlink("bitbake.sock") - try_unlink("bitbake.lock") - - parser = argparse.ArgumentParser( - description="Bitbake parser torture test", - epilog=""" - A torture test for bitbake's parser. Repeatedly interrupts parsing until - bitbake decides to deadlock. - """, - ) - - args = parser.parse_args() - - if not "BUILDDIR" in os.environ: - print( - "'BUILDDIR' not found in the environment. Did you initialize the build environment?" - ) - return 1 - - os.chdir(os.environ["BUILDDIR"]) - - run_num = 0 - while True: - if run_num % 100 == 0: - print("Calibrating wait time...") - cleanup() - - start_time = time.monotonic() - r = subprocess.run(["bitbake", "-p"]) - max_wait_time = time.monotonic() - start_time - - if r.returncode != 0: - print("Calibration run exited with %d" % r.returncode) - return 1 - - print("Maximum wait time is %f seconds" % max_wait_time) - - run_num += 1 - wait_time = random.random() * max_wait_time - - print("Run #%d" % run_num) - print("Will sleep for %f seconds" % wait_time) - - cleanup() - with subprocess.Popen(["bitbake", "-p"]) as proc: - time.sleep(wait_time) - proc.send_signal(signal.SIGINT) - try: - proc.wait(45) - except subprocess.TimeoutExpired: - print("Run #%d: Waited too long. Possible deadlock!" % run_num) - proc.wait() - return 1 - - if proc.returncode == 0: - print("Exited successfully. Timeout too long?") - else: - print("Exited with %d" % proc.returncode) - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/bitbake/contrib/dump_cache.py b/bitbake/contrib/dump_cache.py deleted file mode 100755 index c6723cbf0a..0000000000 --- a/bitbake/contrib/dump_cache.py +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) 2012, 2018 Wind River Systems, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- -# -# Used for dumping the bb_cache.dat -# -import os -import sys -import argparse - -# For importing bb.cache -sys.path.insert(0, os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), '../lib')) -from bb.cache import CoreRecipeInfo - -import pickle - -class DumpCache(object): - def __init__(self): - parser = argparse.ArgumentParser( - description="bb_cache.dat's dumper", - epilog="Use %(prog)s --help to get help") - parser.add_argument("-r", "--recipe", - help="specify the recipe, default: all recipes", action="store") - parser.add_argument("-m", "--members", - help = "specify the member, use comma as separator for multiple ones, default: all members", action="store", default="") - parser.add_argument("-s", "--skip", - help = "skip skipped recipes", action="store_true") - parser.add_argument("cachefile", - help = "specify bb_cache.dat", nargs = 1, action="store", default="") - - self.args = parser.parse_args() - - def main(self): - with open(self.args.cachefile[0], "rb") as cachefile: - pickled = pickle.Unpickler(cachefile) - while True: - try: - key = pickled.load() - val = pickled.load() - except Exception: - break - if isinstance(val, CoreRecipeInfo): - pn = val.pn - - if self.args.recipe and self.args.recipe != pn: - continue - - if self.args.skip and val.skipped: - continue - - if self.args.members: - out = key - for member in self.args.members.split(','): - out += ": %s" % val.__dict__.get(member) - print("%s" % out) - else: - print("%s: %s" % (key, val.__dict__)) - elif not self.args.recipe: - print("%s %s" % (key, val)) - -if __name__ == "__main__": - try: - dump = DumpCache() - ret = dump.main() - except Exception as esc: - ret = 1 - import traceback - traceback.print_exc() - sys.exit(ret) diff --git a/bitbake/contrib/hashserv/Dockerfile b/bitbake/contrib/hashserv/Dockerfile deleted file mode 100644 index aec1f86fc9..0000000000 --- a/bitbake/contrib/hashserv/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2021 Joshua Watt -# -# Dockerfile to build a bitbake hash equivalence server container -# -# From the root of the bitbake repository, run: -# -# docker build -f contrib/hashserv/Dockerfile . -# - -FROM alpine:3.13.1 - -RUN apk add --no-cache python3 libgcc - -COPY bin/bitbake-hashserv /opt/bbhashserv/bin/ -COPY lib/hashserv /opt/bbhashserv/lib/hashserv/ -COPY lib/bb /opt/bbhashserv/lib/bb/ -COPY lib/codegen.py /opt/bbhashserv/lib/codegen.py -COPY lib/ply /opt/bbhashserv/lib/ply/ -COPY lib/bs4 /opt/bbhashserv/lib/bs4/ - -ENTRYPOINT ["/opt/bbhashserv/bin/bitbake-hashserv"] diff --git a/bitbake/contrib/prserv/Dockerfile b/bitbake/contrib/prserv/Dockerfile deleted file mode 100644 index 9585fe3f07..0000000000 --- a/bitbake/contrib/prserv/Dockerfile +++ /dev/null @@ -1,62 +0,0 @@ -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2022 Daniel Gomez -# -# Dockerfile to build a bitbake PR service container -# -# From the root of the bitbake repository, run: -# -# docker build -f contrib/prserv/Dockerfile . -t prserv -# -# Running examples: -# -# 1. PR Service in RW mode, port 18585: -# -# docker run --detach --tty \ -# --env PORT=18585 \ -# --publish 18585:18585 \ -# --volume $PWD:/var/lib/bbprserv \ -# prserv -# -# 2. 
PR Service in RO mode, default port (8585) and custom LOGFILE: -# -# docker run --detach --tty \ -# --env DBMODE="--read-only" \ -# --env LOGFILE=/var/lib/bbprserv/prservro.log \ -# --publish 8585:8585 \ -# --volume $PWD:/var/lib/bbprserv \ -# prserv -# - -FROM alpine:3.14.4 - -RUN apk add --no-cache python3 - -COPY bin/bitbake-prserv /opt/bbprserv/bin/ -COPY lib/prserv /opt/bbprserv/lib/prserv/ -COPY lib/bb /opt/bbprserv/lib/bb/ -COPY lib/codegen.py /opt/bbprserv/lib/codegen.py -COPY lib/ply /opt/bbprserv/lib/ply/ -COPY lib/bs4 /opt/bbprserv/lib/bs4/ - -ENV PATH=$PATH:/opt/bbprserv/bin - -RUN mkdir -p /var/lib/bbprserv - -ENV DBFILE=/var/lib/bbprserv/prserv.sqlite3 \ - LOGFILE=/var/lib/bbprserv/prserv.log \ - LOGLEVEL=debug \ - HOST=0.0.0.0 \ - PORT=8585 \ - DBMODE="" - -ENTRYPOINT [ "/bin/sh", "-c", \ -"bitbake-prserv \ ---file=$DBFILE \ ---log=$LOGFILE \ ---loglevel=$LOGLEVEL \ ---start \ ---host=$HOST \ ---port=$PORT \ -$DBMODE \ -&& tail -f $LOGFILE"] diff --git a/bitbake/contrib/vim/LICENSE.txt b/bitbake/contrib/vim/LICENSE.txt deleted file mode 100644 index c7d915024d..0000000000 --- a/bitbake/contrib/vim/LICENSE.txt +++ /dev/null @@ -1,18 +0,0 @@ -The MIT License (MIT) - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/bitbake/contrib/vim/ftdetect/bitbake.vim b/bitbake/contrib/vim/ftdetect/bitbake.vim deleted file mode 100644 index 1d958fec54..0000000000 --- a/bitbake/contrib/vim/ftdetect/bitbake.vim +++ /dev/null @@ -1,32 +0,0 @@ -" Vim filetype detection file -" Language: BitBake -" Author: Ricardo Salveti -" Copyright: Copyright (C) 2008 Ricardo Salveti -" Licence: You may redistribute this under the same terms as Vim itself -" -" This sets up the syntax highlighting for BitBake files, like .bb, .bbclass and .inc - -if &compatible || version < 600 || exists("b:loaded_bitbake_plugin") - finish -endif - -" .bb, .bbappend and .bbclass -au BufNewFile,BufRead *.{bb,bbappend,bbclass} setfiletype bitbake - -" .inc -- meanwhile included upstream -if !has("patch-9.1.1732") - au BufNewFile,BufRead *.inc call s:BBIncDetect() - def s:BBIncDetect() - var lines = getline(1) .. getline(2) .. getline(3) - if lines =~# '\<\%(require\|inherit\)\>' || lines =~# '[A-Z][A-Za-z0-9_:${}/]*\s\+\%(??\|[?:+.]\)\?=.\? 
' - set filetype=bitbake - endif - enddef -endif - -" .conf -au BufNewFile,BufRead *.conf - \ if (match(expand("%:p:h"), "conf") > 0) | - \ set filetype=bitbake | - \ endif - diff --git a/bitbake/contrib/vim/ftplugin/bitbake.vim b/bitbake/contrib/vim/ftplugin/bitbake.vim deleted file mode 100644 index 9e8d3e13ce..0000000000 --- a/bitbake/contrib/vim/ftplugin/bitbake.vim +++ /dev/null @@ -1,13 +0,0 @@ -" Only do this when not done yet for this buffer -if exists("b:did_ftplugin") - finish -endif - -" Don't load another plugin for this buffer -let b:did_ftplugin = 1 - -let b:undo_ftplugin = "setl cms< sts< sw< et< sua<" - -setlocal commentstring=#\ %s -setlocal softtabstop=4 shiftwidth=4 expandtab -setlocal suffixesadd+=.bb,.bbclass diff --git a/bitbake/contrib/vim/indent/bitbake.vim b/bitbake/contrib/vim/indent/bitbake.vim deleted file mode 100644 index 7ee9d69938..0000000000 --- a/bitbake/contrib/vim/indent/bitbake.vim +++ /dev/null @@ -1,343 +0,0 @@ -" Vim indent file -" Language: BitBake -" Copyright: Copyright (C) 2019 Agilent Technologies, Inc. -" Maintainer: Chris Laplante -" License: You may redistribute this under the same terms as Vim itself - - -if exists("b:did_indent") - finish -endif - -if exists("*BitbakeIndent") - finish -endif - -runtime! indent/sh.vim -unlet b:did_indent - -setlocal indentexpr=BitbakeIndent(v:lnum) -setlocal autoindent nolisp - -function s:is_bb_python_func_def(lnum) - let stack = synstack(a:lnum, 1) - if len(stack) == 0 - return 0 - endif - - let top = synIDattr(stack[0], "name") - echo top - - return synIDattr(stack[0], "name") == "bbPyFuncDef" -endfunction - -"""" begin modified from indent/python.vim, upstream commit 7a9bd7c1e0ce1baf5a02daf36eeae3638aa315c7 -"""" This copied code is licensed the same as Vim itself. -setlocal indentkeys+=<:>,=elif,=except - -let s:keepcpo= &cpo -set cpo&vim - -let s:maxoff = 50 " maximum number of lines to look backwards for () - -function! GetBBPythonIndent(lnum) - - " If this line is explicitly joined: If the previous line was also joined, - " line it up with that one, otherwise add two 'shiftwidth' - if getline(a:lnum - 1) =~ '\\$' - if a:lnum > 1 && getline(a:lnum - 2) =~ '\\$' - return indent(a:lnum - 1) - endif - return indent(a:lnum - 1) + (exists("g:pyindent_continue") ? eval(g:pyindent_continue) : (shiftwidth() * 2)) - endif - - " If the start of the line is in a string don't change the indent. - if has('syntax_items') - \ && synIDattr(synID(a:lnum, 1, 1), "name") =~ "String$" - return -1 - endif - - " Search backwards for the previous non-empty line. - let plnum = prevnonblank(v:lnum - 1) - - if plnum == 0 - " This is the first non-empty line, use zero indent. - return 0 - endif - - call cursor(plnum, 1) - - " Identing inside parentheses can be very slow, regardless of the searchpair() - " timeout, so let the user disable this feature if he doesn't need it - let disable_parentheses_indenting = get(g:, "pyindent_disable_parentheses_indenting", 0) - - if disable_parentheses_indenting == 1 - let plindent = indent(plnum) - let plnumstart = plnum - else - " searchpair() can be slow sometimes, limit the time to 150 msec or what is - " put in g:pyindent_searchpair_timeout - let searchpair_stopline = 0 - let searchpair_timeout = get(g:, 'pyindent_searchpair_timeout', 150) - - " If the previous line is inside parenthesis, use the indent of the starting - " line. - " Trick: use the non-existing "dummy" variable to break out of the loop when - " going too far back. 
- let parlnum = searchpair('(\|{\|\[', '', ')\|}\|\]', 'nbW', - \ "line('.') < " . (plnum - s:maxoff) . " ? dummy :" - \ . " synIDattr(synID(line('.'), col('.'), 1), 'name')" - \ . " =~ '\\(Comment\\|Todo\\|String\\)$'", - \ searchpair_stopline, searchpair_timeout) - if parlnum > 0 - " We may have found the opening brace of a BitBake Python task, e.g. 'python do_task {' - " If so, ignore it here - it will be handled later. - if s:is_bb_python_func_def(parlnum) - let parlnum = 0 - let plindent = indent(plnum) - let plnumstart = plnum - else - let plindent = indent(parlnum) - let plnumstart = parlnum - endif - else - let plindent = indent(plnum) - let plnumstart = plnum - endif - - " When inside parenthesis: If at the first line below the parenthesis add - " two 'shiftwidth', otherwise same as previous line. - " i = (a - " + b - " + c) - call cursor(a:lnum, 1) - let p = searchpair('(\|{\|\[', '', ')\|}\|\]', 'bW', - \ "line('.') < " . (a:lnum - s:maxoff) . " ? dummy :" - \ . " synIDattr(synID(line('.'), col('.'), 1), 'name')" - \ . " =~ '\\(Comment\\|Todo\\|String\\)$'", - \ searchpair_stopline, searchpair_timeout) - if p > 0 - if s:is_bb_python_func_def(p) - " Handle first non-empty line inside a BB Python task - if p == plnum - return shiftwidth() - endif - - " Handle the user actually trying to close a BitBake Python task - let line = getline(a:lnum) - if line =~ '^\s*}' - return -2 - endif - - " Otherwise ignore the brace - let p = 0 - else - if p == plnum - " When the start is inside parenthesis, only indent one 'shiftwidth'. - let pp = searchpair('(\|{\|\[', '', ')\|}\|\]', 'bW', - \ "line('.') < " . (a:lnum - s:maxoff) . " ? dummy :" - \ . " synIDattr(synID(line('.'), col('.'), 1), 'name')" - \ . " =~ '\\(Comment\\|Todo\\|String\\)$'", - \ searchpair_stopline, searchpair_timeout) - if pp > 0 - return indent(plnum) + (exists("g:pyindent_nested_paren") ? eval(g:pyindent_nested_paren) : shiftwidth()) - endif - return indent(plnum) + (exists("g:pyindent_open_paren") ? eval(g:pyindent_open_paren) : (shiftwidth() * 2)) - endif - if plnumstart == p - return indent(plnum) - endif - return plindent - endif - endif - - endif - - - " Get the line and remove a trailing comment. - " Use syntax highlighting attributes when possible. - let pline = getline(plnum) - let pline_len = strlen(pline) - if has('syntax_items') - " If the last character in the line is a comment, do a binary search for - " the start of the comment. synID() is slow, a linear search would take - " too long on a long line. - if synIDattr(synID(plnum, pline_len, 1), "name") =~ "\\(Comment\\|Todo\\)$" - let min = 1 - let max = pline_len - while min < max - let col = (min + max) / 2 - if synIDattr(synID(plnum, col, 1), "name") =~ "\\(Comment\\|Todo\\)$" - let max = col - else - let min = col + 1 - endif - endwhile - let pline = strpart(pline, 0, min - 1) - endif - else - let col = 0 - while col < pline_len - if pline[col] == '#' - let pline = strpart(pline, 0, col) - break - endif - let col = col + 1 - endwhile - endif - - " If the previous line ended with a colon, indent this line - if pline =~ ':\s*$' - return plindent + shiftwidth() - endif - - " If the previous line was a stop-execution statement... 
- " TODO: utilize this logic to deindent when ending a bbPyDefRegion - if getline(plnum) =~ '^\s*\(break\|continue\|raise\|return\|pass\|bb\.fatal\)\>' - " See if the user has already dedented - if indent(a:lnum) > indent(plnum) - shiftwidth() - " If not, recommend one dedent - return indent(plnum) - shiftwidth() - endif - " Otherwise, trust the user - return -1 - endif - - " If the current line begins with a keyword that lines up with "try" - if getline(a:lnum) =~ '^\s*\(except\|finally\)\>' - let lnum = a:lnum - 1 - while lnum >= 1 - if getline(lnum) =~ '^\s*\(try\|except\)\>' - let ind = indent(lnum) - if ind >= indent(a:lnum) - return -1 " indent is already less than this - endif - return ind " line up with previous try or except - endif - let lnum = lnum - 1 - endwhile - return -1 " no matching "try"! - endif - - " If the current line begins with a header keyword, dedent - if getline(a:lnum) =~ '^\s*\(elif\|else\)\>' - - " Unless the previous line was a one-liner - if getline(plnumstart) =~ '^\s*\(for\|if\|try\)\>' - return plindent - endif - - " Or the user has already dedented - if indent(a:lnum) <= plindent - shiftwidth() - return -1 - endif - - return plindent - shiftwidth() - endif - - " When after a () construct we probably want to go back to the start line. - " a = (b - " + c) - " here - if parlnum > 0 - return plindent - endif - - return -1 - -endfunction - -let &cpo = s:keepcpo -unlet s:keepcpo - -""" end of stuff from indent/python.vim - - -let b:did_indent = 1 -setlocal indentkeys+=0\" - - -function! BitbakeIndent(lnum) - if !has('syntax_items') - return -1 - endif - - let stack = synstack(a:lnum, 1) - if len(stack) == 0 - return -1 - endif - - let name = synIDattr(stack[0], "name") - - " TODO: support different styles of indentation for assignments. For now, - " we only support like this: - " VAR = " \ - " value1 \ - " value2 \ - " " - " - " i.e. each value indented by shiftwidth(), with the final quote " completely unindented. - if name == "bbVarValue" - " Quote handling is tricky. kernel.bbclass has this line for instance: - " EXTRA_OEMAKE = " HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" " HOSTCPP="${BUILD_CPP}"" - " Instead of trying to handle crazy cases like that, just assume that a - " double-quote on a line by itself (following an assignment) means the - " user is closing the assignment, and de-dent. - if getline(a:lnum) =~ '^\s*"$' - return 0 - endif - - let prevstack = synstack(a:lnum - 1, 1) - if len(prevstack) == 0 - return -1 - endif - - let prevname = synIDattr(prevstack[0], "name") - - " Only indent if there was actually a continuation character on - " the previous line, to avoid misleading indentation. - let prevlinelastchar = synIDattr(synID(a:lnum - 1, col([a:lnum - 1, "$"]) - 1, 1), "name") - let prev_continued = prevlinelastchar == "bbContinue" - - " Did the previous line introduce an assignment? 
- if index(["bbVarDef", "bbVarFlagDef"], prevname) != -1 - if prev_continued - return shiftwidth() - endif - endif - - if !prev_continued - return 0 - endif - - " Autoindent can take it from here - return -1 - endif - - if index(["bbPyDefRegion", "bbPyFuncRegion"], name) != -1 - let ret = GetBBPythonIndent(a:lnum) - " Should normally always be indented by at least one shiftwidth; but allow - " return of -1 (defer to autoindent) or -2 (force indent to 0) - if ret == 0 - return shiftwidth() - elseif ret == -2 - return 0 - endif - return ret - endif - - " TODO: GetShIndent doesn't detect tasks prepended with 'fakeroot' - " Need to submit a patch upstream to Vim to provide an extension point. - " Unlike the Python indenter, the Sh indenter is way too large to copy and - " modify here. - if name == "bbShFuncRegion" - return GetShIndent() - endif - - " TODO: - " + heuristics for de-denting out of a bbPyDefRegion? e.g. when the user - " types an obvious BB keyword like addhandler or addtask, or starts - " writing a shell task. Maybe too hard to implement... - - return -1 -endfunction diff --git a/bitbake/contrib/vim/plugin/newbb.vim b/bitbake/contrib/vim/plugin/newbb.vim deleted file mode 100644 index 3a42027361..0000000000 --- a/bitbake/contrib/vim/plugin/newbb.vim +++ /dev/null @@ -1,88 +0,0 @@ -" Vim plugin file -" Purpose: Create a template for new bb files -" Author: Ricardo Salveti -" Copyright: Copyright (C) 2008 Ricardo Salveti -" -" This file is licensed under the MIT license, see COPYING.MIT in -" this source distribution for the terms. -" -" Based on the gentoo-syntax package -" -" Will try to use git to find the user name and email - -if &compatible || v:version < 600 || exists("b:loaded_bitbake_plugin") - finish -endif - -fun! GetUserName() - let l:user_name = system("git config --get user.name") - if v:shell_error - return "Unknown User" - else - return substitute(l:user_name, "\n", "", "") -endfun - -fun! GetUserEmail() - let l:user_email = system("git config --get user.email") - if v:shell_error - return "unknown@user.org" - else - return substitute(l:user_email, "\n", "", "") -endfun - -fun! BBHeader() - let l:current_year = strftime("%Y") - let l:user_name = GetUserName() - let l:user_email = GetUserEmail() - 0 put ='# Copyright (C) ' . l:current_year . - \ ' ' . l:user_name . ' <' . l:user_email . '>' - put ='# Released under the MIT license (see COPYING.MIT for the terms)' - $ -endfun - -fun! 
NewBBTemplate() - if line2byte(line('$') + 1) != -1 - return - endif - - let l:paste = &paste - set nopaste - - " Get the header - call BBHeader() - - " New the bb template - put ='SUMMARY = \"\"' - put ='HOMEPAGE = \"\"' - put ='LICENSE = \"\"' - put ='SECTION = \"\"' - put ='DEPENDS = \"\"' - put ='' - put ='SRC_URI = \"\"' - - " Go to the first place to edit - 0 - /^SUMMARY =/ - exec "normal 2f\"" - - if paste == 1 - set paste - endif -endfun - -if !exists("g:bb_create_on_empty") - let g:bb_create_on_empty = 1 -endif - -" disable in case of vimdiff -if v:progname =~ "vimdiff" - let g:bb_create_on_empty = 0 -endif - -augroup NewBB - au BufNewFile,BufReadPost *.bb - \ if g:bb_create_on_empty | - \ call NewBBTemplate() | - \ endif -augroup END - diff --git a/bitbake/contrib/vim/plugin/newbbappend.vim b/bitbake/contrib/vim/plugin/newbbappend.vim deleted file mode 100644 index 3f65f79cdc..0000000000 --- a/bitbake/contrib/vim/plugin/newbbappend.vim +++ /dev/null @@ -1,46 +0,0 @@ -" Vim plugin file -" Purpose: Create a template for new bbappend file -" Author: Joshua Watt -" Copyright: Copyright (C) 2017 Joshua Watt -" -" This file is licensed under the MIT license, see COPYING.MIT in -" this source distribution for the terms. -" - -if &compatible || v:version < 600 || exists("b:loaded_bitbake_plugin") - finish -endif - -fun! NewBBAppendTemplate() - if line2byte(line('$') + 1) != -1 - return - endif - - let l:paste = &paste - set nopaste - - " New bbappend template - 0 put ='FILESEXTRAPATHS:prepend := \"${THISDIR}/${PN}:\"' - 2 - - if paste == 1 - set paste - endif -endfun - -if !exists("g:bb_create_on_empty") - let g:bb_create_on_empty = 1 -endif - -" disable in case of vimdiff -if v:progname =~ "vimdiff" - let g:bb_create_on_empty = 0 -endif - -augroup NewBBAppend - au BufNewFile,BufReadPost *.bbappend - \ if g:bb_create_on_empty | - \ call NewBBAppendTemplate() | - \ endif -augroup END - diff --git a/bitbake/contrib/vim/syntax/bitbake.vim b/bitbake/contrib/vim/syntax/bitbake.vim deleted file mode 100644 index 8f39b8f951..0000000000 --- a/bitbake/contrib/vim/syntax/bitbake.vim +++ /dev/null @@ -1,131 +0,0 @@ -" Vim syntax file -" Language: BitBake bb/bbclasses/inc -" Author: Chris Larson -" Ricardo Salveti -" Copyright: Copyright (C) 2004 Chris Larson -" Copyright (C) 2008 Ricardo Salveti -" -" This file is licensed under the MIT license, see COPYING.MIT in -" this source distribution for the terms. -" -" Syntax highlighting for bb, bbclasses and inc files. -" -" It's an entirely new type, just has specific syntax in shell and python code - -if &compatible || v:version < 600 || exists("b:loaded_bitbake_plugin") - finish -endif -if exists("b:current_syntax") - finish -endif - -syn include @python syntax/python.vim -if exists("b:current_syntax") - unlet b:current_syntax -endif - -" BitBake syntax - -" Matching case -syn case match - -" Indicates the error when nothing is matched -syn match bbUnmatched "." 
- -" Comments -syn cluster bbCommentGroup contains=bbTodo,@Spell -syn keyword bbTodo COMBAK FIXME TODO XXX contained -syn match bbComment "#.*$" contains=@bbCommentGroup - -" String helpers -syn match bbQuote +['"]+ contained -syn match bbDelimiter "[(){}=]" contained -syn match bbArrayBrackets "[\[\]]" contained - -" BitBake strings -syn match bbContinue "\\$" -syn region bbString matchgroup=bbQuote start=+"+ skip=+\\$+ end=+"+ contained contains=bbTodo,bbContinue,bbVarDeref,bbVarPyValue,@Spell -syn region bbString matchgroup=bbQuote start=+'+ skip=+\\$+ end=+'+ contained contains=bbTodo,bbContinue,bbVarDeref,bbVarPyValue,@Spell - -" Vars definition -syn match bbExport "^export" nextgroup=bbIdentifier skipwhite -syn keyword bbExportFlag export contained nextgroup=bbIdentifier skipwhite -syn match bbIdentifier "[a-zA-Z0-9\-_\.\/\+]\+" display contained -syn match bbVarDeref "${[a-zA-Z0-9\-_:\.\/\+]\+}" contained -syn match bbVarEq "\(:=\|+=\|=+\|\.=\|=\.\|?=\|??=\|=\)" contained nextgroup=bbVarValue -syn match bbVarDef "^\(export\s*\)\?\([a-zA-Z0-9\-_\.\/\+][${}a-zA-Z0-9\-_:\.\/\+]*\)\s*\(:=\|+=\|=+\|\.=\|=\.\|?=\|??=\|=\)\@=" contains=bbExportFlag,bbIdentifier,bbOverrideOperator,bbVarDeref nextgroup=bbVarEq -syn match bbVarValue ".*$" contained contains=bbString,bbVarDeref,bbVarPyValue -syn region bbVarPyValue start=+${@+ skip=+\\$+ end=+}+ contained contains=@python - -" Vars metadata flags -syn match bbVarFlagDef "^\([a-zA-Z0-9\-_\.]\+\)\(\[[a-zA-Z0-9\-_\.+]\+\]\)\@=" contains=bbIdentifier nextgroup=bbVarFlagFlag -syn region bbVarFlagFlag matchgroup=bbArrayBrackets start="\[" end="\]\s*\(:=\|=\|.=\|=.|+=\|=+\|?=\)\@=" contained contains=bbIdentifier nextgroup=bbVarEq - -" Includes and requires -syn keyword bbInclude inherit include require contained -syn match bbIncludeRest ".*$" contained contains=bbString,bbVarDeref,bbVarPyValue -syn match bbIncludeLine "^\(inherit\|include\|require\)\s\+" contains=bbInclude nextgroup=bbIncludeRest - -" Add taks and similar -syn keyword bbStatement addtask deltask addhandler after before EXPORT_FUNCTIONS contained -syn match bbStatementRest /[^\\]*$/ skipwhite contained contains=bbStatement,bbVarDeref,bbVarPyValue -syn region bbStatementRestCont start=/.*\\$/ end=/^[^\\]*$/ contained contains=bbStatement,bbVarDeref,bbVarPyValue,bbContinue keepend -syn match bbStatementLine "^\(addtask\|deltask\|addhandler\|after\|before\|EXPORT_FUNCTIONS\)\s\+" contains=bbStatement nextgroup=bbStatementRest,bbStatementRestCont - -" OE Important Functions -syn keyword bbOEFunctions do_fetch do_unpack do_patch do_configure do_compile do_stage do_install do_package contained - -" Generic Functions -syn match bbFunction "\h[0-9A-Za-z_\-\.]*" display contained contains=bbOEFunctions - -syn keyword bbOverrideOperator append prepend remove contained - -" BitBake shell metadata -syn include @shell syntax/sh.vim -if exists("b:current_syntax") - unlet b:current_syntax -endif -syn keyword bbShFakeRootFlag fakeroot contained -syn match bbShFuncDef "^\(fakeroot\s*\)\?\([\.0-9A-Za-z_:${}\-\.]\+\)\(python\)\@ - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. diff --git a/bitbake/doc/COPYING.MIT b/bitbake/doc/COPYING.MIT deleted file mode 100644 index 7e7d57413d..0000000000 --- a/bitbake/doc/COPYING.MIT +++ /dev/null @@ -1,17 +0,0 @@ -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR -THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/bitbake/doc/Makefile b/bitbake/doc/Makefile deleted file mode 100644 index 996f01b7d5..0000000000 --- a/bitbake/doc/Makefile +++ /dev/null @@ -1,35 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line, and also -# from the environment for the first two. -SPHINXOPTS ?= -W --keep-going -j auto -SPHINXBUILD ?= sphinx-build -SOURCEDIR = . -BUILDDIR = _build -DESTDIR = final - -ifeq ($(shell if which $(SPHINXBUILD) >/dev/null 2>&1; then echo 1; else echo 0; fi),0) -$(error "The '$(SPHINXBUILD)' command was not found. 
Make sure you have Sphinx installed")
-endif
-
-# Put it first so that "make" without argument is like "make help".
-help:
-	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-
-.PHONY: help Makefile clean publish
-
-publish: Makefile html singlehtml
-	rm -rf $(BUILDDIR)/$(DESTDIR)/
-	mkdir -p $(BUILDDIR)/$(DESTDIR)/
-	cp -r $(BUILDDIR)/html/* $(BUILDDIR)/$(DESTDIR)/
-	cp $(BUILDDIR)/singlehtml/index.html $(BUILDDIR)/$(DESTDIR)/singleindex.html
-	sed -i -e 's@index.html#@singleindex.html#@g' $(BUILDDIR)/$(DESTDIR)/singleindex.html
-
-clean:
-	@rm -rf $(BUILDDIR)
-
-# Catch-all target: route all unknown targets to Sphinx using the new
-# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
-%: Makefile
-	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/bitbake/doc/README b/bitbake/doc/README
deleted file mode 100644
index d4f56afa37..0000000000
--- a/bitbake/doc/README
+++ /dev/null
@@ -1,55 +0,0 @@
-Documentation
-=============
-
-This is the directory that contains the BitBake documentation.
-
-Manual Organization
-===================
-
-Folders exist for individual manuals as follows:
-
-* bitbake-user-manual --- The BitBake User Manual
-
-Each folder is self-contained regarding content and figures.
-
-If you want to find HTML versions of the BitBake manuals on the web,
-go to https://www.openembedded.org/wiki/Documentation.
-
-Sphinx
-======
-
-The BitBake documentation was migrated from the original DocBook
-format to Sphinx based documentation for the Yocto Project 3.2
-release.
-
-Additional information related to the Sphinx migration, and guidelines
-for developers willing to contribute to the BitBake documentation can
-be found in the Yocto Project Documentation README file:
-
-https://git.yoctoproject.org/cgit/cgit.cgi/yocto-docs/tree/documentation/README
-
-How to build the Yocto Project documentation
-============================================
-
-Sphinx is written in Python. While it might work with Python2, for
-obvious reasons, we will only support building the BitBake
-documentation with Python3.
-
-Sphinx might be available in your Linux distribution's package
-repositories, however it is not recommended to use distro packages,
-as they might be old versions, especially if you are using an LTS
-version of your distro. The recommended method to install Sphinx and
-all required dependencies is to use the Python Package Index (pip).
-
-To install all required packages run:
-
-   $ pip3 install sphinx sphinx_rtd_theme pyyaml
-
-To build the documentation locally, run:
-
-   $ cd doc
-   $ make html
-
-The resulting HTML index page will be _build/html/index.html, and you
-can browse your own copy of the locally generated documentation with
-your browser.
diff --git a/bitbake/doc/_templates/breadcrumbs.html b/bitbake/doc/_templates/breadcrumbs.html
deleted file mode 100644
index eb6244b74c..0000000000
--- a/bitbake/doc/_templates/breadcrumbs.html
+++ /dev/null
@@ -1,14 +0,0 @@
-{% extends "!breadcrumbs.html" %}
-
-{% block breadcrumbs %}
-{{ doctype or 'single' }}
-{{ release }}
-»
-{% for doc in parents %}
-{{ doc.title }} »
-{% endfor %}
-{{ title }}
-{% endblock %}
-
diff --git a/bitbake/doc/_templates/footer.html b/bitbake/doc/_templates/footer.html
deleted file mode 100644
index 1398f20d7e..0000000000
--- a/bitbake/doc/_templates/footer.html
+++ /dev/null
@@ -1,9 +0,0 @@
-
-© Copyright {{ copyright }}
-Last updated on {{ last_updated }} from the bitbake git repository.
-
diff --git a/bitbake/doc/_templates/layout.html b/bitbake/doc/_templates/layout.html
deleted file mode 100644
index 308d5c7a28..0000000000
--- a/bitbake/doc/_templates/layout.html
+++ /dev/null
@@ -1,7 +0,0 @@
-{% extends "!layout.html" %}
-
-{% block extrabody %}
-
-{% endblock %}
-
diff --git a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-environment-setup.rst b/bitbake/doc/bitbake-user-manual/bitbake-user-manual-environment-setup.rst
deleted file mode 100644
index 66c0b0fa68..0000000000
--- a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-environment-setup.rst
+++ /dev/null
@@ -1,822 +0,0 @@
-.. SPDX-License-Identifier: CC-BY-2.5
-
-=============================================
-Setting Up The Environment With bitbake-setup
-=============================================
-
-|
-
-Setting up layers and configuring builds can be done with the
-``bitbake-setup`` tool. This tool acts as a top-level utility which can perform
-the following tasks:
-
-- Parse a JSON configuration file that describes what layers and which snippets
-  of configuration to use.
-
-- Clone the layers and check them out at the versions specified in the
-  configuration file.
-
-- Create and set up a directory ready for building what is specified in the
-  configuration files.
-
-- Behave according to global or per-project settings.
-
-- Synchronize with upstream configuration changes.
-
-Quick Start
-===========
-
-#. ``bitbake-setup`` is part of the BitBake repository under
-   ``./bin/bitbake-setup``. To start, run:
-
-   .. code-block:: shell
-
-      $ ./bin/bitbake-setup init
-
-   This command will ask you to choose which configurations to use from those
-   available as part of the default BitBake :term:`generic configurations
-   <Generic Configuration>`.
-
-   .. note::
-
-      These default configurations are located in ``./bitbake/default-registry/``.
-      See the :ref:`ref-bbsetup-section-config-reference` section to learn more
-      about ``bitbake-setup`` input configuration files.
-
-#. With the default choices, the ``bitbake-setup init`` command creates the
-   following directories::
-
-      ~/bitbake-builds/
-      └── poky-master-poky-distro_poky-machine_qemux86-64/
-          ├── build/
-          ├── config/
-          └── layers/
-
-   With:
-
-   - ``~/bitbake-builds``: the :term:`Top Directory`, where ``bitbake-setup``
-     configures everything. This directory can be configured with the
-     :ref:`ref-bbsetup-setting-top-dir-prefix` and
-     :ref:`ref-bbsetup-setting-top-dir-name` settings.
-
-   - ``poky-master-poky-distro_poky-machine_qemux86-64``: a :term:`Setup`
-     directory, which holds a :term:`Setup`: the result of the choices made
-     during the ``bitbake-setup init`` execution.
-
-     The name of the directory will vary depending on the choices.
-
-   - ``config/``: holds the :term:`Specific Configuration`, which embeds the
-     :term:`Generic Configuration` (first choice of the
-     :ref:`ref-bbsetup-command-init` command) and the choices made during the
-     initialization.
-
-   - ``build/``: the :term:`BitBake Build` directory, where BitBake stores
-     its own configuration and outputs for the builds.
-
-   - ``layers/``: the directory where layers and other repositories managed
-     by ``bitbake-setup`` are stored and updated.
-
-#. Source the ``init-build-env`` file present in the :term:`BitBake Build`
-   directory:
-
-   .. code-block:: shell
-
-      $ source ./poky-master-poky-distro_poky-machine_qemux86-64/build/init-build-env
-
-   This command will prepare your current shell with the BitBake environment.
-
-#. You can then start running ``bitbake`` in the current shell. For more information
-   on how to use ``bitbake``, see the :doc:`/bitbake-user-manual/bitbake-user-manual-execution`
-   section of this manual.
-
-Terminology
-===========
-
-The ``bitbake-setup`` tool revolves around some common terms we define in this
- -``bitbake-setup`` works with a specific hierarchy of directories, that can be -represented as follows:: - - Top Directory - ├── Setup 1 - │   ├── build/ - │ ├── config/ - │   └── layers/ - ├── Setup 2 - │   ├── build/ - │ ├── config/ - │   └── layers/ - ... - -The "Top Directory" and "Setup" directories are defined as follows: - -.. glossary:: - :term:`Top Directory` - The top directory is the working directory of ``bitbake-setup``, where its - outputs end-up (unless otherwise configured by :term:`settings` such as - :ref:`ref-bbsetup-setting-dl-dir`). - - The location of this directory can be changed with the - :ref:`ref-bbsetup-setting-top-dir-prefix` and - :ref:`ref-bbsetup-setting-top-dir-name` settings. - - The top directory contains one or more :term:`Setup` directories, each of - them containing a :term:`Setup`. - - :term:`Setup` - A Setup is the result of the :ref:`ref-bbsetup-command-init` - command, which creates a :term:`Setup` directory. It is constructed from a - :term:`Generic Configuration` and choices made during the ``init`` command. - - It contains at least: - - - A :term:`BitBake Build` (``build/`` directory). - - A :term:`Specific Configuration` (``config/`` directory). - - Sources such as :ref:`layers - ` or other - repositories managed by ``bitbake-setup`` (``layers/`` directory). - -The following components are involved to create the content of these directories: - -.. glossary:: - :term:`BitBake Build` - A BitBake Build is a sub-tree inside a :term:`Setup` that BitBake itself - operates on. The files in the ``conf/`` directory of a :term:`BitBake - Build` constitute the :ref:`BitBake configuration - `. - - :term:`Generic Configuration` - A Generic Configuration is a file in JSON format containing a template to - create a :term:`Setup`. These files are used during the :ref:`ref-bbsetup-command-init` - command as a starting point to configure the :term:`Setup`. When the - command runs, the user may be prompted with choices to further specify the - :term:`Setup` to create. - - It is also possible to specify the choices on the command line for a - completely non-interactive initialization. - - :term:`Generic Configuration` files are stored in :term:`registries - `, and can be listed with the :ref:`ref-bbsetup-command-list` - command. - - :term:`Generic Configuration` files must end with the ``.conf.json`` - suffix for ``bitbake-setup`` to locate them. - - .. note:: - - The default :term:`Generic Configurations ` are - located in the BitBake repository in a local registry. the - ``default-registry/`` directory. This can be modified with the - :ref:`ref-bbsetup-setting-registry` setting. - - :ref:`ref-bbsetup-command-status` will tell if a :term:`Setup` - is in sync with the :term:`Generic Configuration` it was constructed from - (typically: layer updates). - - :ref:`ref-bbsetup-command-update` will bring a :term:`Setup` - in sync with its :term:`Generic Configuration`. - - :term:`Specific Configuration` - The :term:`Specific Configuration` is stored in the ``config/`` directory - in a :term:`Setup`. It embeds the :term:`Generic Configuration` and the - choices made during the initialization. - - It is also a Git repository, that contains a history of the specific - configuration and updates made to it via :ref:`ref-bbsetup-command-update`. - - :term:`Registry` - A configuration registry is a place where one or more :term:`Generic - Configurations ` are stored. 
-
-   :term:`Settings`
-      Settings are operational parameters that are global to all builds under a
-      :term:`Top Directory`, stored in a ``settings.conf`` file. For example,
-      this could be the location of the configuration registry, or where the
-      BitBake fetcher should store the downloads.
-
-      There are also global settings, common to all top directories, which are
-      stored in ``~/.config/bitbake-setup/settings.conf``.
-
-      See the
-      :ref:`bitbake-user-manual/bitbake-user-manual-environment-setup:Settings`
-      section to see the supported settings and where they can be stored.
-
-   :term:`Source Override`
-      A source override is a JSON file that can be used to modify the revisions
-      and origins of layers or other sources that need to be checked out into a
-      :term:`Setup` (in the ``layers/`` directory). It can be useful, for
-      example, when master branches need to be changed to master-next for the
-      purpose of testing, or to set up a CI pipeline that tests code in a pull
-      request coming from a developer's repository and branch.
-
-      Such a file is specified with a command-line option to
-      :ref:`ref-bbsetup-command-init`.
-
-      See the :ref:`ref-bbsetup-source-overrides` section for more information
-      on the format of these files.
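-
-As a small illustration of how layered :term:`Settings` combine, the sketch
-below reads the global settings file first and a top directory's
-``settings.conf`` second with Python's ``configparser``, so that values from
-the latter override the former. This mirrors the reading order described in
-the Settings section of this manual; it is a model, not ``bitbake-setup``
-code, and the top directory path is a placeholder:
-
-.. code-block:: python
-
-   import configparser
-   import os
-
-   parser = configparser.ConfigParser()
-   # Files listed later override values read from earlier ones.
-   parser.read([
-       os.path.expanduser("~/.config/bitbake-setup/settings.conf"),
-       "/path/to/top-dir/settings.conf",  # placeholder Top Directory
-   ])
-
-   # Look up the 'registry' setting from the 'default' section.
-   print(parser.get("default", "registry", fallback="<not set>"))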
-
-``bitbake-setup`` will stop and ask to make a choice if the above command does
-not contain all of the required configurations to complete the sequence of
-choices.
-
-In addition, the command can take the following arguments:
-
-- ``--non-interactive``: can be used to create :term:`Setups <Setup>`
-  without interaction from the user. The command will fail if not all the
-  required choices are provided in the command.
-
-- ``--source-overrides``: can be used to pass one or more
-  :term:`source override <Source Override>` files. See the
-  :ref:`ref-bbsetup-source-overrides` section.
-
-- ``--setup-dir-name``: can be used to configure the name of the :term:`Setup`
-  directory.
-
-- ``--skip-selection``: can be used to skip some of the choices
-  (which may result in an incomplete :term:`Setup`!).
-
-``bitbake-setup init`` Examples
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-- This example prompts the user to choose a :term:`Generic Configuration` from
-  a custom registry:
-
-  .. code-block:: shell
-
-     $ ./bitbake/bin/bitbake-setup \
-           --setting default registry 'git://example.com/bitbake-setup-configurations.git;protocol=https;branch=main;rev=main' \
-           init
-
-- This example takes a :term:`Generic Configuration` from a remote location
-  (here, one of the default configurations in BitBake):
-
-  .. code-block:: shell
-
-     $ ./bitbake/bin/bitbake-setup init https://git.openembedded.org/bitbake/plain/default-registry/configurations/oe-nodistro.conf.json
-
-- This example initializes a :term:`Setup` with:
-
-  - A custom :ref:`ref-bbsetup-setting-top-dir-prefix` and :ref:`ref-bbsetup-setting-top-dir-name`.
-  - A :term:`source override <Source Override>` file.
-  - A standalone :term:`generic configuration <Generic Configuration>` file.
-  - Choices passed on the command-line, applied non-interactively.
-
-  .. code-block:: shell
-
-     $ ./bitbake/bin/bitbake-setup \
-           --setting default top-dir-prefix /work/bitbake-setup \
-           --setting default top-dir-name custom-project \
-           init \
-           --non-interactive \
-           --source-overrides develop-branch.json \
-           ./gadget_master.conf.json \
-           gadget distro/gadget machine/gadget
-
-.. _ref-bbsetup-command-list:
-
-``bitbake-setup list``
-----------------------
-
-The ``bitbake-setup list`` sub-command lists the available :term:`generic
-configurations <Generic Configuration>` in the current :term:`registry`.
-
-In addition, the command can take the following arguments:
-
-- ``--with-expired``: list expired configurations (e.g. older Yocto releases
-  that have reached their End-Of-Life dates).
-
-- ``--write-json``: write the configurations into a JSON file so they can be
-  programmatically processed.
-
-.. _ref-bbsetup-command-status:
-
-``bitbake-setup status``
-------------------------
-
-The ``bitbake-setup status`` sub-command shows the status of a
-:term:`Setup`. Any differences between the local copy of the :term:`generic
-configuration <Generic Configuration>` and the upstream one are printed on the
-console.
-
-If the BitBake environment is sourced and ready to build, the ``bitbake-setup
-status`` command (without any arguments) will show the status of the current
-:term:`Setup`.
-
-In addition, the command can take the following arguments:
-
-- ``--setup-dir``: path to the :term:`Setup` to check the status for. Not
-  required if the command is invoked from an initialized BitBake environment
-  that contains :term:`BBPATH`.
-
-.. _ref-bbsetup-command-update:
-
-``bitbake-setup update``
-------------------------
-
-The ``bitbake-setup update`` sub-command updates a :term:`Setup` to sync with
-the latest changes from the :term:`generic configuration <Generic
-Configuration>` it was constructed from.
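-
-For example, a :term:`Setup` can be inspected and brought in sync from outside
-an initialized BitBake environment by passing its path explicitly. This is a
-minimal sketch, where the ``~/bitbake-builds/my-setup`` path is a hypothetical
-example:
-
-.. code-block:: shell
-
-   # Report differences against the generic configuration, then sync.
-   # The setup path below is a hypothetical example.
-   $ bitbake-setup status --setup-dir ~/bitbake-builds/my-setup
-   $ bitbake-setup update --setup-dir ~/bitbake-builds/my-setup
-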
-The :ref:`ref-bbsetup-command-status` command can be used to show the current
-status of the :term:`Setup` before updating it.
-
-In addition, the command can take the following arguments:
-
-- ``--setup-dir``: path to the :term:`Setup` to update. Not required if the
-  command is invoked from an initialized BitBake environment that contains
-  :term:`BBPATH`.
-
-.. _ref-bbsetup-command-install-buildtools:
-
-``bitbake-setup install-buildtools``
-------------------------------------
-
-The ``bitbake-setup install-buildtools`` sub-command installs and extracts a
-buildtools tarball into the specified :term:`Setup`.
-
-After completion, instructions are printed on how to use the extracted
-tarball.
-
-.. note::
-
-   The purpose of the buildtools tarball is to provide tools needed to run
-   BitBake on build machines where such tools cannot be easily obtained from the
-   host Linux distribution (for example on older distribution versions that do
-   not contain a recent enough GCC compiler or Python interpreter, or machines
-   where the user running BitBake cannot easily install additional software into
-   the system). This command requires that the OpenEmbedded-core layer is
-   present in the BitBake configuration.
-
-   See https://docs.yoctoproject.org/ref-manual/system-requirements.html#required-git-tar-python-make-and-gcc-versions
-   for more information.
-
-In addition, the command can take the following arguments:
-
-- ``--force``: force the re-installation of the tarball.
-
-- ``--setup-dir``: path to the :term:`Setup` to install the buildtools into.
-  Not required if :term:`BBPATH` is already configured.
-
-.. _ref-bbsetup-command-settings:
-
-``bitbake-setup settings``
---------------------------
-
-The ``bitbake-setup settings`` sub-command modifies the settings of
-``bitbake-setup``. This sub-command has sub-commands of its own:
-
-- ``bitbake-setup settings list`` lists the current settings and their values.
-- ``bitbake-setup settings set`` sets a setting.
-- ``bitbake-setup settings unset`` removes a setting.
-
-Settings must be set with a section and a value, for example::
-
-   bitbake-setup settings set default top-dir-name bitbake-builds
-
-This will set the value of ``top-dir-name`` in the ``default`` section to
-"bitbake-builds".
-
-In addition, the command can take the following arguments:
-
-- ``--global``: write to the global settings
-  (``~/.config/bitbake-setup/settings.conf``) instead of the :term:`Top
-  Directory` settings.
-
-See the :ref:`bitbake-user-manual/bitbake-user-manual-environment-setup:Settings`
-section to see the supported settings.
-
-.. note::
-
-   The supported settings listed in the
-   :ref:`bitbake-user-manual/bitbake-user-manual-environment-setup:Settings`
-   section only take effect when set in the ``default`` section.
-
-Settings
-========
-
-The settings allow configuring ``bitbake-setup``. Settings are stored in a file
-named ``settings.conf``, in :wikipedia:`INI <INI_file>` format.
-
-There are multiple locations for storing settings. Settings in different
-locations can override each other: the final value of a setting is computed
-by reading the files in the following order, with later sources overriding
-earlier ones:
-
-#. Global settings file: ``~/.config/bitbake-setup/settings.conf``.
-
-#. Local settings file, taken from a ``settings.conf`` file in the :term:`Top
-   Directory`.
-
-#. Command-line settings, passed with the ``--setting`` argument.
-
-A valid settings file would for example be:
-
-.. code-block:: ini
-
-   [default]
-   top-dir-prefix = /path/to/workspace
-   top-dir-name = bitbake-builds
-   registry = /path/to/bitbake/default-registry
-   dl-dir = /path/to/bitbake-setup-downloads
-
-Settings and their values can be listed and modified with the ``bitbake-setup
-settings`` command. See the :ref:`ref-bbsetup-command-settings` section for
-more information.
-
-Below are the available settings.
-
-.. _ref-bbsetup-setting-top-dir-prefix:
-
-``top-dir-prefix``
-------------------
-
-The :ref:`ref-bbsetup-setting-top-dir-prefix` setting configures the
-leftmost part of the path to the :term:`Top Directory`.
-
-For example, with:
-
-.. code-block:: ini
-
-   [default]
-   top-dir-prefix = /path/to/workspace
-
-The :term:`top directory` would be ``/path/to/workspace/<top-dir-name>``, with
-``<top-dir-name>`` corresponding to the :ref:`ref-bbsetup-setting-top-dir-name`
-setting.
-
-This is most useful to customize on systems where the default location of the
-:term:`Top Directory` (``~/bitbake-builds``) is not suitable, and there is a
-dedicated directory for builds somewhere else.
-
-.. _ref-bbsetup-setting-top-dir-name:
-
-``top-dir-name``
-----------------
-
-The :ref:`ref-bbsetup-setting-top-dir-name` setting configures the
-rightmost part of the path to the :term:`Top Directory`.
-
-For example, with:
-
-.. code-block:: ini
-
-   [default]
-   top-dir-name = builds
-
-The :term:`top directory` would be ``<top-dir-prefix>/builds``, with
-``<top-dir-prefix>`` corresponding to the :ref:`ref-bbsetup-setting-top-dir-prefix`
-setting.
-
-.. _ref-bbsetup-setting-registry:
-
-``registry``
-------------
-
-The :ref:`ref-bbsetup-setting-registry` setting sets the URI location of the
-registry. This URI can be any URI supported by the BitBake fetcher.
-
-A local registry would be configured as follows:
-
-.. code-block:: ini
-
-   [default]
-   registry = /path/to/registry
-
-When using another fetcher, it must be specified in the URI scheme. For example:
-
-.. code-block:: ini
-
-   [default]
-   registry = git://example.com/bitbake-setup-configurations;protocol=https;branch=master;rev=master
-
-This would fetch the configurations from a remote Git repository, on the
-``master`` branch.
-
-See the :doc:`/bitbake-user-manual/bitbake-user-manual-fetching` section for more
-information on BitBake fetchers.
-
-.. _ref-bbsetup-setting-dl-dir:
-
-``dl-dir``
-----------
-
-The :ref:`ref-bbsetup-setting-dl-dir` setting sets the location of the download
-cache that ``bitbake-setup`` will configure for the purpose of downloading
-configuration repositories, layers and other sources using BitBake fetchers.
-Please see :doc:`/bitbake-user-manual/bitbake-user-manual-fetching` and the
-:term:`DL_DIR` variable for more information.
-
-The location can be set such that it is shared with the :term:`DL_DIR` specified
-by BitBake builds, so that there is a single directory containing a copy of
-everything needed to set up and run a BitBake build offline in a reproducible
-manner.
-
-.. _ref-bbsetup-section-config-reference:
-
-Generic Configuration Files Reference
-=====================================
-
-:term:`Generic Configurations <Generic Configuration>` are the input files given
-to ``bitbake-setup`` to configure :term:`Setups <Setup>`.
-
-These files are written in the JSON file format and are stored in a
-:term:`Registry`. They can also be standalone files directly passed to the
-:ref:`ref-bbsetup-command-init` command:
-
-.. code-block:: shell
-
-   $ bitbake-setup init /path/to/config.conf.json
-
-They contain the following sections:
-
-- ``version`` (**required**): version of the configuration file.
-
-  Example:
-
-  .. code-block:: json
-     :force:
-
-     {
-         "version": "1.0"
-     }
-
-- ``description`` (**required**): the description of the configuration.
-
-  Example:
-
-  .. code-block:: json
-     :force:
-
-     {
-         "description": "OpenEmbedded - 'nodistro' basic configuration"
-     }
-
-- ``sources`` (*optional*): Git repositories to fetch.
-
-  Example:
-
-  .. code-block:: json
-     :force:
-
-     {
-         "sources": {
-             "bitbake": {
-                 "git-remote": {
-                     "remotes": {
-                         "origin": {
-                             "uri": "git://git.openembedded.org/bitbake;protocol=https"
-                         }
-                     },
-                     "branch": "master",
-                     "rev": "master"
-                 },
-                 "path": "bitbake"
-             }
-         }
-     }
-
-  Sources can be specified with the following options:
-
-  - ``uri`` (**required**): a URI that follows the BitBake Git fetcher syntax.
-    See the :doc:`/bitbake-user-manual/bitbake-user-manual-fetching` section
-    for more information on the Git fetcher.
-
-  - ``rev`` (**required**): the revision to check out. This can also be the
-    name of a branch, in which case ``bitbake-setup`` checks out the latest
-    revision on that branch, and keeps it updated when using the
-    :ref:`ref-bbsetup-command-update` command.
-
-  - ``branch`` (**required**): the Git branch, used to check that the
-    specified ``rev`` is indeed on that branch.
-
-  - ``path``: where the source is extracted.
-
-- ``expires`` (*optional*): Expiration date of the configuration. This date
-  should be in :wikipedia:`ISO 8601 <ISO_8601>` format (``YYYY-MM-DDTHH:MM:SS``).
-
-- ``bitbake-setup`` (**required**): contains a list of configurations.
-
-  Example:
-
-  .. code-block:: json
-
-     {
-         "bitbake-setup": {
-             "configurations": [
-                 {
-                     "bb-layers": ["openembedded-core/meta","meta-yocto/meta-yocto-bsp","meta-yocto/meta-poky"],
-                     "bb-env-passthrough-additions": ["DL_DIR","SSTATE_DIR"],
-                     "oe-fragments-one-of": {
-                         "machine": {
-                             "description": "Target machines",
-                             "options" : ["machine/qemux86-64", "machine/qemuarm64", "machine/qemuriscv64", "machine/genericarm64", "machine/genericx86-64"]
-                         },
-                         "distro": {
-                             "description": "Distribution configuration variants",
-                             "options" : ["distro/poky", "distro/poky-altcfg", "distro/poky-tiny"]
-                         }
-                     },
-                     "configurations": [
-                         {
-                             "name": "poky",
-                             "description": "Poky - The Yocto Project testing distribution"
-                         },
-                         {
-                             "name": "poky-with-sstate",
-                             "description": "Poky - The Yocto Project testing distribution with internet sstate acceleration. Use with caution as it requires a completely robust local network with sufficient bandwidth.",
-                             "oe-fragments": ["core/yocto/sstate-mirror-cdn"]
-                         }
-                     ]
-                 }
-             ]
-         }
-     }
-
-  Configurations can be specified with the following options:
-
-  - ``name`` (**required**): the name of this configuration snippet. This is
-    what the user is prompted with during the :ref:`ref-bbsetup-command-init`
-    command execution.
-
-  - ``description`` (**required**): the description of this configuration
-    snippet. This is what the user is prompted with during the
-    :ref:`ref-bbsetup-command-init` command execution.
-
-  - ``configurations``: Configurations can recursively contain as many nested
-    configurations as needed. This will create more choices when running the
-    :ref:`ref-bbsetup-command-init` command.
-
-    The purpose of such nesting is to be able to scale the configurations, for
-    example when there is a need to create multiple configurations that share
-    some parameters (which are specified in their common parent), but differ
-    between themselves in other parameters. ``bitbake-setup`` will assemble
-    configuration choices by putting together information from a leaf
-    configuration and all of its ancestors.
-
-  - ``bb-env-passthrough-additions`` (*optional*): List of environment
-    variables to include in :term:`BB_ENV_PASSTHROUGH_ADDITIONS`.
-
-  - ``bb-layers`` (*optional*): List of layers to add to the ``bblayers.conf``
-    file. Paths in this list are relative to the ``layers/`` directory of a
-    :term:`Setup`.
-
-    The ``bb-layers`` keyword cannot be used in conjunction with the
-    ``oe-template`` option, as the ``bblayers.conf`` file comes from the
-    template itself.
-
-  - ``bb-layers-file-relative`` (*optional*): List of layers that are not
-    managed by ``bitbake-setup`` but that need to be included as part of the
-    ``bblayers.conf`` file. Paths in this list are relative to the
-    configuration file.
-
-    This is useful when (one or more) configuration files and (one or
-    more) layers are hosted in the same Git repository, which is cloned
-    and managed independently from ``bitbake-setup`` workflows. For example::
-
-       ├── meta-myproject/
-       └── myproject.conf.json
-
-    Then ``myproject.conf.json`` can contain the following to add
-    ``meta-myproject`` to ``bblayers.conf``::
-
-       {
-           ...
-           "bb-layers-file-relative": [
-               "meta-myproject"
-           ],
-           ...
-       }
-
-    The ``bb-layers-file-relative`` keyword cannot be used in conjunction with
-    the ``oe-template`` keyword, as the ``bblayers.conf`` file comes from the
-    template itself.
-
-  - ``oe-template`` (*optional*, OpenEmbedded specific): OpenEmbedded template
-    to use. This cannot be used in conjunction with the
-    ``bb-layers`` or ``bb-layers-file-relative`` keywords as it
-    already provides a ready ``bblayers.conf`` file to use.
-
-    See https://docs.yoctoproject.org/dev-manual/custom-template-configuration-directory.html
-    for more information on OpenEmbedded templates.
-
-  - ``oe-fragments-one-of`` (*optional*, OpenEmbedded specific): the OpenEmbedded
-    fragments to select as part of the build.
-
-    This will trigger choices to make during the
-    :ref:`ref-bbsetup-command-init` command execution.
-
-    See https://docs.yoctoproject.org/dev/ref-manual/fragments.html for
-    more information on OpenEmbedded configuration fragments.
-
-  - ``oe-fragments`` (*optional*, OpenEmbedded specific): fragments to select
-    as part of the build.
-
-    See https://docs.yoctoproject.org/dev/ref-manual/fragments.html for
-    more information on OpenEmbedded configuration fragments.
-
-Generic Configuration Examples
-------------------------------
-
-OpenEmbedded "nodistro" configuration for master branches:
-
-.. literalinclude:: ../../default-registry/configurations/oe-nodistro.conf.json
-   :language: json
-
-Poky distribution configuration for master branches:
-
-.. literalinclude:: ../../default-registry/configurations/poky-master.conf.json
-   :language: json
-
-.. _ref-bbsetup-source-overrides:
-
-Source Overrides
-================
-
-See the definition of :term:`Source Override` in the Terminology section.
-
-These files are written in the JSON file format and are optionally passed to the
-``--source-overrides`` argument of the :ref:`ref-bbsetup-command-init` command.
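-
-As a sketch of how such a file is consumed, an initialization could pass a
-hypothetical override file named ``master-next.json`` from the current
-directory:
-
-.. code-block:: shell
-
-   # Apply the source override file when creating the setup;
-   # the file name is a hypothetical example.
-   $ bitbake-setup init --source-overrides master-next.json
-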
-The ``--source-overrides`` option can be passed multiple times, in which case the -overrides are applied in the order specified in the command-line. - -Here is an example file that overrides the branch of the BitBake repository to -"master-next": - -.. code-block:: json - - { - "description": "Source override file", - "sources": { - "bitbake": { - "git-remote": { - "branch": "master-next", - "remotes": { - "origin": { - "uri": "git://git.openembedded.org/bitbake;protocol=https" - } - }, - "rev": "master-next" - } - } - }, - "version": "1.0" - } - -- The ``version`` parameter contains the version of the used configuration, and - should match the one of the :term:`Generic Configuration` file in use. - -- The ``sources`` section contains the same options as the ``sources`` option - of a :term:`Generic Configuration` file. See the - :ref:`ref-bbsetup-section-config-reference` section for more information. diff --git a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-execution.rst b/bitbake/doc/bitbake-user-manual/bitbake-user-manual-execution.rst deleted file mode 100644 index d407f59c0d..0000000000 --- a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-execution.rst +++ /dev/null @@ -1,761 +0,0 @@ -.. SPDX-License-Identifier: CC-BY-2.5 - -========= -Execution -========= - -| - -The primary purpose for running BitBake is to produce some kind of -output such as a single installable package, a kernel, a software -development kit, or even a full, board-specific bootable Linux image, -complete with bootloader, kernel, and root filesystem. Of course, you -can execute the ``bitbake`` command with options that cause it to -execute single tasks, compile single recipe files, capture or clear -data, or simply return information about the execution environment. - -This chapter describes BitBake's execution process from start to finish -when you use it to create an image. The execution process is launched -using the following command form:: - - $ bitbake target - -For information on -the BitBake command and its options, see ":ref:`The BitBake Command -`" section. - -.. note:: - - Prior to executing BitBake, you should take advantage of available - parallel thread execution on your build host by setting the - :term:`BB_NUMBER_THREADS` variable in - your project's ``local.conf`` configuration file. - - A common method to determine this value for your build host is to run - the following:: - - $ grep processor /proc/cpuinfo - - This command returns - the number of processors, which takes into account hyper-threading. - Thus, a quad-core build host with hyper-threading most likely shows - eight processors, which is the value you would then assign to - :term:`BB_NUMBER_THREADS`. - - A possibly simpler solution is that some Linux distributions (e.g. - Debian and Ubuntu) provide the ``ncpus`` command. - -Parsing the Base Configuration Metadata -======================================= - -The first thing BitBake does is parse base configuration metadata. Base -configuration metadata consists of your project's ``bblayers.conf`` file -to determine what layers BitBake needs to recognize, all necessary -``layer.conf`` files (one from each layer), and ``bitbake.conf``. The -data itself is of various types: - -- **Recipes:** Details about particular pieces of software. - -- **Class Data:** An abstraction of common build information (e.g. how to - build a Linux kernel). - -- **Configuration Data:** Machine-specific settings, policy decisions, - and so forth. 
Configuration data acts as the glue to bind everything - together. - -The ``layer.conf`` files are used to construct key variables such as -:term:`BBPATH` and :term:`BBFILES`. :term:`BBPATH` is used to search for -configuration files under the ``conf`` directory and class files under the -``classes-global``, ``classes-recipe`` and ``classes`` directories. -:term:`BBFILES` is used to locate both recipe and recipe append files (``.bb`` -and ``.bbappend``). If there is no ``bblayers.conf`` file, it is assumed the -user has set the :term:`BBPATH` and :term:`BBFILES` directly in the environment. - -Next, the ``bitbake.conf`` file is located using the :term:`BBPATH` variable -that was just constructed. The ``bitbake.conf`` file may also include -other configuration files using the ``include`` or ``require`` -directives. - -Prior to parsing configuration files, BitBake looks at certain -variables, including: - -- :term:`BB_ENV_PASSTHROUGH` -- :term:`BB_ENV_PASSTHROUGH_ADDITIONS` -- :term:`BB_PRESERVE_ENV` -- :term:`BB_ORIGENV` -- :term:`BITBAKE_UI` - -The first four variables in this list relate to how BitBake treats shell -environment variables during task execution. By default, BitBake cleans -the environment variables and provides tight control over the shell -execution environment. However, through the use of these first four -variables, you can apply your control regarding the environment -variables allowed to be used by BitBake in the shell during execution of -tasks. See the -":ref:`bitbake-user-manual/bitbake-user-manual-metadata:Passing Information Into the Build Task Environment`" -section and the information about these variables in the variable -glossary for more information on how they work and on how to use them. - -The base configuration metadata is global and therefore affects all -recipes and tasks that are executed. - -BitBake first searches the current working directory for an optional -``conf/bblayers.conf`` configuration file. This file is expected to -contain a :term:`BBLAYERS` variable that is a -space-delimited list of 'layer' directories. Recall that if BitBake -cannot find a ``bblayers.conf`` file, then it is assumed the user has -set the :term:`BBPATH` and :term:`BBFILES` variables directly in the -environment. - -For each directory (layer) in this list, a ``conf/layer.conf`` file is -located and parsed with the :term:`LAYERDIR` variable -being set to the directory where the layer was found. The idea is these -files automatically set up :term:`BBPATH` and other -variables correctly for a given build directory. - -BitBake then expects to find the ``conf/bitbake.conf`` file somewhere in -the user-specified :term:`BBPATH`. That configuration file generally has -include directives to pull in any other metadata such as files specific -to the architecture, the machine, the local environment, and so forth. - -Only variable definitions and include directives are allowed in BitBake -``.conf`` files. Some variables directly influence BitBake's behavior. -These variables might have been set from the environment depending on -the environment variables previously mentioned or set in the -configuration files. The ":ref:`bitbake-user-manual/bitbake-user-manual-ref-variables:Variables Glossary`" -chapter presents a full list of -variables. - -After parsing configuration files, BitBake uses its rudimentary -inheritance mechanism, which is through class files, to inherit some -standard classes. 
BitBake parses a class when the inherit directive
-responsible for getting that class is encountered.
-
-The ``base.bbclass`` file is always included. Other classes that are
-specified in the configuration using the
-:term:`INHERIT` variable are also included. BitBake
-searches for class files in a ``classes`` subdirectory under the paths
-in :term:`BBPATH` in the same way as configuration files.
-
-A good way to get an idea of the configuration files and the class files
-used in your execution environment is to run the following BitBake
-command::
-
-   $ bitbake -e > mybb.log
-
-Examining the top of the ``mybb.log`` file
-shows you the many configuration files and class files used in your
-execution environment.
-
-.. note::
-
-   You need to be aware of how BitBake parses curly braces. If a recipe
-   uses a closing curly brace within the function and the character has
-   no leading spaces, BitBake produces a parsing error. If you use a
-   pair of curly braces in a shell function, the closing curly brace
-   must not be located at the start of the line without leading spaces.
-
-   Here is an example that causes BitBake to produce a parsing error::
-
-      fakeroot create_shar() {
-          cat << "EOF" > ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
-      usage()
-      {
-          echo "test"
-          ###### The following "}" at the start of the line causes a parsing error ######
-      }
-      EOF
-      }
-
-   Writing the recipe this way avoids the error::
-
-      fakeroot create_shar() {
-          cat << "EOF" > ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
-      usage()
-      {
-          echo "test"
-          ###### The following "}" with a leading space at the start of the line avoids the error ######
-       }
-      EOF
-      }
-
-Locating and Parsing Recipes
-============================
-
-During the configuration phase, BitBake will have set
-:term:`BBFILES`. BitBake now uses it to construct a
-list of recipes to parse, along with any append files (``.bbappend``) to
-apply. :term:`BBFILES` is a space-separated list of available files and
-supports wildcards. An example would be::
-
-   BBFILES = "/path/to/bbfiles/*.bb /path/to/appends/*.bbappend"
-
-BitBake parses each
-recipe and append file located with :term:`BBFILES` and stores the values of
-various variables into the datastore.
-
-.. note::
-
-   Append files are applied in the order they are encountered in :term:`BBFILES`.
-
-For each file, a fresh copy of the base configuration is made, then the
-recipe is parsed line by line. Any inherit statements cause BitBake to
-find and then parse class files (``.bbclass``) using
-:term:`BBPATH` as the search path. Finally, BitBake
-parses in order any append files found in :term:`BBFILES`.
-
-One common convention is to use the recipe filename to define pieces of
-metadata. For example, in ``bitbake.conf`` the recipe name and version
-are used to set the variables :term:`PN` and
-:term:`PV`::
-
-   PN = "${@bb.parse.vars_from_file(d.getVar('FILE', False),d)[0] or 'defaultpkgname'}"
-   PV = "${@bb.parse.vars_from_file(d.getVar('FILE', False),d)[1] or '1.0'}"
-
-In this example, a recipe called "something_1.2.3.bb" would set
-:term:`PN` to "something" and :term:`PV` to "1.2.3".
-
-By the time parsing is complete for a recipe, BitBake has a list of
-tasks that the recipe defines and a set of data consisting of keys and
-values as well as dependency information about the tasks.
-
-BitBake does not need all of this information. It only needs a small
-subset of the information to make decisions about the recipe.
-Consequently, BitBake caches the values in which it is interested and
-does not store the rest of the information.
Experience has shown it is -faster to re-parse the metadata than to try and write it out to the disk -and then reload it. - -Where possible, subsequent BitBake commands reuse this cache of recipe -information. The validity of this cache is determined by first computing -a checksum of the base configuration data (see -:term:`BB_HASHCONFIG_IGNORE_VARS`) and -then checking if the checksum matches. If that checksum matches what is -in the cache and the recipe and class files have not changed, BitBake is -able to use the cache. BitBake then reloads the cached information about -the recipe instead of reparsing it from scratch. - -Recipe file collections exist to allow the user to have multiple -repositories of ``.bb`` files that contain the same exact package. For -example, one could easily use them to make one's own local copy of an -upstream repository, but with custom modifications that one does not -want upstream. Here is an example:: - - BBFILES = "/stuff/openembedded/*/*.bb /stuff/openembedded.modified/*/*.bb" - BBFILE_COLLECTIONS = "upstream local" - BBFILE_PATTERN_upstream = "^/stuff/openembedded/" - BBFILE_PATTERN_local = "^/stuff/openembedded.modified/" - BBFILE_PRIORITY_upstream = "5" - BBFILE_PRIORITY_local = "10" - -.. note:: - - The layers mechanism is now the preferred method of collecting code. - While the collections code remains, its main use is to set layer - priorities and to deal with overlap (conflicts) between layers. - -.. _bb-bitbake-providers: - -Providers -========= - -Assuming BitBake has been instructed to execute a target and that all -the recipe files have been parsed, BitBake starts to figure out how to -build the target. BitBake looks through the :term:`PROVIDES` list for each -of the recipes. A :term:`PROVIDES` list is the list of names by which the -recipe can be known. Each recipe's :term:`PROVIDES` list is created -implicitly through the recipe's :term:`PN` variable and -explicitly through the recipe's :term:`PROVIDES` -variable, which is optional. - -When a recipe uses :term:`PROVIDES`, that recipe's functionality can be -found under an alternative name or names other than the implicit :term:`PN` -name. As an example, suppose a recipe named ``keyboard_1.0.bb`` -contained the following:: - - PROVIDES += "fullkeyboard" - -The :term:`PROVIDES` -list for this recipe becomes "keyboard", which is implicit, and -"fullkeyboard", which is explicit. Consequently, the functionality found -in ``keyboard_1.0.bb`` can be found under two different names. - -.. _bb-bitbake-preferences: - -Preferences -=========== - -The :term:`PROVIDES` list is only part of the solution for figuring out a -target's recipes. Because targets might have multiple providers, BitBake -needs to prioritize providers by determining provider preferences. - -A common example in which a target has multiple providers is -"virtual/kernel", which is on the :term:`PROVIDES` list for each kernel -recipe. Each machine often selects the best kernel provider by using a -line similar to the following in the machine configuration file:: - - PREFERRED_PROVIDER_virtual/kernel = "linux-yocto" - -The default :term:`PREFERRED_PROVIDER` is the provider -with the same name as the target. BitBake iterates through each target -it needs to build and resolves them and their dependencies using this -process. - -Understanding how providers are chosen is made complicated by the fact -that multiple versions might exist for a given provider. BitBake -defaults to the highest version of a provider. 
Version comparisons are -made using the same method as Debian. You can use the -:term:`PREFERRED_VERSION` variable to -specify a particular version. You can influence the order by using the -:term:`DEFAULT_PREFERENCE` variable. - -By default, files have a preference of "0". Setting -:term:`DEFAULT_PREFERENCE` to "-1" makes the recipe unlikely to be used -unless it is explicitly referenced. Setting :term:`DEFAULT_PREFERENCE` to -"1" makes it likely the recipe is used. :term:`PREFERRED_VERSION` overrides -any :term:`DEFAULT_PREFERENCE` setting. :term:`DEFAULT_PREFERENCE` is often used -to mark newer and more experimental recipe versions until they have -undergone sufficient testing to be considered stable. - -When there are multiple "versions" of a given recipe, BitBake defaults -to selecting the most recent version, unless otherwise specified. If the -recipe in question has a -:term:`DEFAULT_PREFERENCE` set lower than -the other recipes (default is 0), then it will not be selected. This -allows the person or persons maintaining the repository of recipe files -to specify their preference for the default selected version. -Additionally, the user can specify their preferred version. - -If the first recipe is named ``a_1.1.bb``, then the -:term:`PN` variable will be set to "a", and the -:term:`PV` variable will be set to 1.1. - -Thus, if a recipe named ``a_1.2.bb`` exists, BitBake will choose 1.2 by -default. However, if you define the following variable in a ``.conf`` -file that BitBake parses, you can change that preference:: - - PREFERRED_VERSION_a = "1.1" - -.. note:: - - It is common for a recipe to provide two versions -- a stable, - numbered (and preferred) version, and a version that is automatically - checked out from a source code repository that is considered more - "bleeding edge" but can be selected only explicitly. - - For example, in the OpenEmbedded codebase, there is a standard, - versioned recipe file for BusyBox, ``busybox_1.22.1.bb``, but there - is also a Git-based version, ``busybox_git.bb``, which explicitly - contains the line :: - - DEFAULT_PREFERENCE = "-1" - - to ensure that the - numbered, stable version is always preferred unless the developer - selects otherwise. - -.. _bb-bitbake-dependencies: - -Dependencies -============ - -Each target BitBake builds consists of multiple tasks such as ``fetch``, -``unpack``, ``patch``, ``configure``, and ``compile``. For best -performance on multi-core systems, BitBake considers each task as an -independent entity with its own set of dependencies. - -Dependencies are defined through several variables. You can find -information about variables BitBake uses in the -:doc:`bitbake-user-manual-ref-variables` near the end of this manual. At a -basic level, it is sufficient to know that BitBake uses the -:term:`DEPENDS` and -:term:`RDEPENDS` variables when calculating -dependencies. - -For more information on how BitBake handles dependencies, see the -:ref:`bitbake-user-manual/bitbake-user-manual-metadata:Dependencies` -section. - -.. _ref-bitbake-tasklist: - -The Task List -============= - -Based on the generated list of providers and the dependency information, -BitBake can now calculate exactly what tasks it needs to run and in what -order it needs to run them. The -:ref:`bitbake-user-manual/bitbake-user-manual-execution:executing tasks` -section has more information on how BitBake chooses which task to -execute next. 
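-
-One way to inspect the task list and task ordering BitBake has computed,
-without actually running a build, is to ask for the task dependency graph.
-This is a minimal sketch; the target name below is a placeholder for whatever
-you would normally build::
-
-   # Save task dependency information in dot syntax (e.g. a
-   # "task-depends.dot" file) instead of executing the tasks.
-   $ bitbake -g mytarget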
- -The build now starts with BitBake forking off threads up to the limit -set in the :term:`BB_NUMBER_THREADS` -variable. BitBake continues to fork threads as long as there are tasks -ready to run, those tasks have all their dependencies met, and the -thread threshold has not been exceeded. - -It is worth noting that you can greatly speed up the build time by -properly setting the :term:`BB_NUMBER_THREADS` variable. - -As each task completes, a timestamp is written to the directory -specified by the :term:`STAMP` variable. On subsequent -runs, BitBake looks in the build directory within ``tmp/stamps`` and -does not rerun tasks that are already completed unless a timestamp is -found to be invalid. Currently, invalid timestamps are only considered -on a per recipe file basis. So, for example, if the configure stamp has -a timestamp greater than the compile timestamp for a given target, then -the compile task would rerun. Running the compile task again, however, -has no effect on other providers that depend on that target. - -The exact format of the stamps is partly configurable. In modern -versions of BitBake, a hash is appended to the stamp so that if the -configuration changes, the stamp becomes invalid and the task is -automatically rerun. This hash, or signature used, is governed by the -signature policy that is configured (see the -:ref:`bitbake-user-manual/bitbake-user-manual-execution:checksums (signatures)` -section for information). It is also -possible to append extra metadata to the stamp using the -``[stamp-extra-info]`` task flag. For example, OpenEmbedded uses this -flag to make some tasks machine-specific. - -.. note:: - - Some tasks are marked as "nostamp" tasks. No timestamp file is - created when these tasks are run. Consequently, "nostamp" tasks are - always rerun. - -For more information on tasks, see the -:ref:`bitbake-user-manual/bitbake-user-manual-metadata:tasks` section. - -Executing Tasks -=============== - -Tasks can be either a shell task or a Python task. For shell tasks, -BitBake writes a shell script to -``${``\ :term:`T`\ ``}/run.do_taskname.pid`` and then -executes the script. The generated shell script contains all the -exported variables, and the shell functions with all variables expanded. -Output from the shell script goes to the file -``${``\ :term:`T`\ ``}/log.do_taskname.pid``. Looking at the expanded shell functions in -the run file and the output in the log files is a useful debugging -technique. - -For Python tasks, BitBake executes the task internally and logs -information to the controlling terminal. Future versions of BitBake will -write the functions to files similar to the way shell tasks are handled. -Logging will be handled in a way similar to shell tasks as well. - -The order in which BitBake runs the tasks is controlled by its task -scheduler. It is possible to configure the scheduler and define custom -implementations for specific use cases. For more information, see these -variables that control the behavior: - -- :term:`BB_SCHEDULER` - -- :term:`BB_SCHEDULERS` - -It is possible to have functions run before and after a task's main -function. This is done using the ``[prefuncs]`` and ``[postfuncs]`` -flags of the task that lists the functions to run. - -.. _checksums: - -Checksums (Signatures) -====================== - -A checksum is a unique signature of a task's inputs. The signature of a -task can be used to determine if a task needs to be run. 
Because it is a -change in a task's inputs that triggers running the task, BitBake needs -to detect all the inputs to a given task. For shell tasks, this turns -out to be fairly easy because BitBake generates a "run" shell script for -each task and it is possible to create a checksum that gives you a good -idea of when the task's data changes. - -To complicate the problem, some things should not be included in the -checksum. First, there is the actual specific build path of a given task -- the working directory. It does not matter if the working directory -changes because it should not affect the output for target packages. The -simplistic approach for excluding the working directory is to set it to -some fixed value and create the checksum for the "run" script. BitBake -goes one step better and uses the -:term:`BB_BASEHASH_IGNORE_VARS` variable -to define a list of variables that should never be included when -generating the signatures. - -Another problem results from the "run" scripts containing functions that -might or might not get called. The incremental build solution contains -code that figures out dependencies between shell functions. This code is -used to prune the "run" scripts down to the minimum set, thereby -alleviating this problem and making the "run" scripts much more readable -as a bonus. - -So far we have solutions for shell scripts. What about Python tasks? The -same approach applies even though these tasks are more difficult. The -process needs to figure out what variables a Python function accesses -and what functions it calls. Again, the incremental build solution -contains code that first figures out the variable and function -dependencies, and then creates a checksum for the data used as the input -to the task. - -Like the working directory case, situations exist where dependencies -should be ignored. For these cases, you can instruct the build process -to ignore a dependency by using a line like the following:: - - PACKAGE_ARCHS[vardepsexclude] = "MACHINE" - -This example ensures that the -``PACKAGE_ARCHS`` variable does not depend on the value of ``MACHINE``, -even if it does reference it. - -Equally, there are cases where we need to add dependencies BitBake is -not able to find. You can accomplish this by using a line like the -following:: - - PACKAGE_ARCHS[vardeps] = "MACHINE" - -This example explicitly -adds the ``MACHINE`` variable as a dependency for ``PACKAGE_ARCHS``. - -Consider a case with in-line Python, for example, where BitBake is not -able to figure out dependencies. When running in debug mode (i.e. using -``-DDD``), BitBake produces output when it discovers something for which -it cannot figure out dependencies. - -Thus far, this section has limited discussion to the direct inputs into -a task. Information based on direct inputs is referred to as the -"basehash" in the code. However, there is still the question of a task's -indirect inputs --- the things that were already built and present in the -build directory. The checksum (or signature) for a particular task needs -to add the hashes of all the tasks on which the particular task depends. -Choosing which dependencies to add is a policy decision. However, the -effect is to generate a master checksum that combines the basehash and -the hashes of the task's dependencies. - -At the code level, there are a variety of ways both the basehash and the -dependent task hashes can be influenced. 
Within the BitBake
-configuration file, we can give BitBake some extra information to help
-it construct the basehash. The following statement effectively results
-in a list of global variable dependency excludes --- variables never
-included in any checksum. This example uses variables from OpenEmbedded
-to help illustrate the concept::
-
-   BB_BASEHASH_IGNORE_VARS ?= "TMPDIR FILE PATH PWD BB_TASKHASH BBPATH DL_DIR \
-       SSTATE_DIR THISDIR FILESEXTRAPATHS FILE_DIRNAME HOME LOGNAME SHELL \
-       USER FILESPATH STAGING_DIR_HOST STAGING_DIR_TARGET COREBASE PRSERV_HOST \
-       PRSERV_DUMPDIR PRSERV_DUMPFILE PRSERV_LOCKDOWN PARALLEL_MAKE \
-       CCACHE_DIR EXTERNAL_TOOLCHAIN CCACHE CCACHE_DISABLE LICENSE_PATH SDKPKGSUFFIX"
-
-The previous example excludes the work directory, which is part of
-``TMPDIR``.
-
-The rules for deciding which hashes of dependent tasks to include
-through dependency chains are more complex and are generally
-accomplished with a Python function. The code in
-``meta/lib/oe/sstatesig.py`` shows two examples of this and also
-illustrates how you can insert your own policy into the system if so
-desired. This file defines the basic signature generator
-OpenEmbedded-Core uses: "OEBasicHash". By default, there
-is a dummy "noop" signature handler enabled in BitBake. This means that
-behavior is unchanged from previous versions. ``OE-Core`` uses the
-"OEBasicHash" signature handler by default through this setting in the
-``bitbake.conf`` file::
-
-   BB_SIGNATURE_HANDLER ?= "OEBasicHash"
-
-The main feature of the "OEBasicHash" :term:`BB_SIGNATURE_HANDLER` is that
-it adds the task hash to the stamp files. Thanks to this, any metadata
-change will change the task hash, automatically causing the task to be run
-again. This removes the need to bump :term:`PR` values, and changes to
-metadata automatically ripple across the build.
-
-It is also worth noting that the end result of signature
-generators is to make some dependency and hash information available to
-the build. This information includes:
-
-- ``BB_BASEHASH_task-``\ *taskname*: The base hashes for each task in the
-  recipe.
-
-- ``BB_BASEHASH_``\ *filename:taskname*: The base hashes for each
-  dependent task.
-
-- :term:`BB_TASKHASH`: The hash of the currently running task.
-
-BitBake's "-S" option lets you debug BitBake's
-processing of signatures. The options passed to -S allow different
-debugging modes to be used, either using BitBake's own debug functions
-or possibly those defined in the metadata/signature handler itself. The
-simplest parameter to pass is "none", which causes a set of signature
-information to be written out into ``STAMPS_DIR`` corresponding to the
-targets specified. The other currently available parameter is
-"printdiff", which causes BitBake to try to establish the most recent
-signature match it can (e.g. in the sstate cache) and then compare the
-matched signatures to determine the stamps and delta where the two
-stamp trees diverge. This can be used to determine why
-tasks need to be re-run in situations where that is not expected.
-
-.. note::
-
-   It is likely that future versions of BitBake will provide other
-   signature handlers triggered through additional "-S" parameters.
-
-You can find more information on checksum metadata in the
-:ref:`bitbake-user-manual/bitbake-user-manual-metadata:task checksums and setscene`
-section.
-
-Setscene
-========
-
-The setscene process enables BitBake to handle "pre-built" artifacts.
-The ability to handle and reuse these artifacts allows BitBake the
-luxury of not having to build something from scratch every time.
-Instead, BitBake can use, when possible, existing build artifacts.
-
-BitBake needs to have reliable data indicating whether or not an
-artifact is compatible. Signatures, described in the previous section,
-provide an ideal way of representing whether an artifact is compatible.
-If a signature is the same, an object can be reused.
-
-If an object can be reused, the problem then becomes how to replace a
-given task or set of tasks with the pre-built artifact. BitBake solves
-the problem with the "setscene" process.
-
-When BitBake is asked to build a given target, before building anything,
-it first asks whether cached information is available for any of the
-targets it's building, or any of the intermediate targets. If cached
-information is available, BitBake uses this information instead of
-running the main tasks.
-
-BitBake first calls the function defined by the
-:term:`BB_HASHCHECK_FUNCTION` variable
-with a list of tasks and corresponding hashes it wants to build. This
-function is designed to be fast and returns a list of the tasks for
-which it believes it can obtain artifacts.
-
-Next, for each of the tasks that were returned as possibilities, BitBake
-executes a setscene version of the task that the possible artifact
-covers. Setscene versions of a task have the string "_setscene" appended
-to the task name. So, for example, the task with the name ``xxx`` has a
-setscene task named ``xxx_setscene``. The setscene version of the task
-executes and provides the necessary artifacts returning either success
-or failure.
-
-As previously mentioned, an artifact can cover more than one task. For
-example, it is pointless to obtain a compiler if you already have the
-compiled binary. To handle this, BitBake calls the
-:term:`BB_SETSCENE_DEPVALID` function for
-each successful setscene task to know whether or not it needs to obtain
-the dependencies of that task.
-
-You can find more information on setscene metadata in the
-:ref:`bitbake-user-manual/bitbake-user-manual-metadata:task checksums and setscene`
-section.
-
-Logging
-=======
-
-In addition to the standard command line options that control how verbose
-builds are when executed, BitBake also supports user-defined
-configuration of the `Python
-logging <https://docs.python.org/3/library/logging.html>`__ facilities
-through the :term:`BB_LOGCONFIG` variable. This
-variable defines a JSON or YAML `logging
-configuration <https://docs.python.org/3/library/logging.config.html>`__
-that will be intelligently merged into the default configuration. The
-logging configuration is merged using the following rules:
-
-- The user defined configuration will completely replace the default
-  configuration if the top level key ``bitbake_merge`` is set to the value
-  ``False``. In this case, all other rules are ignored.
-
-- The user configuration must have a top level ``version`` which must
-  match the value of the default configuration.
-
-- Any keys defined in the ``handlers``, ``formatters``, or ``filters``
-  sections will be merged into the same section in the default configuration,
-  with a user specified key replacing a default one if there
-  is a conflict. In practice, this means that if both the default
-  configuration and user configuration specify a handler named
-  ``myhandler``, the user defined one will replace the default.
To - prevent the user from inadvertently replacing a default handler, - formatter, or filter, all of the default ones are named with a prefix - of "``BitBake.``" - -- If a logger is defined by the user with the key ``bitbake_merge`` set - to ``False``, that logger will be completely replaced by user - configuration. In this case, no other rules will apply to that - logger. - -- All user defined ``filter`` and ``handlers`` properties for a given - logger will be merged with corresponding properties from the default - logger. For example, if the user configuration adds a filter called - ``myFilter`` to the ``BitBake.SigGen``, and the default configuration - adds a filter called ``BitBake.defaultFilter``, both filters will be - applied to the logger - -As a first example, you can create a ``hashequiv.json`` user logging -configuration file to log all Hash Equivalence related messages of ``VERBOSE`` -or higher priority to a file called ``hashequiv.log``:: - - { - "version": 1, - "handlers": { - "autobuilderlog": { - "class": "logging.FileHandler", - "formatter": "logfileFormatter", - "level": "DEBUG", - "filename": "hashequiv.log", - "mode": "w" - } - }, - "formatters": { - "logfileFormatter": { - "format": "%(name)s: %(levelname)s: %(message)s" - } - }, - "loggers": { - "BitBake.SigGen.HashEquiv": { - "level": "VERBOSE", - "handlers": ["autobuilderlog"] - }, - "BitBake.RunQueue.HashEquiv": { - "level": "VERBOSE", - "handlers": ["autobuilderlog"] - } - } - } - -Then set the :term:`BB_LOGCONFIG` variable in ``conf/local.conf``:: - - BB_LOGCONFIG = "hashequiv.json" - -Another example is this ``warn.json`` file to log all ``WARNING`` and -higher priority messages to a ``warn.log`` file:: - - { - "version": 1, - "formatters": { - "warnlogFormatter": { - "()": "bb.msg.BBLogFormatter", - "format": "%(levelname)s: %(message)s" - } - }, - - "handlers": { - "warnlog": { - "class": "logging.FileHandler", - "formatter": "warnlogFormatter", - "level": "WARNING", - "filename": "warn.log" - } - }, - - "loggers": { - "BitBake": { - "handlers": ["warnlog"] - } - }, - - "@disable_existing_loggers": false - } - -Note that BitBake's helper classes for structured logging are implemented in -``lib/bb/msg.py``. diff --git a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-fetching.rst b/bitbake/doc/bitbake-user-manual/bitbake-user-manual-fetching.rst deleted file mode 100644 index 2b06c1d471..0000000000 --- a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-fetching.rst +++ /dev/null @@ -1,888 +0,0 @@ -.. SPDX-License-Identifier: CC-BY-2.5 - -===================== -File Download Support -===================== - -| - -BitBake's fetch module is a standalone piece of library code that deals -with the intricacies of downloading source code and files from remote -systems. Fetching source code is one of the cornerstones of building -software. As such, this module forms an important part of BitBake. - -The current fetch module is called "fetch2" and refers to the fact that -it is the second major version of the API. The original version is -obsolete and has been removed from the codebase. Thus, in all cases, -"fetch" refers to "fetch2" in this manual. - -The Download (Fetch) -==================== - -BitBake takes several steps when fetching source code or files. The -fetcher codebase deals with two distinct processes in order: obtaining -the files from somewhere (cached or otherwise) and then unpacking those -files into a specific location and perhaps in a specific way. 
Getting
-and unpacking the files is optionally followed by patching.
-Patching, however, is not covered by this module.
-
-The code to execute the first part of this process, a fetch, looks
-something like the following::
-
-   src_uri = (d.getVar('SRC_URI') or "").split()
-   fetcher = bb.fetch2.Fetch(src_uri, d)
-   fetcher.download()
-
-This code sets up an instance of the fetch class. The instance uses a
-space-separated list of URLs from the :term:`SRC_URI`
-variable and then calls the ``download`` method to download the files.
-
-The instantiation of the fetch class is usually followed by::
-
-   rootdir = d.getVar('UNPACKDIR')
-   fetcher.unpack(rootdir)
-
-This code unpacks the downloaded files to the location specified by
-``UNPACKDIR``.
-
-.. note::
-
-   For convenience, the naming in these examples matches the variables
-   used by OpenEmbedded. If you want to see the above code in action,
-   examine the OpenEmbedded class file ``base.bbclass``.
-
-The :term:`SRC_URI` and ``UNPACKDIR`` variables are not hardcoded into the
-fetcher, since those fetcher methods can be (and are) called with
-different variable names. In OpenEmbedded for example, the shared state
-(sstate) code uses the fetch module to fetch the sstate files.
-
-When the ``download()`` method is called, BitBake tries to resolve the
-URLs by looking for source files in a specific search order:
-
-- *Pre-mirror Sites:* BitBake first uses pre-mirrors to try and find
-  source files. These locations are defined using the
-  :term:`PREMIRRORS` variable.
-
-- *Source URI:* If pre-mirrors fail, BitBake uses the original URL (e.g.
-  from :term:`SRC_URI`).
-
-- *Mirror Sites:* If fetch failures occur, BitBake next uses mirror
-  locations as defined by the :term:`MIRRORS` variable.
-
-For each URL passed to the fetcher, the fetcher calls the submodule that
-handles that particular URL type. This behavior can be the source of
-some confusion when you are providing URLs for the :term:`SRC_URI` variable.
-Consider the following two URLs::
-
-   https://git.yoctoproject.org/git/poky;protocol=git
-   git://git.yoctoproject.org/git/poky;protocol=http
-
-In the former case, the URL is passed to the ``wget`` fetcher, which does not
-understand "git". Therefore, the latter case is the correct form, since the Git
-fetcher does know how to use HTTP as a transport.
-
-Here are some examples that show commonly used mirror definitions::
-
-   PREMIRRORS ?= "\
-       bzr://.*/.\* http://somemirror.org/sources/ \
-       cvs://.*/.\* http://somemirror.org/sources/ \
-       git://.*/.\* http://somemirror.org/sources/ \
-       hg://.*/.\* http://somemirror.org/sources/ \
-       osc://.*/.\* http://somemirror.org/sources/ \
-       p4://.*/.\* http://somemirror.org/sources/ \
-       svn://.*/.\* http://somemirror.org/sources/"
-
-   MIRRORS =+ "\
-       ftp://.*/.\* http://somemirror.org/sources/ \
-       http://.*/.\* http://somemirror.org/sources/ \
-       https://.*/.\* http://somemirror.org/sources/"
-
-It is useful to note that BitBake
-supports cross-URLs. It is possible to mirror a Git repository on an
-HTTP server as a tarball. This is what the ``git://`` mapping in the
-previous example does.
-
-Since network accesses are slow, BitBake maintains a cache of files
-downloaded from the network. Any source files that are not local (i.e.
-downloaded from the Internet) are placed into the download directory,
-which is specified by the :term:`DL_DIR` variable.
-
-File integrity is of key importance for reproducing builds.
For
-non-local archive downloads, the fetcher code can verify SHA-256 and MD5
-checksums to ensure the archives have been downloaded correctly. You can
-specify these checksums by using the :term:`SRC_URI` variable with the
-appropriate varflags as follows::
-
-   SRC_URI[md5sum] = "value"
-   SRC_URI[sha256sum] = "value"
-
-You can also specify the checksums as
-parameters on the :term:`SRC_URI` as shown below::
-
-   SRC_URI = "http://example.com/foobar.tar.bz2;md5sum=4a8e0f237e961fd7785d19d07fdb994d"
-
-If multiple URIs exist, you can specify the checksums either directly as
-in the previous example, or you can name the URLs. The following syntax
-shows how you name the URIs::
-
-   SRC_URI = "http://example.com/foobar.tar.bz2;name=foo"
-   SRC_URI[foo.md5sum] = 4a8e0f237e961fd7785d19d07fdb994d
-
-After a file has been downloaded and
-has had its checksum checked, a ".done" stamp is placed in :term:`DL_DIR`.
-BitBake uses this stamp during subsequent builds to avoid downloading or
-comparing a checksum for the file again.
-
-.. note::
-
-   It is assumed that local storage is safe from data corruption. If
-   this were not the case, there would be bigger issues to worry about.
-
-If :term:`BB_STRICT_CHECKSUM` is set, any
-download without a checksum triggers an error message. The
-:term:`BB_NO_NETWORK` variable can be used to
-make any attempted network access a fatal error, which is useful for
-checking that mirrors are complete as well as other things.
-
-If :term:`BB_CHECK_SSL_CERTS` is set to ``0`` then SSL certificate checking will
-be disabled. This variable defaults to ``1`` so SSL certificates are normally
-checked.
-
-.. _bb-the-unpack:
-
-The Unpack
-==========
-
-The unpack process usually immediately follows the download. For all
-URLs except Git URLs, BitBake uses the common ``unpack`` method.
-
-A number of parameters exist that you can specify within the URL to
-govern the behavior of the unpack stage:
-
-- *"unpack":* Controls whether the URL components are unpacked. If set to
-  "1", which is the default, the components are unpacked. If set to
-  "0", the unpack stage leaves the file alone. This parameter is useful
-  when you want an archive to be copied in and not be unpacked.
-
-- *"dos":* Applies to ``.zip`` and ``.jar`` files and specifies whether
-  to use DOS line ending conversion on text files.
-
-- *"striplevel":* Strip the specified number of leading components (levels)
-  from file names on extraction.
-
-- *"subdir":* Unpacks the specific URL to the specified subdirectory
-  within the specified root directory. This path can be further modified
-  by fetcher specific parameters.
-
-- *"name":* Assigns a name to a given component of the :term:`SRC_URI`.
-  This component is later referenced by this name when specifying its
-  :term:`SRCREV` or :term:`SRC_URI` checksum, or to correctly place its
-  revision in the package version string with aid of :term:`SRCREV_FORMAT`.
-
-The unpack call automatically decompresses and extracts files with ".Z",
-".z", ".gz", ".xz", ".zip", ".jar", ".ipk", ".rpm", ".srpm", ".deb" and
-".bz2" extensions as well as various combinations of tarball extensions.
-
-As mentioned, the Git fetcher has its own unpack method that is
-optimized to work with Git trees. Basically, this method works by
-cloning the tree into the final directory. The process is completed
-using references so that there is only one central copy of the Git
-metadata needed.
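-
-To illustrate the unpack parameters described above, several of them can be
-combined on a single URL. The following lines are illustrative sketches, not
-real locations::
-
-   SRC_URI = "http://example.com/sources/archive.tar.gz;striplevel=1;subdir=mydir"
-   SRC_URI = "http://example.com/sources/archive.zip;unpack=0"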
-
-.. _bb-fetchers:
-
-Fetchers
-========
-
-As mentioned earlier, the URL prefix determines which fetcher submodule
-BitBake uses. Each submodule can support different URL parameters, which
-are described in the following sections.
-
-.. _local-file-fetcher:
-
-Local file fetcher (``file://``)
---------------------------------
-
-This submodule handles URLs that begin with ``file://``. The filename
-you specify within the URL can be either an absolute or relative path to
-a file. If the filename is relative, the contents of the
-:term:`FILESPATH` variable are used in the same way
-``PATH`` is used to find executables. If the file cannot be found, it is
-assumed that it is available in :term:`DL_DIR` by the
-time the ``download()`` method is called.
-
-If you specify a directory, the entire directory is unpacked.
-
-Here are a couple of example URLs, the first relative and the second
-absolute::
-
-   SRC_URI = "file://relativefile.patch"
-   SRC_URI = "file:///Users/ich/very_important_software"
-
-.. _http-ftp-fetcher:
-
-HTTP/FTP wget fetcher (``http://``, ``ftp://``, ``https://``)
--------------------------------------------------------------
-
-This fetcher obtains files from web and FTP servers. Internally, the
-fetcher uses the wget utility.
-
-The executable and parameters used are specified by the
-``FETCHCMD_wget`` variable, which defaults to sensible values. The
-fetcher supports a parameter "downloadfilename" that allows the name of
-the downloaded file to be specified. Specifying the name of the
-downloaded file is useful for avoiding collisions in
-:term:`DL_DIR` when dealing with multiple files that
-have the same name.
-
-If a username and password are specified in the ``SRC_URI``, a Basic
-Authorization header will be added to each request, including across redirects.
-To instead limit the Authorization header to the first request, add
-"redirectauth=0" to the list of parameters.
-
-Some example URLs are as follows::
-
-   SRC_URI = "http://oe.handhelds.org/not_there.aac"
-   SRC_URI = "ftp://oe.handhelds.org/not_there_as_well.aac"
-   SRC_URI = "ftp://you@oe.handhelds.org/home/you/secret.plan"
-
-.. note::
-
-   Because URL parameters are delimited by semi-colons, this can
-   introduce ambiguity when parsing URLs that also contain semi-colons,
-   for example::
-
-      SRC_URI = "http://abc123.org/git/?p=gcc/gcc.git;a=snapshot;h=a5dd47"
-
-   Such URLs should be modified by replacing semi-colons with '&'
-   characters::
-
-      SRC_URI = "http://abc123.org/git/?p=gcc/gcc.git&a=snapshot&h=a5dd47"
-
-   In most cases this should work. Treating semi-colons and '&' in
-   queries identically is recommended by the World Wide Web Consortium
-   (W3C). Note that due to the nature of the URL, you may have to
-   specify the name of the downloaded file as well::
-
-      SRC_URI = "http://abc123.org/git/?p=gcc/gcc.git&a=snapshot&h=a5dd47;downloadfilename=myfile.bz2"
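-
-As a further hypothetical example of the authentication behaviour
-described above, the following URL would send the Basic Authorization
-header only with the first request and not across redirects; the host,
-path and credentials are made up::
-
-   SRC_URI = "https://user:password@example.com/protected/sources.tar.gz;redirectauth=0"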
-
-.. _cvs-fetcher:
-
-CVS fetcher (``cvs://``)
-------------------------
-
-This submodule handles checking out files from the CVS version control
-system. You can configure it using a number of different variables:
-
-- :term:`FETCHCMD_cvs `: The name of the executable to use when running
-  the ``cvs`` command. This name is usually "cvs".
-
-- :term:`SRCDATE`: The date to use when fetching the CVS source code. A
-  special value of "now" causes the checkout to be updated on every
-  build.
-
-- :term:`CVSDIR`: Specifies where a temporary
-  checkout is saved. The location is often ``DL_DIR/cvs``.
-
-- CVS_PROXY_HOST: The name to use as a "proxy=" parameter to the
-  ``cvs`` command.
-
-- CVS_PROXY_PORT: The port number to use as a "proxyport=" parameter
-  to the ``cvs`` command.
-
-In addition to the standard username and password URL syntax, you can
-configure the fetcher with the following URL parameters:
-
-- *"method":* The protocol over which to communicate with the CVS
-  server. By default, this protocol is "pserver". If "method" is set to
-  "ext", BitBake examines the "rsh" parameter and sets ``CVS_RSH``. You
-  can use "dir" for local directories.
-
-- *"module":* Specifies the module to check out. You must supply this
-  parameter.
-
-- *"tag":* Describes which CVS TAG should be used for the checkout. By
-  default, the TAG is empty.
-
-- *"date":* Specifies a date. If no "date" is specified, the
-  :term:`SRCDATE` of the configuration is used to
-  checkout a specific date. The special value of "now" causes the
-  checkout to be updated on every build.
-
-- *"localdir":* Used to rename the module. Effectively, you are
-  renaming the output directory to which the module is unpacked. You
-  are forcing the module into a special directory relative to
-  :term:`CVSDIR`.
-
-- *"rsh":* Used in conjunction with the "method" parameter.
-
-- *"scmdata":* Causes the CVS metadata to be maintained in the tarball
-  the fetcher creates when set to "keep". The tarball is expanded into
-  the work directory. By default, the CVS metadata is removed.
-
-- *"fullpath":* Controls whether the resulting checkout is at the
-  module level, which is the default, or is at deeper paths.
-
-- *"norecurse":* Causes the fetcher to only check out the specified
-  directory, without recursing into any subdirectories.
-
-- *"port":* The port to use when connecting to the CVS server.
-
-Some example URLs are as follows::
-
-   SRC_URI = "cvs://CVSROOT;module=mymodule;tag=some-version;method=ext"
-   SRC_URI = "cvs://CVSROOT;module=mymodule;date=20060126;localdir=usethat"
-
-.. _svn-fetcher:
-
-Subversion (SVN) Fetcher (``svn://``)
--------------------------------------
-
-This fetcher submodule fetches code from the Subversion source control
-system. The executable used is specified by ``FETCHCMD_svn``, which
-defaults to "svn". The fetcher's temporary working directory is set by
-:term:`SVNDIR`, which is usually ``DL_DIR/svn``.
-
-The supported parameters are as follows:
-
-- *"module":* The name of the svn module to checkout. You must provide
-  this parameter. You can think of this parameter as the top-level
-  directory of the repository data you want.
-
-- *"path_spec":* A specific directory in which to checkout the
-  specified svn module.
-
-- *"protocol":* The protocol to use, which defaults to "svn". If
-  "protocol" is set to "svn+ssh", the "ssh" parameter is also used.
-
-- *"rev":* The revision of the source code to checkout.
-
-- *"scmdata":* Causes the ".svn" directories to be available during
-  compile-time when set to "keep". By default, these directories are
-  removed.
-
-- *"ssh":* An optional parameter used when "protocol" is set to
-  "svn+ssh". You can use this parameter to specify the ssh program used
-  by svn.
-
-- *"transportuser":* When required, sets the username for the
-  transport. By default, this parameter is empty. The transport
-  username is different from the username used in the main URL, which
-  is passed to the subversion command.
-
-Following are three examples using svn::
-
-   SRC_URI = "svn://myrepos/proj1;module=vip;protocol=http;rev=667"
-   SRC_URI = "svn://myrepos/proj1;module=opie;protocol=svn+ssh"
-   SRC_URI = "svn://myrepos/proj1;module=trunk;protocol=http;path_spec=${MY_DIR}/proj1"
-
-.. _git-fetcher:
-
-Git Fetcher (``git://``)
-------------------------
-
-This fetcher submodule fetches code from the Git source control system.
-The fetcher works by creating a bare clone of the remote into
-:term:`GITDIR`, which is usually ``DL_DIR/git2``. This
-bare clone is then cloned into the work directory during the unpack
-stage when a specific tree is checked out. This is done using alternates
-and by reference to minimize the amount of duplicate data on the disk
-and make the unpack process fast. The executable used can be set with
-``FETCHCMD_git``.
-
-This fetcher supports the following parameters:
-
-- *"protocol":* The protocol used to fetch the files. The default is
-  "git" when a hostname is set. If a hostname is not set, the Git
-  protocol is "file". You can also use "http", "https", "ssh" and
-  "rsync".
-
-  .. note::
-
-     When ``protocol`` is "ssh", the URL expected in :term:`SRC_URI` differs
-     from the one that is typically passed to the ``git clone`` command and
-     provided by the Git server to fetch from. For example, the URL returned
-     by a GitLab server for ``mesa`` when cloning over SSH is
-     ``git@gitlab.freedesktop.org:mesa/mesa.git``, however the expected URL in
-     :term:`SRC_URI` is the following::
-
-        SRC_URI = "git://git@gitlab.freedesktop.org/mesa/mesa.git;branch=main;protocol=ssh;..."
-
-     Note that the ``:`` character is changed to a ``/`` before the path to
-     the project.
-
-- *"nocheckout":* Tells the fetcher to not checkout source code when
-  unpacking when set to "1". Set this option for the URL where there is
-  a custom routine to checkout code. The default is "0".
-
-- *"rebaseable":* Indicates that the upstream Git repository can be
-  rebased. You should set this parameter to "1" if revisions can become
-  detached from branches. In this case, the source mirror tarball is
-  created per revision, which is less efficient. Rebasing the
-  upstream Git repository could cause the current revision to disappear
-  from the upstream repository. This option reminds the fetcher to
-  preserve the local cache carefully for future use. The default value
-  for this parameter is "0".
-
-- *"nobranch":* When set to "1", tells the fetcher not to validate that
-  the revision belongs to the branch. Set this option for a recipe that
-  refers to a commit that is valid in any namespace (branch, tag, ...)
-  rather than on a particular branch. The default is "0".
-
-- *"bareclone":* Tells the fetcher to clone a bare clone into the
-  destination directory without checking out a working tree. Only the
-  raw Git metadata is provided. This parameter implies the "nocheckout"
-  parameter as well.
-
-- *"branch":* The branch(es) of the Git tree to clone. Unless
-  "nobranch" is set to "1", this is a mandatory parameter. The number of
-  branch parameters must match the number of name parameters.
-
-- *"rev":* The revision to use for the checkout. If :term:`SRCREV` is also set,
-  this parameter must match its value.
-
-- *"tag":* Specifies a tag to use when fetching. To correctly resolve
-  tags, BitBake must access the network. If a ``rev`` parameter or
-  :term:`SRCREV` is also specified, network access is not necessary to resolve
-  the tag; instead, it is verified that both resolve to the same commit
-  SHA at unpack time.
-  The ``tag`` parameter is optional, but strongly
-  recommended if the checked out revision is a tag.
-
-- *"subpath":* Limits the checkout to a specific subpath of the tree.
-  By default, the whole tree is checked out.
-
-- *"destsuffix":* The name of the path in which to place the checkout.
-  By default, the path is ``git/``.
-
-- *"usehead":* Enables local ``git://`` URLs to use the current branch
-  HEAD as the revision for use with :term:`AUTOREV`. The "usehead"
-  parameter implies no branch and only works when the transfer protocol
-  is ``file://``.
-
-Here are some example URLs::
-
-   SRC_URI = "git://github.com/fronteed/icheck.git;protocol=https;branch=${PV};tag=${PV}"
-   SRC_URI = "git://github.com/asciidoc/asciidoc-py;protocol=https;branch=main"
-   SRC_URI = "git://git@gitlab.freedesktop.org/mesa/mesa.git;branch=main;protocol=ssh;..."
-
-.. note::
-
-   Specifying passwords directly in ``git://`` URLs is not supported.
-   There are several reasons: :term:`SRC_URI` is often written out to logs and
-   other places, and that could easily leak passwords; it is also all too
-   easy to share metadata without removing passwords. SSH keys, ``~/.netrc``
-   and ``~/.ssh/config`` files can be used as alternatives.
-
-Using tags with the git fetcher may cause surprising behaviour. BitBake needs to
-resolve the tag to a specific revision and to do that, it has to connect to and use
-the upstream repository. This is because the revision the tags point at can change, and
-we have seen cases of this happening in well-known public repositories. This can mean
-many more network connections than expected and recipes may be reparsed at every build.
-Source mirrors will also be bypassed as the upstream repository is the only source
-of truth to resolve the revision accurately. For these reasons, whilst the fetcher
-can support tags, we recommend being specific about revisions in recipes.
-
-.. _gitsm-fetcher:
-
-Git Submodule Fetcher (``gitsm://``)
-------------------------------------
-
-This fetcher submodule inherits from the :ref:`Git
-fetcher` and extends that fetcher's behavior by fetching a
-repository's submodules. :term:`SRC_URI` is passed to the Git fetcher as
-described in the :ref:`bitbake-user-manual/bitbake-user-manual-fetching:git
-fetcher (\`\`git://\`\`)` section.
-
-.. note::
-
-   You must clean a recipe when switching between '``git://``' and
-   '``gitsm://``' URLs.
-
-   The Git Submodules fetcher is not a complete fetcher implementation.
-   The fetcher has known issues where it does not use the normal source
-   mirroring infrastructure properly. Further, the submodule sources it
-   fetches are not visible to the licensing and source archiving
-   infrastructures.
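-
-For illustration, a repository that uses submodules could be fetched
-with a URL such as the following; the host and project are hypothetical,
-and the parameters are the same ones the plain Git fetcher accepts::
-
-   SRC_URI = "gitsm://git.example.com/myproject.git;protocol=https;branch=main"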
-
-.. _clearcase-fetcher:
-
-ClearCase Fetcher (``ccrc://``)
--------------------------------
-
-This fetcher submodule fetches code from a
-`ClearCase `__
-repository.
-
-To use this fetcher, make sure your recipe has proper
-:term:`SRC_URI`, :term:`SRCREV`, and
-:term:`PV` settings. Here is an example::
-
-   SRC_URI = "ccrc://cc.example.org/ccrc;vob=/example_vob;module=/example_module"
-   SRCREV = "EXAMPLE_CLEARCASE_TAG"
-   PV = "${@d.getVar("SRCREV", False).replace("/", "+")}"
-
-The fetcher uses the ``rcleartool`` or
-``cleartool`` remote client, depending on which one is available.
-
-Following are options for the :term:`SRC_URI` statement:
-
-- *"vob":* The name, which must include the leading "/" character,
-  of the ClearCase VOB. This option is required.
-
-- *"module":* The module, which must include the leading "/"
-  character, in the selected VOB.
-
-  .. note::
-
-     The module and vob options are combined to create the load rule in the
-     view config spec. As an example, consider the vob and module values from
-     the SRC_URI statement at the start of this section. Combining those values
-     results in the following::
-
-        load /example_vob/example_module
-
-- *"proto":* The protocol, which can be either ``http`` or ``https``.
-
-By default, the fetcher creates a configuration specification. If you
-want this specification written to an area other than the default, use
-the ``CCASE_CUSTOM_CONFIG_SPEC`` variable in your recipe to define where
-the specification is written.
-
-.. note::
-
-   The :term:`SRCREV` variable loses its functionality if you specify this
-   variable. However, :term:`SRCREV` is still used to label the archive after
-   a fetch even though it does not define what is fetched.
-
-Here are a couple of other behaviors worth mentioning:
-
-- When using ``cleartool``, login is handled by the system and
-  requires no special steps.
-
-- In order to use ``rcleartool`` with authenticated users, an
-  "rcleartool login" is necessary before using the fetcher.
-
-.. _perforce-fetcher:
-
-Perforce Fetcher (``p4://``)
-----------------------------
-
-This fetcher submodule fetches code from the
-`Perforce `__ source control system. The
-executable used is specified by ``FETCHCMD_p4``, which defaults to "p4".
-The fetcher's temporary working directory is set by
-:term:`P4DIR`, which defaults to ``DL_DIR/p4``.
-The fetcher does not make use of a Perforce client; instead, it
-relies on ``p4 files`` to retrieve a list of
-files and ``p4 print`` to transfer the content
-of those files locally.
-
-To use this fetcher, make sure your recipe has proper
-:term:`SRC_URI`, :term:`SRCREV`, and
-:term:`PV` values. The p4 executable is able to use the
-config file defined by your system's ``P4CONFIG`` environment variable
-in order to define the Perforce server URL and port, username, and
-password if you do not wish to keep those values in a recipe itself. If
-you choose not to use ``P4CONFIG``, or to explicitly set variables that
-``P4CONFIG`` can contain, you can specify the ``P4PORT`` value, which is
-the server's URL and port number, and you can specify a username and
-password directly in your recipe within :term:`SRC_URI`.
-
-Here is an example that relies on ``P4CONFIG`` to specify the server URL
-and port, username, and password, and fetches the Head Revision::
-
-   SRC_URI = "p4://example-depot/main/source/..."
-   SRCREV = "${AUTOREV}"
-   PV = "p4-${SRCPV}"
-   S = "${UNPACKDIR}/p4"
-
-Here is an example that specifies the server URL and port, username, and
-password, and fetches a Revision based on a Label::
-
-   P4PORT = "tcp:p4server.example.net:1666"
-   SRC_URI = "p4://user:passwd@example-depot/main/source/..."
-   SRCREV = "release-1.0"
-   PV = "p4-${SRCPV}"
-   S = "${UNPACKDIR}/p4"
-
-.. note::
-
-   You should always set S to "${UNPACKDIR}/p4" in your recipe.
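-
-For illustration, a ``P4CONFIG`` file matching the hypothetical server
-in the second example above might contain the following; the values are
-made up, and the file is read by the ``p4`` executable itself rather
-than by BitBake::
-
-   P4PORT=tcp:p4server.example.net:1666
-   P4USER=user
-   P4PASSWD=passwd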
-
-By default, the fetcher strips the depot location from the local file paths. In
-the above example, the content of ``example-depot/main/source/`` will be placed
-in ``${UNPACKDIR}/p4``. For situations where preserving parts of the remote depot
-paths locally is desirable, the fetcher supports two parameters:
-
-- *"module":*
-  The top-level depot location or directory to fetch. The value of this
-  parameter can also point to a single file within the depot, in which case
-  the local file path will include the module path.
-
-- *"remotepath":*
-  When used with the value "``keep``", the fetcher will mirror the full depot
-  paths locally for the specified location, even in combination with the
-  ``module`` parameter.
-
-Here is an example use of the ``module`` parameter::
-
-   SRC_URI = "p4://user:passwd@example-depot/main;module=source/..."
-
-In this case, the content of the top-level directory ``source/`` will be fetched
-to ``${P4DIR}``, including the directory itself. The top-level directory will
-be accessible at ``${P4DIR}/source/``.
-
-Here is an example use of the ``remotepath`` parameter::
-
-   SRC_URI = "p4://user:passwd@example-depot/main;module=source/...;remotepath=keep"
-
-In this case, the content of the top-level directory ``source/`` will be fetched
-to ``${P4DIR}``, but the complete depot paths will be mirrored locally. The
-top-level directory will be accessible at
-``${P4DIR}/example-depot/main/source/``.
-
-.. _repo-fetcher:
-
-Repo Fetcher (``repo://``)
---------------------------
-
-This fetcher submodule fetches code from the ``google-repo`` source control
-system. The fetcher works by initializing and syncing sources of the
-repository into :term:`REPODIR`, which is usually
-``${DL_DIR}/repo``.
-
-This fetcher supports the following parameters:
-
-- *"protocol":* Protocol to fetch the repository manifest (default:
-  git).
-
-- *"branch":* Branch or tag of repository to get (default: master).
-
-- *"manifest":* Name of the manifest file (default: ``default.xml``).
-
-Here are some example URLs::
-
-   SRC_URI = "repo://REPOROOT;protocol=git;branch=some_branch;manifest=my_manifest.xml"
-   SRC_URI = "repo://REPOROOT;protocol=file;branch=some_branch;manifest=my_manifest.xml"
-
-.. _az-fetcher:
-
-Az Fetcher (``az://``)
-----------------------
-
-This submodule fetches data from an
-`Azure Storage account `__. It inherits its functionality from the HTTP
-wget fetcher, but modifies its behavior to accommodate the usage of a
-`Shared Access Signature (SAS) `__
-for non-public data.
-
-This functionality relies on the following variable:
-
-- :term:`AZ_SAS`: The Azure Storage Shared Access Signature provides secure
-  delegated access to resources. If this variable is set, the Az Fetcher
-  will use it when fetching artifacts from the cloud.
-
-You can specify the AZ_SAS variable prefixed with a ``?`` as shown below::
-
-   AZ_SAS = "?se=2021-01-01&sp=r&sv=2018-11-09&sr=c&skoid=&sig="
-
-Here is an example URL::
-
-   SRC_URI = "az://.blob.core.windows.net//"
-
-It can also be used when setting mirror definitions using the :term:`PREMIRRORS` variable.
-
-.. _gcp-fetcher:
-
-GCP Fetcher (``gs://``)
------------------------
-
-This submodule fetches data from a
-`Google Cloud Storage Bucket `__.
-It uses the `Google Cloud Storage Python Client `__
-to check the status of objects in the bucket and download them.
-The use of the Python client makes it substantially faster than using command
-line tools such as gsutil.
-
-The fetcher requires the Google Cloud Storage Python Client to be installed, along
-with the gsutil tool.
-
-The fetcher requires that the machine has valid credentials for accessing the
-chosen bucket. Instructions for authentication can be found in the
-`Google Cloud documentation `__.
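-
-For example, on a development machine one common way to provide such
-credentials is Application Default Credentials via the Google Cloud CLI,
-assuming it is installed::
-
-   $ gcloud auth application-default login
-
-This is only one of the options described in the documentation linked
-above.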
-
-If used from the OpenEmbedded build system, the fetcher can be used for
-fetching sstate artifacts from a GCS bucket by specifying the
-``SSTATE_MIRRORS`` variable as shown below::
-
-   SSTATE_MIRRORS ?= "\
-       file://.* gs:///PATH \
-   "
-
-The fetcher can also be used in recipes::
-
-   SRC_URI = "gs:////"
-
-However, the checksum of the file should also be provided::
-
-   SRC_URI[sha256sum] = ""
-
-.. _crate-fetcher:
-
-Crate Fetcher (``crate://``)
-----------------------------
-
-This submodule fetches code for
-`Rust language "crates" `__
-corresponding to Rust libraries and programs to compile. Such crates are typically shared
-on https://crates.io/ but this fetcher supports other crate registries too.
-
-The format for the :term:`SRC_URI` setting must be::
-
-   SRC_URI = "crate://REGISTRY/NAME/VERSION"
-
-Here is an example URL::
-
-   SRC_URI = "crate://crates.io/glob/0.2.11"
-
-.. _npm-fetcher:
-
-NPM Fetcher (``npm://``)
-------------------------
-
-This submodule fetches source code from an
-`NPM `__
-Javascript package registry.
-
-The format for the :term:`SRC_URI` setting must be::
-
-   SRC_URI = "npm://some.registry.url;ParameterA=xxx;ParameterB=xxx;..."
-
-This fetcher supports the following parameters:
-
-- *"package":* The NPM package name. This is a mandatory parameter.
-
-- *"version":* The NPM package version. This is a mandatory parameter.
-
-- *"downloadfilename":* Specifies the filename used when storing the downloaded file.
-
-- *"destsuffix":* Specifies the directory to use to unpack the package (default: ``npm``).
-
-Note that the NPM fetcher only fetches the package source itself. The dependencies
-can be fetched through the `npmsw-fetcher`_.
-
-Here is an example URL with both fetchers::
-
-   SRC_URI = " \
-       npm://registry.npmjs.org/;package=cute-files;version=${PV} \
-       npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json \
-   "
-
-See :yocto_docs:`Creating Node Package Manager (NPM) Packages
-`
-in the Yocto Project manual for details about using
-:yocto_docs:`devtool `
-to automatically create a recipe from an NPM URL.
-
-.. _npmsw-fetcher:
-
-NPM shrinkwrap Fetcher (``npmsw://``)
--------------------------------------
-
-This submodule fetches source code from an
-`NPM shrinkwrap `__
-description file, which lists the dependencies
-of an NPM package while locking their versions.
-
-The format for the :term:`SRC_URI` setting must be::
-
-   SRC_URI = "npmsw://some.registry.url;ParameterA=xxx;ParameterB=xxx;..."
-
-This fetcher supports the following parameters:
-
-- *"dev":* Set this parameter to ``1`` to install "devDependencies".
-
-- *"destsuffix":* Specifies the directory to use to unpack the dependencies
-  (``${S}`` by default).
-
-Note that the shrinkwrap file can also be provided by the recipe for
-the package which has such dependencies, for example::
-
-   SRC_URI = " \
-       npm://registry.npmjs.org/;package=cute-files;version=${PV} \
-       npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json \
-   "
-
-Such a file can automatically be generated using
-:yocto_docs:`devtool `
-as described in the :yocto_docs:`Creating Node Package Manager (NPM) Packages
-`
-section of the Yocto Project manual.
-
-Other Fetchers
---------------
-
-Fetch submodules also exist for the following:
-
-- Bazaar (``bzr://``)
-
-- Mercurial (``hg://``)
-
-- OSC (``osc://``)
-
-- S3 (``s3://``)
-
-- Secure FTP (``sftp://``)
-
-- Secure Shell (``ssh://``)
-
-- Trees using Git Annex (``gitannex://``)
-
-No documentation currently exists for these lesser-used fetcher
-submodules. However, you might find the code helpful and readable.
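-
-As a purely illustrative example, the URL syntax for these submodules
-generally follows the same pattern as the fetchers documented above; a
-hypothetical S3 bucket and path might look like this::
-
-   SRC_URI = "s3://example-bucket/downloads/archive.tar.gz"
-
-The fetcher implementations live in ``lib/bb/fetch2/`` of the BitBake
-source tree, which is the authoritative reference for the exact
-parameters each submodule accepts.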
-
-Auto Revisions
-==============
-
-For recipes that need to use the latest revision of their source code,
-set :term:`AUTOREV` as the value of the
-source code repository's :term:`SRCREV`::
-
-   SRCREV = "${AUTOREV}"
-
-.. note::
-
-   With :term:`AUTOREV`, BitBake will always need to take the additional step of
-   querying the remote repository to retrieve the latest available revision.
-
-   Also, recipes using it are not part of the parsing-time cache,
-   and hence are parsed every time.
-
-Multiple Source Control Repositories
-====================================
-
-For some recipes it is necessary to make use of more than one
-version controlled source code repository. In such cases, the recipe
-must provide BitBake with information about how it should include
-the different SCM revisions in its package version string, instead of its
-usual approach with a single :term:`SRCREV`.
-
-For this purpose, the recipe must set the :term:`SRCREV_FORMAT`
-variable. Consider the following example::
-
-   SRC_URI = " \
-       git://git.some.example.com/source-tree.git;name=machine \
-       git://git.some.example.com/metadata.git;name=meta \
-   "
-   SRCREV_machine = "3f9db490a81eeb0077be3c5a5aa1388a2372232f"
-   SRCREV_meta = "1ac1d0ff730fe1dd1371823d562db8126750a98c"
-   SRCREV_FORMAT ?= "meta_machine"
-
-The value given to :term:`SRCREV_FORMAT` references names, which were
-assigned using the ``name`` parameter in the :term:`SRC_URI` definition,
-and which represent the version controlled source code repositories.
-In the above example, the :term:`SRC_URI` contained two URLs named
-"meta" and "machine".
diff --git a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-hello.rst b/bitbake/doc/bitbake-user-manual/bitbake-user-manual-hello.rst
deleted file mode 100644
index 654196ca24..0000000000
--- a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-hello.rst
+++ /dev/null
@@ -1,408 +0,0 @@
-.. SPDX-License-Identifier: CC-BY-2.5
-
-===================
-Hello World Example
-===================
-
-BitBake Hello World
-===================
-
-The simplest example commonly used to demonstrate any new programming
-language or tool is the "`Hello
-World `__" example.
-This appendix demonstrates, in tutorial form, Hello World within the
-context of BitBake. The tutorial describes how to create a new project
-and the applicable metadata files necessary to allow BitBake to build
-it.
-
-Obtaining BitBake
-=================
-
-See the :ref:`bitbake-user-manual/bitbake-user-manual-intro:obtaining bitbake` section for
-information on how to obtain BitBake. Once you have the source code on
-your machine, the BitBake directory appears as follows::
-
-   $ ls -al
-   total 108
-   drwxr-xr-x  9 fawkh 10000  4096 feb 24 12:10 .
-   drwx------ 36 fawkh 10000  4096 mar  2 17:00 ..
-   -rw-r--r--  1 fawkh 10000   365 feb 24 12:10 AUTHORS
-   drwxr-xr-x  2 fawkh 10000  4096 feb 24 12:10 bin
-   -rw-r--r--  1 fawkh 10000 16501 feb 24 12:10 ChangeLog
-   drwxr-xr-x  2 fawkh 10000  4096 feb 24 12:10 classes
-   drwxr-xr-x  2 fawkh 10000  4096 feb 24 12:10 conf
-   drwxr-xr-x  5 fawkh 10000  4096 feb 24 12:10 contrib
-   drwxr-xr-x  6 fawkh 10000  4096 feb 24 12:10 doc
-   drwxr-xr-x  8 fawkh 10000  4096 mar  2 16:26 .git
-   -rw-r--r--  1 fawkh 10000    31 feb 24 12:10 .gitattributes
-   -rw-r--r--  1 fawkh 10000   392 feb 24 12:10 .gitignore
-   drwxr-xr-x 13 fawkh 10000  4096 feb 24 12:11 lib
-   -rw-r--r--  1 fawkh 10000  1224 feb 24 12:10 LICENSE
-   -rw-r--r--  1 fawkh 10000 15394 feb 24 12:10 LICENSE.GPL-2.0-only
-   -rw-r--r--  1 fawkh 10000  1286 feb 24 12:10 LICENSE.MIT
-   -rw-r--r--  1 fawkh 10000   229 feb 24 12:10 MANIFEST.in
-   -rw-r--r--  1 fawkh 10000  2413 feb 24 12:10 README
-   -rw-r--r--  1 fawkh 10000    43 feb 24 12:10 toaster-requirements.txt
-   -rw-r--r--  1 fawkh 10000  2887 feb 24 12:10 TODO
-
-At this point, you should have BitBake cloned to a directory that
-matches the previous listing except for dates and user names.
-
-Setting Up the BitBake Environment
-==================================
-
-First, you need to be sure that you can run BitBake. Set your working
-directory to where your local BitBake files are and run the following
-command::
-
-   $ ./bin/bitbake --version
-   BitBake Build Tool Core version 2.3.1
-
-The console output tells you what version
-you are running.
-
-The recommended method to run BitBake is from a directory of your
-choice. To be able to run BitBake from any directory, you need to add
-the directory containing the ``bitbake`` executable to your shell's
-``PATH`` environment variable. First, look at your current ``PATH``
-variable by entering the following::
-
-   $ echo $PATH
-
-Next, add the directory location
-for the BitBake binary to the ``PATH``. Here is an example that adds the
-``/home/scott-lenovo/bitbake/bin`` directory to the front of the
-``PATH`` variable::
-
-   $ export PATH=/home/scott-lenovo/bitbake/bin:$PATH
-
-You should now be able to enter the ``bitbake`` command from the command
-line while working from any directory.
-
-The Hello World Example
-=======================
-
-The overall goal of this exercise is to build a complete "Hello World"
-example utilizing task and layer concepts. Because this is how modern
-projects such as OpenEmbedded and the Yocto Project utilize BitBake, the
-example provides an excellent starting point for understanding BitBake.
-
-To help you understand how to use BitBake to build targets, the example
-starts with nothing but the ``bitbake`` command, which causes BitBake to
-fail and report problems. The example progresses by adding pieces to the
-build to eventually conclude with a working, minimal "Hello World"
-example.
-
-While every attempt is made to explain what is happening during the
-example, the descriptions cannot cover everything. You can find further
-information throughout this manual. Also, you can actively participate
-in the :oe_lists:`/g/bitbake-devel`
-discussion mailing list about the BitBake build tool.
-
-.. note::
-
-   This example was inspired by and drew heavily from
-   `Mailing List post - The BitBake equivalent of "Hello, World!"
-   `_.
-
-As stated earlier, the goal of this example is to eventually compile
-"Hello World". However, it is unknown what BitBake needs and what you
-have to provide in order to achieve that goal.
Recall that BitBake -utilizes three types of metadata files: -:ref:`bitbake-user-manual/bitbake-user-manual-intro:configuration files`, -:ref:`bitbake-user-manual/bitbake-user-manual-intro:classes`, and -:ref:`bitbake-user-manual/bitbake-user-manual-intro:recipes`. -But where do they go? How does BitBake find -them? BitBake's error messaging helps you answer these types of -questions and helps you better understand exactly what is going on. - -Following is the complete "Hello World" example. - -#. **Create a Project Directory:** First, set up a directory for the - "Hello World" project. Here is how you can do so in your home - directory:: - - $ mkdir ~/hello - $ cd ~/hello - - This is the directory that - BitBake will use to do all of its work. You can use this directory - to keep all the metafiles needed by BitBake. Having a project - directory is a good way to isolate your project. - -#. **Run BitBake:** At this point, you have nothing but a project - directory. Run the ``bitbake`` command and see what it does:: - - $ bitbake - ERROR: The BBPATH variable is not set and bitbake did not find a conf/bblayers.conf file in the expected location. - Maybe you accidentally invoked bitbake from the wrong directory? - - When you run BitBake, it begins looking for metadata files. The - :term:`BBPATH` variable is what tells BitBake where - to look for those files. :term:`BBPATH` is not set and you need to set - it. Without :term:`BBPATH`, BitBake cannot find any configuration files - (``.conf``) or recipe files (``.bb``) at all. BitBake also cannot - find the ``bitbake.conf`` file. - -#. **Setting BBPATH:** For this example, you can set :term:`BBPATH` in - the same manner that you set ``PATH`` earlier in the appendix. You - should realize, though, that it is much more flexible to set the - :term:`BBPATH` variable up in a configuration file for each project. - - From your shell, enter the following commands to set and export the - :term:`BBPATH` variable:: - - $ BBPATH="projectdirectory" - $ export BBPATH - - Use your actual project directory in the command. BitBake uses that - directory to find the metadata it needs for your project. - - .. note:: - - When specifying your project directory, do not use the tilde - ("~") character as BitBake does not expand that character as the - shell would. - -#. **Run BitBake:** Now that you have :term:`BBPATH` defined, run the - ``bitbake`` command again:: - - $ bitbake - ERROR: Unable to parse /home/scott-lenovo/bitbake/lib/bb/parse/__init__.py - Traceback (most recent call last): - File "/home/scott-lenovo/bitbake/lib/bb/parse/__init__.py", line 127, in resolve_file(fn='conf/bitbake.conf', d=): - if not newfn: - > raise IOError(errno.ENOENT, "file %s not found in %s" % (fn, bbpath)) - fn = newfn - FileNotFoundError: [Errno 2] file conf/bitbake.conf not found in - - - This sample output shows that BitBake could not find the - ``conf/bitbake.conf`` file in the project directory. This file is - the first thing BitBake must find in order to build a target. And, - since the project directory for this example is empty, you need to - provide a ``conf/bitbake.conf`` file. - -#. **Creating conf/bitbake.conf:** The ``conf/bitbake.conf`` includes - a number of configuration variables BitBake uses for metadata and - recipe files. For this example, you need to create the file in your - project directory and define some key BitBake variables. For more - information on the ``bitbake.conf`` file, see - https://git.openembedded.org/bitbake/tree/conf/bitbake.conf. 
-
-   Use the following commands to create the ``conf`` directory in the
-   project directory::
-
-      $ mkdir conf
-
-   From within the ``conf`` directory,
-   use an editor to create the ``bitbake.conf`` file so that it contains
-   the following::
-
-      PN = "${@bb.parse.vars_from_file(d.getVar('FILE', False),d)[0] or 'defaultpkgname'}"
-
-      TMPDIR = "${TOPDIR}/tmp"
-      CACHE = "${TMPDIR}/cache"
-      STAMP = "${TMPDIR}/${PN}/stamps"
-      T = "${TMPDIR}/${PN}/work"
-      B = "${TMPDIR}/${PN}"
-
-   .. note::
-
-      Without a value for :term:`PN`, the :term:`STAMP`, :term:`T`, and
-      :term:`B` variable definitions prevent more than one recipe from
-      working. You can fix this either by setting :term:`PN` to have a value
-      similar to what OpenEmbedded and BitBake use in the default
-      ``bitbake.conf`` file (see the previous example), or by manually
-      updating each recipe to set :term:`PN`. You will also need to include
-      :term:`PN` as part of the :term:`STAMP`, :term:`T`, and :term:`B`
-      variable definitions in the ``local.conf`` file.
-
-   The ``TMPDIR`` variable establishes a directory that BitBake uses
-   for build output and intermediate files other than the cached
-   information used by the
-   :ref:`bitbake-user-manual/bitbake-user-manual-execution:setscene`
-   process. Here, the ``TMPDIR`` directory is set to ``hello/tmp``.
-
-   .. tip::
-
-      You can always safely delete the tmp directory in order to rebuild a
-      BitBake target. The build process creates the directory for you when you
-      run BitBake.
-
-   For information about each of the other variables defined in this
-   example, follow the links to :term:`PN`, :term:`TOPDIR`, :term:`CACHE`,
-   :term:`STAMP`, :term:`T` or :term:`B` in the glossary.
-
-#. **Run BitBake:** After making sure that the ``conf/bitbake.conf`` file
-   exists, you can run the ``bitbake`` command again::
-
-      $ bitbake
-      ERROR: Unable to parse /home/scott-lenovo/bitbake/lib/bb/parse/parse_py/BBHandler.py
-      Traceback (most recent call last):
-        File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/BBHandler.py", line 67, in inherit(files=['base'], fn='configuration INHERITs', lineno=0, d=):
-          if not os.path.exists(file):
-          >    raise ParseError("Could not inherit file %s" % (file), fn, lineno)
-
-      bb.parse.ParseError: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass
-
-   In the sample output,
-   BitBake could not find the ``classes/base.bbclass`` file. You need
-   to create that file next.
-
-#. **Creating classes/base.bbclass:** BitBake uses class files to
-   provide common code and functionality. The minimally required class
-   for BitBake is the ``classes/base.bbclass`` file. The ``base`` class
-   is implicitly inherited by every recipe. BitBake looks for the class
-   in the ``classes`` directory of the project (i.e. ``hello/classes``
-   in this example).
-
-   Create the ``classes`` directory as follows::
-
-      $ cd $HOME/hello
-      $ mkdir classes
-
-   Move to the ``classes`` directory and then create the
-   ``base.bbclass`` file by inserting this single line::
-
-      addtask build
-
-   The minimal task that BitBake runs is the ``do_build`` task. This is
-   all the example needs in order to build the project. Of course, the
-   ``base.bbclass`` can have much more depending on which build
-   environments BitBake is supporting.
-
-#. **Run BitBake:** After making sure that the ``classes/base.bbclass``
-   file exists, you can run the ``bitbake`` command again::
-
-      $ bitbake
-      Nothing to do.  Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.
- - BitBake is finally reporting - no errors. However, you can see that it really does not have - anything to do. You need to create a recipe that gives BitBake - something to do. - -#. **Creating a Layer:** While it is not really necessary for such a - small example, it is good practice to create a layer in which to - keep your code separate from the general metadata used by BitBake. - Thus, this example creates and uses a layer called "mylayer". - - .. note:: - - You can find additional information on layers in the - ":ref:`bitbake-user-manual/bitbake-user-manual-intro:Layers`" section. - - Minimally, you need a recipe file and a layer configuration file in - your layer. The configuration file needs to be in the ``conf`` - directory inside the layer. Use these commands to set up the layer - and the ``conf`` directory:: - - $ cd $HOME - $ mkdir mylayer - $ cd mylayer - $ mkdir conf - - Move to the ``conf`` directory and create a ``layer.conf`` file that has the - following:: - - BBPATH .= ":${LAYERDIR}" - BBFILES += "${LAYERDIR}/*.bb" - BBFILE_COLLECTIONS += "mylayer" - BBFILE_PATTERN_mylayer := "^${LAYERDIR_RE}/" - LAYERSERIES_CORENAMES = "hello_world_example" - LAYERSERIES_COMPAT_mylayer = "hello_world_example" - - For information on these variables, click on :term:`BBFILES`, - :term:`LAYERDIR`, :term:`BBFILE_COLLECTIONS`, :term:`BBFILE_PATTERN_mylayer ` - or :term:`LAYERSERIES_COMPAT` to go to the definitions in the glossary. - - .. note:: - - We are setting both ``LAYERSERIES_CORENAMES`` and :term:`LAYERSERIES_COMPAT` in this particular case, because we - are using bitbake without OpenEmbedded. - You should usually just use :term:`LAYERSERIES_COMPAT` to specify the OE-Core versions for which your layer - is compatible, and add the meta-openembedded layer to your project. - - You need to create the recipe file next. Inside your layer at the - top-level, use an editor and create a recipe file named - ``printhello.bb`` that has the following:: - - DESCRIPTION = "Prints Hello World" - PN = 'printhello' - PV = '1' - - python do_build() { - bb.plain("********************"); - bb.plain("* *"); - bb.plain("* Hello, World! *"); - bb.plain("* *"); - bb.plain("********************"); - } - - The recipe file simply provides - a description of the recipe, the name, version, and the ``do_build`` - task, which prints out "Hello World" to the console. For more - information on :term:`DESCRIPTION`, :term:`PN` or :term:`PV` - follow the links to the glossary. - -#. **Run BitBake With a Target:** Now that a BitBake target exists, run - the command and provide that target:: - - $ cd $HOME/hello - $ bitbake printhello - ERROR: no recipe files to build, check your BBPATH and BBFILES? - - Summary: There was 1 ERROR message shown, returning a non-zero exit code. - - We have created the layer with the recipe and - the layer configuration file but it still seems that BitBake cannot - find the recipe. BitBake needs a ``conf/bblayers.conf`` that lists - the layers for the project. Without this file, BitBake cannot find - the recipe. - -#. **Creating conf/bblayers.conf:** BitBake uses the - ``conf/bblayers.conf`` file to locate layers needed for the project. - This file must reside in the ``conf`` directory of the project (i.e. - ``hello/conf`` for this example). 
- - Set your working directory to the ``hello/conf`` directory and then - create the ``bblayers.conf`` file so that it contains the following:: - - BBLAYERS ?= " \ - /home//mylayer \ - " - - You need to provide your own information for ``you`` in the file. - -#. **Run BitBake With a Target:** Now that you have supplied the - ``bblayers.conf`` file, run the ``bitbake`` command and provide the - target:: - - $ bitbake printhello - Loading cache: 100% | - Loaded 0 entries from dependency cache. - Parsing recipes: 100% |##################################################################################| - Parsing of 1 .bb files complete (0 cached, 1 parsed). 1 targets, 0 skipped, 0 masked, 0 errors. - NOTE: Resolving any missing task queue dependencies - Initialising tasks: 100% |###############################################################################| - NOTE: No setscene tasks - NOTE: Executing Tasks - ******************** - * * - * Hello, World! * - * * - ******************** - NOTE: Tasks Summary: Attempted 1 tasks of which 0 didn't need to be rerun and all succeeded. - - .. note:: - - After the first execution, re-running bitbake printhello again will not - result in a BitBake run that prints the same console output. The reason - for this is that the first time the printhello.bb recipe's do_build task - executes successfully, BitBake writes a stamp file for the task. Thus, - the next time you attempt to run the task using that same bitbake - command, BitBake notices the stamp and therefore determines that the task - does not need to be re-run. If you delete the tmp directory or run - bitbake -c clean printhello and then re-run the build, the "Hello, - World!" message will be printed again. diff --git a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-intro.rst b/bitbake/doc/bitbake-user-manual/bitbake-user-manual-intro.rst deleted file mode 100644 index 9837b009ea..0000000000 --- a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-intro.rst +++ /dev/null @@ -1,709 +0,0 @@ -.. SPDX-License-Identifier: CC-BY-2.5 - -======== -Overview -======== - -| - -Welcome to the BitBake User Manual. This manual provides information on -the BitBake tool. The information attempts to be as independent as -possible regarding systems that use BitBake, such as OpenEmbedded and -the Yocto Project. In some cases, scenarios or examples within the -context of a build system are used in the manual to help with -understanding. For these cases, the manual clearly states the context. - -.. _intro: - -Introduction -============ - -Fundamentally, BitBake is a generic task execution engine that allows -shell and Python tasks to be run efficiently and in parallel while -working within complex inter-task dependency constraints. One of -BitBake's main users, OpenEmbedded, takes this core and builds embedded -Linux software stacks using a task-oriented approach. - -Conceptually, BitBake is similar to GNU Make in some regards but has -significant differences: - -- BitBake executes tasks according to the provided metadata that builds up - the tasks. Metadata is stored in recipe (``.bb``) and related recipe - "append" (``.bbappend``) files, configuration (``.conf``) and - underlying include (``.inc``) files, and in class (``.bbclass``) - files. The metadata provides BitBake with instructions on what tasks - to run and the dependencies between those tasks. - -- BitBake includes a fetcher library for obtaining source code from - various places such as local files, source control systems, or - websites. 
- -- The instructions for each unit to be built (e.g. a piece of software) - are known as "recipe" files and contain all the information about the - unit (dependencies, source file locations, checksums, description and - so on). - -- BitBake includes a client/server abstraction and can be used from a - command line or used as a service over XML-RPC and has several - different user interfaces. - -History and Goals -================= - -BitBake was originally a part of the OpenEmbedded project. It was -inspired by the Portage package management system used by the Gentoo -Linux distribution. On December 7, 2004, OpenEmbedded project team -member Chris Larson split the project into two distinct pieces: - -- BitBake, a generic task executor - -- OpenEmbedded, a metadata set utilized by BitBake - -Today, BitBake is the primary basis of the -`OpenEmbedded `__ project, which is being -used to build and maintain Linux distributions such as the `Poky -Reference Distribution `__, -developed under the umbrella of the `Yocto Project `__. - -Prior to BitBake, no other build tool adequately met the needs of an -aspiring embedded Linux distribution. All of the build systems used by -traditional desktop Linux distributions lacked important functionality, -and none of the ad hoc Buildroot-based systems, prevalent in the -embedded space, were scalable or maintainable. - -Some important original goals for BitBake were: - -- Handle cross-compilation. - -- Handle inter-package dependencies (build time on target architecture, - build time on native architecture, and runtime). - -- Support running any number of tasks within a given package, - including, but not limited to, fetching upstream sources, unpacking - them, patching them, configuring them, and so forth. - -- Be Linux distribution agnostic for both build and target systems. - -- Be architecture agnostic. - -- Support multiple build and target operating systems (e.g. Cygwin, the - BSDs, and so forth). - -- Be self-contained, rather than tightly integrated into the build - machine's root filesystem. - -- Handle conditional metadata on the target architecture, operating - system, distribution, and machine. - -- Be easy to use the tools to supply local metadata and packages - against which to operate. - -- Be easy to use BitBake to collaborate between multiple projects for - their builds. - -- Provide an inheritance mechanism to share common metadata between - many packages. - -Over time it became apparent that some further requirements were -necessary: - -- Handle variants of a base recipe (e.g. native, sdk, and multilib). - -- Split metadata into layers and allow layers to enhance or override - other layers. - -- Allow representation of a given set of input variables to a task as a - checksum. Based on that checksum, allow acceleration of builds with - prebuilt components. - -BitBake satisfies all the original requirements and many more with -extensions being made to the basic functionality to reflect the -additional requirements. Flexibility and power have always been the -priorities. BitBake is highly extensible and supports embedded Python -code and execution of any arbitrary tasks. - -.. _Concepts: - -Concepts -======== - -BitBake is a program written in the Python language. At the highest -level, BitBake interprets metadata, decides what tasks are required to -run, and executes those tasks. Similar to GNU Make, BitBake controls how -software is built. GNU Make achieves its control through "makefiles", -while BitBake uses "recipes". 
-
-BitBake extends the capabilities of a simple tool like GNU Make by
-allowing for the definition of much more complex tasks, such as
-assembling entire embedded Linux distributions.
-
-The remainder of this section introduces several concepts that should be
-understood in order to better leverage the power of BitBake.
-
-Recipes
--------
-
-BitBake Recipes, which are denoted by the file extension ``.bb``, are
-the most basic metadata files. These recipe files provide BitBake with
-the following:
-
-- Descriptive information about the package (author, homepage, license,
-  and so on)
-
-- The version of the recipe
-
-- Existing dependencies (both build and runtime dependencies)
-
-- Where the source code resides and how to fetch it
-
-- Whether the source code requires any patches, where to find them, and
-  how to apply them
-
-- How to configure and compile the source code
-
-- How to assemble the generated artifacts into one or more installable
-  packages
-
-- Where on the target machine to install the package or packages
-  created
-
-Within the context of BitBake, or any project utilizing BitBake as its
-build system, files with the ``.bb`` extension are referred to as
-recipes.
-
-.. note::
-
-   The term "package" is also commonly used to describe recipes.
-   However, since the same word is used to describe packaged output from
-   a project, it is best to maintain a single descriptive term -
-   "recipes". Put another way, a single "recipe" file is quite capable
-   of generating a number of related but separately installable
-   "packages". In fact, that ability is fairly common.
-
-Configuration Files
--------------------
-
-Configuration files, which are denoted by the ``.conf`` extension,
-define various configuration variables that govern the project's build
-process. These files fall into several areas that define machine
-configuration, distribution configuration, possible compiler tuning,
-general common configuration, and user configuration. The main
-configuration file is the sample ``bitbake.conf`` file, which is located
-within the BitBake source tree ``conf`` directory.
-
-Classes
--------
-
-Class files, which are denoted by the ``.bbclass`` extension, contain
-information that is useful to share between metadata files. The BitBake
-source tree currently comes with one class metadata file called
-``base.bbclass``. You can find this file in the ``classes`` directory.
-The ``base.bbclass`` class file is special since it is always included
-automatically for all recipes and classes. This class contains
-definitions for standard basic tasks such as fetching, unpacking,
-configuring (empty by default), compiling (runs any Makefile present),
-installing (empty by default) and packaging (empty by default). These
-tasks are often overridden or extended by other classes added during the
-project development process.
-
-Class Types
-~~~~~~~~~~~
-
-BitBake supports class files installed in three different directories:
-
-- ``classes-global/``: these classes must be inherited globally through the
-  :term:`INHERIT` variable in a :ref:`configuration file
-  `. These
-  classes are included for every recipe being built. For example, you would use
-  the global class named ``myclass`` like so::
-
-     INHERIT += "myclass"
-
-- ``classes-recipe/``: these classes must be inherited from a recipe using the
-  :ref:`inherit ` directive. They do
-  not support being inherited globally.
-  For example, you would use the recipe
-  class named ``myclass`` like so::
-
-     inherit myclass
-
-- ``classes/``: this final directory is meant for classes that can be used in
-  the two contexts explained above. In other words, they can be inherited
-  either globally or in a recipe.
-
-For details on how BitBake locates class files, see the
-:ref:`bitbake-user-manual/bitbake-user-manual-metadata:Locating Class Files`
-section of the Bitbake User Manual.
-
-Layers
-------
-
-Layers allow you to isolate different types of customizations from each
-other. While you might find it tempting to keep everything in one layer
-when working on a single project, the more modular your metadata, the
-easier it is to cope with future changes.
-
-To illustrate how you can use layers to keep things modular, consider
-customizations you might make to support a specific target machine.
-These types of customizations typically reside in a special layer,
-rather than a general layer, called a Board Support Package (BSP) layer.
-Furthermore, the machine customizations should be isolated from recipes
-and metadata that support a new GUI environment, for example. This
-situation gives you a couple of layers: one for the machine
-configurations and one for the GUI environment. It is important to
-understand, however, that the BSP layer can still make machine-specific
-additions to recipes within the GUI environment layer without polluting
-the GUI layer itself with those machine-specific changes. You can
-accomplish this through a recipe that is a BitBake append
-(``.bbappend``) file.
-
-.. _append-bbappend-files:
-
-Append Files
-------------
-
-Append files, which are files that have the ``.bbappend`` file
-extension, extend or override information in an existing recipe file.
-
-BitBake expects every append file to have a corresponding recipe file.
-Furthermore, the append file and corresponding recipe file must use the
-same root filename. The filenames can differ only in the file type
-suffix used (e.g. ``formfactor_0.0.bb`` and
-``formfactor_0.0.bbappend``).
-
-Information in append files extends or overrides the information in the
-underlying, similarly-named recipe files.
-
-When you name an append file, you can use the "``%``" wildcard character
-to allow for matching recipe names. For example, suppose you have an
-append file named as follows::
-
-   busybox_1.21.%.bbappend
-
-That append file
-would match any ``busybox_1.21.``\ x\ ``.bb`` version of the recipe. So,
-the append file would match the following recipe names::
-
-   busybox_1.21.1.bb
-   busybox_1.21.2.bb
-   busybox_1.21.3.bb
-   busybox_1.21.10.bb
-   busybox_1.21.11.bb
-
-.. note::
-
-   The use of the "``%``" character is limited in that it only works
-   directly in front of the ``.bbappend`` portion of the append file's
-   name. You cannot use the wildcard character in any other location of
-   the name.
-
-If the ``busybox`` recipe was updated to ``busybox_1.3.0.bb``, the
-append name would not match. However, if you named the append file
-``busybox_1.%.bbappend``, then you would have a match.
-
-In the most general case, you could name the append file something as
-simple as ``busybox_%.bbappend`` to be entirely version independent.
-
-Obtaining BitBake
-=================
-
-You can obtain BitBake several different ways:
-
-- **Cloning BitBake:** Using Git to clone the BitBake source code
-  repository is the recommended method for obtaining BitBake. Cloning
-  the repository makes it easy to get bug fixes and have access to
-  stable branches and the master branch.
Once you have cloned BitBake, - you should use the latest stable branch for development since the - master branch is for BitBake development and might contain less - stable changes. - - You usually need a version of BitBake that matches the metadata you - are using. The metadata is generally backwards compatible but not - forward compatible. - - Here is an example that clones the BitBake repository:: - - $ git clone git://git.openembedded.org/bitbake - - This command clones the BitBake - Git repository into a directory called ``bitbake``. Alternatively, - you can designate a directory after the ``git clone`` command if you - want to call the new directory something other than ``bitbake``. Here - is an example that names the directory ``bbdev``:: - - $ git clone git://git.openembedded.org/bitbake bbdev - -- **Installation using your Distribution Package Management System:** - This method is not recommended because the BitBake version that is - provided by your distribution, in most cases, is several releases - behind a snapshot of the BitBake repository. - -- **Taking a snapshot of BitBake:** Downloading a snapshot of BitBake - from the source code repository gives you access to a known branch or - release of BitBake. - - .. note:: - - Cloning the Git repository, as described earlier, is the preferred - method for getting BitBake. Cloning the repository makes it easier - to update as patches are added to the stable branches. - - The following example downloads a snapshot of BitBake version 1.17.0:: - - $ wget https://git.openembedded.org/bitbake/snapshot/bitbake-1.17.0.tar.gz - $ tar zxpvf bitbake-1.17.0.tar.gz - - After extraction of the tarball using - the tar utility, you have a directory entitled ``bitbake-1.17.0``. - -- **Using the BitBake that Comes With Your Build Checkout:** A final - possibility for getting a copy of BitBake is that it already comes - with your checkout of a larger BitBake-based build system, such as - Poky. Rather than manually checking out individual layers and gluing - them together yourself, you can check out an entire build system. The - checkout will already include a version of BitBake that has been - thoroughly tested for compatibility with the other components. For - information on how to check out a particular BitBake-based build - system, consult that build system's supporting documentation. - -.. _bitbake-user-manual-command: - -The BitBake Command -=================== - -The ``bitbake`` command is the primary interface to the BitBake tool. -This section presents the BitBake command syntax and provides several -execution examples. - -Usage and syntax ----------------- - -Following is the usage and syntax for BitBake:: - - $ bitbake -h - usage: bitbake [-s] [-e] [-g] [-u UI] [--version] [-h] [-f] [-c CMD] - [-C INVALIDATE_STAMP] [--runall RUNALL] [--runonly RUNONLY] - [--no-setscene] [--skip-setscene] [--setscene-only] [-n] [-p] - [-k] [-P] [-S SIGNATURE_HANDLER] [--revisions-changed] - [-b BUILDFILE] [-D] [-l DEBUG_DOMAINS] [-v] [-q] - [-w WRITEEVENTLOG] [-B BIND] [-T SERVER_TIMEOUT] - [--remote-server REMOTE_SERVER] [-m] [--token XMLRPCTOKEN] - [--observe-only] [--status-only] [--server-only] [-r PREFILE] - [-R POSTFILE] [-I EXTRA_ASSUME_PROVIDED] - [recipename/target ...] - - It is assumed there is a conf/bblayers.conf available in cwd or in BBPATH - which will provide the layer, BBFILES and other configuration information. - - General options: - recipename/target Execute the specified task (default is 'build') for - these target recipes (.bb files). 
- -s, --show-versions Show current and preferred versions of all recipes. - -e, --environment Show the global or per-recipe environment complete - with information about where variables were - set/changed. - -g, --graphviz Save dependency tree information for the specified - targets in the dot syntax. - -u UI, --ui UI The user interface to use (knotty, ncurses, taskexp, - taskexp_ncurses or teamcity - default knotty). - --version Show programs version and exit. - -h, --help Show this help message and exit. - - Task control options: - -f, --force Force the specified targets/task to run (invalidating - any existing stamp file). - -c CMD, --cmd CMD Specify the task to execute. The exact options - available depend on the metadata. Some examples might - be 'compile' or 'populate_sysroot' or 'listtasks' may - give a list of the tasks available. - -C INVALIDATE_STAMP, --clear-stamp INVALIDATE_STAMP - Invalidate the stamp for the specified task such as - 'compile' and then run the default task for the - specified target(s). - --runall RUNALL Run the specified task for any recipe in the taskgraph - of the specified target (even if it wouldn't otherwise - have run). - --runonly RUNONLY Run only the specified task within the taskgraph of - the specified targets (and any task dependencies those - tasks may have). - --no-setscene Do not run any setscene tasks. sstate will be ignored - and everything needed, built. - --skip-setscene Skip setscene tasks if they would be executed. Tasks - previously restored from sstate will be kept, unlike - --no-setscene. - --setscene-only Only run setscene tasks, don't run any real tasks. - - Execution control options: - -n, --dry-run Don't execute, just go through the motions. - -p, --parse-only Quit after parsing the BB recipes. - -k, --continue Continue as much as possible after an error. While the - target that failed and anything depending on it cannot - be built, as much as possible will be built before - stopping. - -P, --profile Profile the command and save reports. - -S SIGNATURE_HANDLER, --dump-signatures SIGNATURE_HANDLER - Dump out the signature construction information, with - no task execution. The SIGNATURE_HANDLER parameter is - passed to the handler. Two common values are none and - printdiff but the handler may define more/less. none - means only dump the signature, printdiff means - recursively compare the dumped signature with the most - recent one in a local build or sstate cache (can be - used to find out why tasks re-run when that is not - expected) - --revisions-changed Set the exit code depending on whether upstream - floating revisions have changed or not. - -b BUILDFILE, --buildfile BUILDFILE - Execute tasks from a specific .bb recipe directly. - WARNING: Does not handle any dependencies from other - recipes. - - Logging/output control options: - -D, --debug Increase the debug level. You can specify this more - than once. -D sets the debug level to 1, where only - bb.debug(1, ...) messages are printed to stdout; -DD - sets the debug level to 2, where both bb.debug(1, ...) - and bb.debug(2, ...) messages are printed; etc. - Without -D, no debug messages are printed. Note that - -D only affects output to stdout. All debug messages - are written to ${T}/log.do_taskname, regardless of the - debug level. - -l DEBUG_DOMAINS, --log-domains DEBUG_DOMAINS - Show debug logging for the specified logging domains. - -v, --verbose Enable tracing of shell tasks (with 'set -x'). Also - print bb.note(...) 
messages to stdout (in addition to - writing them to ${T}/log.do_). - -q, --quiet Output less log message data to the terminal. You can - specify this more than once. - -w WRITEEVENTLOG, --write-log WRITEEVENTLOG - Writes the event log of the build to a bitbake event - json file. Use '' (empty string) to assign the name - automatically. - - Server options: - -B BIND, --bind BIND The name/address for the bitbake xmlrpc server to bind - to. - -T SERVER_TIMEOUT, --idle-timeout SERVER_TIMEOUT - Set timeout to unload bitbake server due to - inactivity, set to -1 means no unload, default: - Environment variable BB_SERVER_TIMEOUT. - --remote-server REMOTE_SERVER - Connect to the specified server. - -m, --kill-server Terminate any running bitbake server. - --token XMLRPCTOKEN Specify the connection token to be used when - connecting to a remote server. - --observe-only Connect to a server as an observing-only client. - --status-only Check the status of the remote bitbake server. - --server-only Run bitbake without a UI, only starting a server - (cooker) process. - - Configuration options: - -r PREFILE, --read PREFILE - Read the specified file before bitbake.conf. - -R POSTFILE, --postread POSTFILE - Read the specified file after bitbake.conf. - -I EXTRA_ASSUME_PROVIDED, --ignore-deps EXTRA_ASSUME_PROVIDED - Assume these dependencies don't exist and are already - provided (equivalent to ASSUME_PROVIDED). Useful to - make dependency graphs more appealing. - -.. - Bitbake help output generated with "stty columns 80; bin/bitbake -h" - -.. _bitbake-examples: - -Examples --------- - -This section presents some examples showing how to use BitBake. - -.. _example-executing-a-task-against-a-single-recipe: - -Executing a Task Against a Single Recipe -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Executing tasks for a single recipe file is relatively simple. You -specify the file in question, and BitBake parses it and executes the -specified task. If you do not specify a task, BitBake executes the -default task, which is "build". BitBake obeys inter-task dependencies -when doing so. - -The following command runs the build task, which is the default task, on -the ``foo_1.0.bb`` recipe file:: - - $ bitbake -b foo_1.0.bb - -The following command runs the clean task on the ``foo.bb`` recipe file:: - - $ bitbake -b foo.bb -c clean - -.. note:: - - The "-b" option explicitly does not handle recipe dependencies. Other - than for debugging purposes, it is instead recommended that you use - the syntax presented in the next section. - -Executing Tasks Against a Set of Recipe Files -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -There are a number of additional complexities introduced when one wants -to manage multiple ``.bb`` files. Clearly there needs to be a way to -tell BitBake what files are available and, of those, which you want to -execute. There also needs to be a way for each recipe to express its -dependencies, both for build-time and runtime. There must be a way for -you to express recipe preferences when multiple recipes provide the same -functionality, or when there are multiple versions of a recipe. - -The ``bitbake`` command, when not using "--buildfile" or "-b" only -accepts a "PROVIDES". You cannot provide anything else. 
By default, a -recipe file generally "PROVIDES" its "packagename" as shown in the -following example:: - - $ bitbake foo - -This next example "PROVIDES" the -package name and also uses the "-c" option to tell BitBake to just -execute the ``do_clean`` task:: - - $ bitbake -c clean foo - -Executing a List of Task and Recipe Combinations -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The BitBake command line supports specifying different tasks for -individual targets when you specify multiple targets. For example, -suppose you had two targets (or recipes) ``myfirstrecipe`` and -``mysecondrecipe`` and you needed BitBake to run ``taskA`` for the first -recipe and ``taskB`` for the second recipe:: - - $ bitbake myfirstrecipe:do_taskA mysecondrecipe:do_taskB - -Generating Dependency Graphs -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -BitBake is able to generate dependency graphs using the ``dot`` syntax. -You can convert these graphs into images using the ``dot`` tool from -`Graphviz `__. - -When you generate a dependency graph, BitBake writes two files to the -current working directory: - -- ``task-depends.dot``: Shows dependencies between tasks. These - dependencies match BitBake's internal task execution list. - -- ``pn-buildlist``: Shows a simple list of targets that are to be - built. - -To stop depending on common depends, use the ``-I`` depend option and -BitBake omits them from the graph. Leaving this information out can -produce more readable graphs. This way, you can remove from the graph -:term:`DEPENDS` from inherited classes such as ``base.bbclass``. - -Here are two examples that create dependency graphs. The second example -omits depends common in OpenEmbedded from the graph:: - - $ bitbake -g foo - - $ bitbake -g -I virtual/kernel -I eglibc foo - -Executing a Multiple Configuration Build -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -BitBake is able to build multiple images or packages using a single -command where the different targets require different configurations -(multiple configuration builds). Each target, in this scenario, is -referred to as a "multiconfig". - -To accomplish a multiple configuration build, you must define each -target's configuration separately using a parallel configuration file in -the build directory. The location for these multiconfig configuration -files is specific. They must reside in the current build directory in a -sub-directory of ``conf`` named ``multiconfig``. Following is an example -for two separate targets: - -.. image:: figures/bb_multiconfig_files.png - :align: center - -The reason for this required file hierarchy is because the :term:`BBPATH` -variable is not constructed until the layers are parsed. Consequently, -using the configuration file as a pre-configuration file is not possible -unless it is located in the current working directory. - -Minimally, each configuration file must define the machine and the -temporary directory BitBake uses for the build. Suggested practice -dictates that you do not overlap the temporary directories used during -the builds. - -Aside from separate configuration files for each target, you must also -enable BitBake to perform multiple configuration builds. Enabling is -accomplished by setting the -:term:`BBMULTICONFIG` variable in the -``local.conf`` configuration file. As an example, suppose you had -configuration files for ``target1`` and ``target2`` defined in the build -directory. 
The following statement in the ``local.conf`` file both
-enables BitBake to perform multiple configuration builds and specifies
-the two extra multiconfigs::
-
-   BBMULTICONFIG = "target1 target2"
-
-Once the target configuration files are in place and BitBake has been
-enabled to perform multiple configuration builds, use the following
-command form to start the builds::
-
-   $ bitbake [mc:multiconfigname:]target [[[mc:multiconfigname:]target] ... ]
-
-Here is an example for two extra multiconfigs: ``target1`` and ``target2``::
-
-   $ bitbake mc::target mc:target1:target mc:target2:target
-
-.. _bb-enabling-multiple-configuration-build-dependencies:
-
-Enabling Multiple Configuration Build Dependencies
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Sometimes dependencies can exist between targets (multiconfigs) in a
-multiple configuration build. For example, suppose that in order to
-build an image for a particular architecture, the root filesystem of
-another build for a different architecture needs to exist. In other
-words, the image for the first multiconfig depends on the root
-filesystem of the second multiconfig. This dependency is essentially
-that the task in the recipe that builds one multiconfig is dependent on
-the completion of the task in the recipe that builds another
-multiconfig.
-
-To enable dependencies in a multiple configuration build, you must
-declare the dependencies in the recipe using the following statement
-form::
-
-   task_or_package[mcdepends] = "mc:from_multiconfig:to_multiconfig:recipe_name:task_on_which_to_depend"
-
-To better show how to use this statement, consider an example with two
-multiconfigs: ``target1`` and ``target2``::
-
-   image_task[mcdepends] = "mc:target1:target2:image2:rootfs_task"
-
-In this example, the ``from_multiconfig`` is "target1" and the
-``to_multiconfig`` is "target2". The image whose recipe contains
-``image_task`` depends on the completion of the ``rootfs_task`` used to
-build out ``image2``, which is associated with the "target2"
-multiconfig.
-
-Once you set up this dependency, you can build the "target1" multiconfig
-using a BitBake command as follows::
-
-   $ bitbake mc:target1:image1
-
-This command executes all the tasks needed to create ``image1`` for the "target1"
-multiconfig. Because of the dependency, BitBake also executes through
-the ``rootfs_task`` for the "target2" multiconfig build.
-
-Having a recipe depend on the root filesystem of another build might not
-seem that useful. Consider this change to the statement in the ``image1``
-recipe::
-
-   image_task[mcdepends] = "mc:target1:target2:image2:image_task"
-
-In this case, BitBake must create ``image2`` for the "target2" build since
-the "target1" build depends on it.
-
-Because "target1" and "target2" are enabled for multiple configuration
-builds and have separate configuration files, BitBake places the
-artifacts for each build in the respective temporary build directories.
diff --git a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-library-functions.rst b/bitbake/doc/bitbake-user-manual/bitbake-user-manual-library-functions.rst
deleted file mode 100644
index 09e353945b..0000000000
--- a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-library-functions.rst
+++ /dev/null
@@ -1,59 +0,0 @@
-.. SPDX-License-Identifier: CC-BY-2.5
-
-=================
-Library Functions
-=================
-
-|
-
-This chapter lists common library functions available under the ``lib/``
-directory in BitBake.
-
-These functions can be used in recipes or configuration files with
-:ref:`inline-Python <bitbake-user-manual/bitbake-user-manual-metadata:inline python variable expansion>` or :ref:`Python
-<bitbake-user-manual/bitbake-user-manual-metadata:bitbake-style python functions>` functions.
-
-Logging utilities
-=================
-
-Different logging utilities can be used from Python code in recipes or
-configuration files.
-
-The strings passed below can be formatted with ``str.format()``, for example::
-
-   bb.warn("Houston, we have a {}".format("bit of a problem"))
-
-A formatted string can also be passed directly::
-
-   bb.error("%s, we have a %s" % ("Houston", "big problem"))
-
-Python f-strings may also be used::
-
-   h = "Houston"
-   bb.fatal(f"{h}, we have a critical problem")
-
-.. automodule:: bb
-   :members:
-      debug,
-      error,
-      erroronce,
-      fatal,
-      note,
-      plain,
-      verbnote,
-      warn,
-      warnonce,
-
-``bb.utils``
-============
-
-.. automodule:: bb.utils
-   :members:
-   :exclude-members:
-      LogCatcher,
-      PrCtlError,
-      VersionStringException,
-      better_compile,
-      better_exec,
diff --git a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-metadata.rst b/bitbake/doc/bitbake-user-manual/bitbake-user-manual-metadata.rst
deleted file mode 100644
index e5075a346b..0000000000
--- a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-metadata.rst
+++ /dev/null
@@ -1,2276 +0,0 @@
-.. SPDX-License-Identifier: CC-BY-2.5
-
-====================
-Syntax and Operators
-====================
-
-|
-
-BitBake files have their own syntax. The syntax has similarities to
-several other languages but also has some unique features. This section
-describes the available syntax and operators as well as provides
-examples.
-
-Basic Syntax
-============
-
-This section provides some basic syntax examples.
-
-Basic Variable Setting
-----------------------
-
-The following example sets ``VARIABLE`` to "value". This assignment
-occurs immediately as the statement is parsed. It is a "hard"
-assignment. ::
-
-   VARIABLE = "value"
-
-As expected, if you include leading or
-trailing spaces as part of an assignment, the spaces are retained::
-
-   VARIABLE = " value"
-   VARIABLE = "value "
-
-Setting ``VARIABLE`` to "" sets
-it to an empty string, while setting the variable to " " sets it to a
-blank space (i.e. these are not the same values). ::
-
-   VARIABLE = ""
-   VARIABLE = " "
-
-You can use single quotes instead of double quotes when setting a
-variable's value. Doing so allows you to use values that contain the
-double quote character::
-
-   VARIABLE = 'I have a " in my value'
-
-.. note::
-
-   Unlike in Bourne shells, single quotes work identically to double
-   quotes in all other ways. They do not suppress variable expansions.
-
-Modifying Existing Variables
-----------------------------
-
-Sometimes you need to modify existing variables. Following are some
-cases where you might find you want to modify an existing variable:
-
-- Customize a recipe that uses the variable.
-
-- Change a variable's default value used in a ``*.bbclass`` file.
-
-- Change the variable in a ``*.bbappend`` file to override the variable
-  in the original recipe.
-
-- Change the variable in a configuration file so that the value
-  overrides an existing configuration.
-
-Changing a variable value can sometimes depend on how the value was
-originally assigned and also on the desired intent of the change. In
-particular, when you append a value to a variable that has a default
-value, the resulting value might not be what you expect. In this case,
-the value you provide might replace the value rather than append to the
-default value.
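-
-For example, here is a minimal sketch of this pitfall, using the weak
-default value operator ("??=") that is covered in detail later in this
-section. Because "+=" takes immediate effect, it replaces a pending
-weak default value rather than appending to it::
-
-   W ??= "x"
-   W += "y"
-
-After parsing, ``W`` contains " y" rather than the expected "x y".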
- -If after you have changed a variable's value and something unexplained -occurs, you can use BitBake to check the actual value of the suspect -variable. You can make these checks for both configuration and recipe -level changes: - -- For configuration changes, use the following:: - - $ bitbake -e - - This - command displays variable values after the configuration files (i.e. - ``local.conf``, ``bblayers.conf``, ``bitbake.conf`` and so forth) - have been parsed. - - .. note:: - - Variables that are exported to the environment are preceded by the - string "export" in the command's output. - -- To find changes to a given variable in a specific recipe, use the - following:: - - $ bitbake recipename -e | grep VARIABLENAME=\" - - This command checks to see if the variable actually makes - it into a specific recipe. - -Line Joining ------------- - -Outside of :ref:`functions `, -BitBake joins any line ending in -a backslash character ("\\") with the following line before parsing -statements. The most common use for the "\\" character is to split -variable assignments over multiple lines, as in the following example:: - - FOO = "bar \ - baz \ - qaz" - -Both the "\\" character and the newline -character that follow it are removed when joining lines. Thus, no -newline characters end up in the value of ``FOO``. - -Consider this additional example where the two assignments both assign -"barbaz" to ``FOO``:: - - FOO = "barbaz" - FOO = "bar\ - baz" - -.. note:: - - BitBake does not interpret escape sequences like "\\n" in variable - values. For these to have an effect, the value must be passed to some - utility that interprets escape sequences, such as - ``printf`` or ``echo -n``. - -Variable Expansion ------------------- - -Variables can reference the contents of other variables using a syntax -that is similar to variable expansion in Bourne shells. The following -assignments result in A containing "aval" and B evaluating to -"preavalpost". :: - - A = "aval" - B = "pre${A}post" - -.. note:: - - Unlike in Bourne shells, the curly braces are mandatory: Only ``${FOO}`` and not - ``$FOO`` is recognized as an expansion of ``FOO``. - -The "=" operator does not immediately expand variable references in the -right-hand side. Instead, expansion is deferred until the variable -assigned to is actually used. The result depends on the current values -of the referenced variables. The following example should clarify this -behavior:: - - A = "${B} baz" - B = "${C} bar" - C = "foo" - *At this point, ${A} equals "foo bar baz"* - C = "qux" - *At this point, ${A} equals "qux bar baz"* - B = "norf" - *At this point, ${A} equals "norf baz"* - -Contrast this behavior with the -:ref:`bitbake-user-manual/bitbake-user-manual-metadata:immediate variable -expansion (:=)` operator. - -If the variable expansion syntax is used on a variable that does not -exist, the string is kept as is. For example, given the following -assignment, ``BAR`` expands to the literal string "${FOO}" as long as -``FOO`` does not exist. :: - - BAR = "${FOO}" - -Setting a default value (?=) ----------------------------- - -You can use the "?=" operator to achieve a "softer" assignment for a -variable. This type of assignment allows you to define a variable if it -is undefined when the statement is parsed, but to leave the value alone -if the variable has a value. Here is an example:: - - A ?= "aval" - -If ``A`` is -set at the time this statement is parsed, the variable retains its -value. However, if ``A`` is not set, the variable is set to "aval". 
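-
-The following sketch shows both outcomes::
-
-   A = "preset"
-   A ?= "aval"
-   B ?= "bval"
-
-Here, ``A`` keeps "preset" because it already had a value when the "?="
-statement was parsed, while ``B`` (assumed to be otherwise unset)
-becomes "bval".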
- -.. note:: - - This assignment is immediate. Consequently, if multiple "?=" - assignments to a single variable exist, the first of those ends up - getting used. - -Setting a weak default value (??=) ----------------------------------- - -The weak default value of a variable is the value which that variable -will expand to if no value has been assigned to it via any of the other -assignment operators. The "??=" operator takes effect immediately, replacing -any previously defined weak default value. Here is an example:: - - W ??= "x" - A := "${W}" # Immediate variable expansion - W ??= "y" - B := "${W}" # Immediate variable expansion - W ??= "z" - C = "${W}" - W ?= "i" - -After parsing we will have:: - - A = "x" - B = "y" - C = "i" - W = "i" - -Appending and prepending non-override style will not substitute the weak -default value, which means that after parsing:: - - W ??= "x" - W += "y" - -we will have:: - - W = " y" - -On the other hand, override-style appends/prepends/removes are applied after -any active weak default value has been substituted:: - - W ??= "x" - W:append = "y" - -After parsing we will have:: - - W = "xy" - -Immediate variable expansion (:=) ---------------------------------- - -The ":=" operator results in a variable's contents being expanded -immediately, rather than when the variable is actually used:: - - T = "123" - A := "test ${T}" - T = "456" - B := "${T} ${C}" - C = "cval" - C := "${C}append" - -In this example, ``A`` contains "test 123", even though the final value -of :term:`T` is "456". The variable :term:`B` will end up containing "456 -cvalappend". This is because references to undefined variables are -preserved as is during (immediate)expansion. This is in contrast to GNU -Make, where undefined variables expand to nothing. The variable ``C`` -contains "cvalappend" since ``${C}`` immediately expands to "cval". - -.. _appending-and-prepending: - -Appending (+=) and prepending (=+) With Spaces ----------------------------------------------- - -Appending and prepending values is common and can be accomplished using -the "+=" and "=+" operators. These operators insert a space between the -current value and prepended or appended value. - -These operators take immediate effect during parsing. Here are some -examples:: - - B = "bval" - B += "additionaldata" - C = "cval" - C =+ "test" - -The variable :term:`B` contains "bval additionaldata" and ``C`` contains "test -cval". - -.. _appending-and-prepending-without-spaces: - -Appending (.=) and Prepending (=.) Without Spaces -------------------------------------------------- - -If you want to append or prepend values without an inserted space, use -the ".=" and "=." operators. - -These operators take immediate effect during parsing. Here are some -examples:: - - B = "bval" - B .= "additionaldata" - C = "cval" - C =. "test" - -The variable :term:`B` contains "bvaladditionaldata" and ``C`` contains -"testcval". - -Appending and Prepending (Override Style Syntax) ------------------------------------------------- - -You can also append and prepend a variable's value using an override -style syntax. When you use this syntax, no spaces are inserted. - -These operators differ from the ":=", ".=", "=.", "+=", and "=+" -operators in that their effects are applied at variable expansion time -rather than being immediately applied. 
Here are some examples:: - - B = "bval" - B:append = " additional data" - C = "cval" - C:prepend = "additional data " - D = "dval" - D:append = "additional data" - -The variable :term:`B` -becomes "bval additional data" and ``C`` becomes "additional data cval". -The variable ``D`` becomes "dvaladditional data". - -.. note:: - - You must control all spacing when you use the override syntax. - -.. note:: - - The overrides are applied in this order, ":append", ":prepend", ":remove". - -It is also possible to append and prepend to shell functions and -BitBake-style Python functions. See the ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:shell functions`" and ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:bitbake-style python functions`" -sections for examples. - -.. _removing-override-style-syntax: - -Removal (Override Style Syntax) -------------------------------- - -You can remove values from lists using the removal override style -syntax. Specifying a value for removal causes all occurrences of that -value to be removed from the variable. Unlike ":append" and ":prepend", -there is no need to add a leading or trailing space to the value. - -When you use this syntax, BitBake expects one or more strings. -Surrounding spaces and spacing are preserved. Here is an example:: - - FOO = "123 456 789 123456 123 456 123 456" - FOO:remove = "123" - FOO:remove = "456" - FOO2 = " abc def ghi abcdef abc def abc def def" - FOO2:remove = "\ - def \ - abc \ - ghi \ - " - -The variable ``FOO`` becomes -" 789 123456 " and ``FOO2`` becomes " abcdef ". - -Like ":append" and ":prepend", ":remove" is applied at variable -expansion time. - -.. note:: - - The overrides are applied in this order, ":append", ":prepend", ":remove". - This implies it is not possible to re-append previously removed strings. - However, one can undo a ":remove" by using an intermediate variable whose - content is passed to the ":remove" so that modifying the intermediate - variable equals to keeping the string in:: - - FOOREMOVE = "123 456 789" - FOO:remove = "${FOOREMOVE}" - ... - FOOREMOVE = "123 789" - - This expands to ``FOO:remove = "123 789"``. - -.. note:: - - Override application order may not match variable parse history, i.e. - the output of ``bitbake -e`` may contain ":remove" before ":append", - but the result will be removed string, because ":remove" is handled - last. - -Override Style Operation Advantages ------------------------------------ - -An advantage of the override style operations ":append", ":prepend", and -":remove" as compared to the "+=" and "=+" operators is that the -override style operators provide guaranteed operations. For example, -consider a class ``foo.bbclass`` that needs to add the value "val" to -the variable ``FOO``, and a recipe that uses ``foo.bbclass`` as follows:: - - inherit foo - FOO = "initial" - -If ``foo.bbclass`` uses the "+=" operator, -as follows, then the final value of ``FOO`` will be "initial", which is -not what is desired:: - - FOO += "val" - -If, on the other hand, ``foo.bbclass`` -uses the ":append" operator, then the final value of ``FOO`` will be -"initial val", as intended:: - - FOO:append = " val" - -.. note:: - - It is never necessary to use "+=" together with ":append". 
The following - sequence of assignments appends "barbaz" to FOO:: - - FOO:append = "bar" - FOO:append = "baz" - - - The only effect of changing the second assignment in the previous - example to use "+=" would be to add a space before "baz" in the - appended value (due to how the "+=" operator works). - -Another advantage of the override style operations is that you can -combine them with other overrides as described in the -":ref:`bitbake-user-manual/bitbake-user-manual-metadata:conditional syntax (overrides)`" section. - -Variable Flag Syntax --------------------- - -Variable flags are BitBake's implementation of variable properties or -attributes. It is a way of tagging extra information onto a variable. -You can find more out about variable flags in general in the -":ref:`bitbake-user-manual/bitbake-user-manual-metadata:variable flags`" section. - -You can define, append, and prepend values to variable flags. All the -standard syntax operations previously mentioned work for variable flags -except for override style syntax (i.e. ":prepend", ":append", and -":remove"). - -Here are some examples showing how to set variable flags:: - - FOO[a] = "abc" - FOO[b] = "123" - FOO[a] += "456" - -The variable ``FOO`` has two flags: -``[a]`` and ``[b]``. The flags are immediately set to "abc" and "123", -respectively. The ``[a]`` flag becomes "abc 456". - -No need exists to pre-define variable flags. You can simply start using -them. One extremely common application is to attach some brief -documentation to a BitBake variable as follows:: - - CACHE[doc] = "The directory holding the cache of the metadata." - -.. note:: - - Variable flag names starting with an underscore (``_``) character - are allowed but are ignored by ``d.getVarFlags("VAR")`` - in Python code. Such flag names are used internally by BitBake. - -Inline Python Variable Expansion --------------------------------- - -You can use inline Python variable expansion to set variables. Here is -an example:: - - DATE = "${@time.strftime('%Y%m%d',time.gmtime())}" - -This example results in the ``DATE`` variable being set to the current date. - -Probably the most common use of this feature is to extract the value of -variables from BitBake's internal data dictionary, ``d``. The following -lines select the values of a package name and its version number, -respectively:: - - PN = "${@bb.parse.vars_from_file(d.getVar('FILE', False),d)[0] or 'defaultpkgname'}" - PV = "${@bb.parse.vars_from_file(d.getVar('FILE', False),d)[1] or '1.0'}" - -.. note:: - - Inline Python expressions work just like variable expansions insofar as the - "=" and ":=" operators are concerned. Given the following assignment, foo() - is called each time FOO is expanded:: - - FOO = "${@foo()}" - - Contrast this with the following immediate assignment, where foo() is only - called once, while the assignment is parsed:: - - FOO := "${@foo()}" - -For a different way to set variables with Python code during parsing, -see the -":ref:`bitbake-user-manual/bitbake-user-manual-metadata:anonymous python functions`" section. - -Unsetting variables -------------------- - -It is possible to completely remove a variable or a variable flag from -BitBake's internal data dictionary by using the "unset" keyword. Here is -an example:: - - unset DATE - unset do_fetch[noexec] - -These two statements remove the ``DATE`` and the ``do_fetch[noexec]`` flag. 
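-
-Note that unsetting a variable is not the same as setting it to an
-empty string: after ``unset``, the variable no longer exists, so a
-later reference to it is kept as a literal string, as described earlier
-for undefined variables. A small sketch::
-
-   unset DATE
-   TODAY = "${DATE}"
-
-Here, ``TODAY`` expands to the literal string "${DATE}" because ``DATE``
-no longer exists.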
- -Providing Pathnames -------------------- - -When specifying pathnames for use with BitBake, do not use the tilde -("~") character as a shortcut for your home directory. Doing so might -cause BitBake to not recognize the path since BitBake does not expand -this character in the same way a shell would. - -Instead, provide a fuller path as the following example illustrates:: - - BBLAYERS ?= " \ - /home/scott-lenovo/LayerA \ - " - -Exporting Variables to the Environment -====================================== - -You can export variables to the environment of running tasks by using -the ``export`` keyword. For example, in the following example, the -``do_foo`` task prints "value from the environment" when run:: - - export ENV_VARIABLE - ENV_VARIABLE = "value from the environment" - - do_foo() { - bbplain "$ENV_VARIABLE" - } - -.. note:: - - BitBake does not expand ``$ENV_VARIABLE`` in this case because it lacks the - obligatory ``{}`` . Rather, ``$ENV_VARIABLE`` is expanded by the shell. - -It does not matter whether ``export ENV_VARIABLE`` appears before or -after assignments to ``ENV_VARIABLE``. - -It is also possible to combine ``export`` with setting a value for the -variable. Here is an example:: - - export ENV_VARIABLE = "variable-value" - -In the output of ``bitbake -e``, variables that are exported to the -environment are preceded by "export". - -Among the variables commonly exported to the environment are ``CC`` and -``CFLAGS``, which are picked up by many build systems. - -Conditional Syntax (Overrides) -============================== - -BitBake uses :term:`OVERRIDES` to control what -variables are overridden after BitBake parses recipes and configuration -files. This section describes how you can use :term:`OVERRIDES` as -conditional metadata, talks about key expansion in relationship to -:term:`OVERRIDES`, and provides some examples to help with understanding. - -Conditional Metadata --------------------- - -You can use :term:`OVERRIDES` to conditionally select a specific version of -a variable and to conditionally append or prepend the value of a -variable. - -.. note:: - - Overrides can only use lower-case characters, digits and dashes. - In particular, colons are not permitted in override names as they are used to - separate overrides from each other and from the variable name. - -- *Selecting a Variable:* The :term:`OVERRIDES` variable is a - colon-character-separated list that contains items for which you want - to satisfy conditions. Thus, if you have a variable that is - conditional on "arm", and "arm" is in :term:`OVERRIDES`, then the - "arm"-specific version of the variable is used rather than the - non-conditional version. Here is an example:: - - OVERRIDES = "architecture:os:machine" - TEST = "default" - TEST:os = "osspecific" - TEST:nooverride = "othercondvalue" - - In this example, the :term:`OVERRIDES` - variable lists three overrides: "architecture", "os", and "machine". - The variable ``TEST`` by itself has a default value of "default". You - select the os-specific version of the ``TEST`` variable by appending - the "os" override to the variable (i.e. ``TEST:os``). - - To better understand this, consider a practical example that assumes - an OpenEmbedded metadata-based Linux kernel recipe file. 
The - following lines from the recipe file first set the kernel branch - variable ``KBRANCH`` to a default value, then conditionally override - that value based on the architecture of the build:: - - KBRANCH = "standard/base" - KBRANCH:qemuarm = "standard/arm-versatile-926ejs" - KBRANCH:qemumips = "standard/mti-malta32" - KBRANCH:qemuppc = "standard/qemuppc" - KBRANCH:qemux86 = "standard/common-pc/base" - KBRANCH:qemux86-64 = "standard/common-pc-64/base" - KBRANCH:qemumips64 = "standard/mti-malta64" - -- *Appending and Prepending:* BitBake also supports append and prepend - operations to variable values based on whether a specific item is - listed in :term:`OVERRIDES`. Here is an example:: - - DEPENDS = "glibc ncurses" - OVERRIDES = "machine:local" - DEPENDS:append:machine = "libmad" - - In this example, :term:`DEPENDS` becomes "glibc ncurses libmad". - - Again, using an OpenEmbedded metadata-based kernel recipe file as an - example, the following lines will conditionally append to the - ``KERNEL_FEATURES`` variable based on the architecture:: - - KERNEL_FEATURES:append = " ${KERNEL_EXTRA_FEATURES}" - KERNEL_FEATURES:append:qemux86=" cfg/sound.scc cfg/paravirt_kvm.scc" - KERNEL_FEATURES:append:qemux86-64=" cfg/sound.scc cfg/paravirt_kvm.scc" - -- *Setting a Variable for a Single Task:* BitBake supports setting a - variable just for the duration of a single task. Here is an example:: - - FOO:task-configure = "val 1" - FOO:task-compile = "val 2" - - In the - previous example, ``FOO`` has the value "val 1" while the - ``do_configure`` task is executed, and the value "val 2" while the - ``do_compile`` task is executed. - - Internally, this is implemented by prepending the task (e.g. - "task-compile:") to the value of - :term:`OVERRIDES` for the local datastore of the - ``do_compile`` task. - - You can also use this syntax with other combinations (e.g. - "``:prepend``") as shown in the following example:: - - EXTRA_OEMAKE:prepend:task-compile = "${PARALLEL_MAKE} " - -.. note:: - - Before BitBake 1.52 (Honister 3.4), the syntax for :term:`OVERRIDES` - used ``_`` instead of ``:``, so you will still find a lot of documentation - using ``_append``, ``_prepend``, and ``_remove``, for example. - - For details, see the - :yocto_docs:`Overrides Syntax Changes ` - section in the Yocto Project manual migration notes. - -Key Expansion -------------- - -Key expansion happens when the BitBake datastore is finalized. To better -understand this, consider the following example:: - - A${B} = "X" - B = "2" - A2 = "Y" - -In this case, after all the parsing is complete, BitBake expands -``${B}`` into "2". This expansion causes ``A2``, which was set to "Y" -before the expansion, to become "X". - -.. _variable-interaction-worked-examples: - -Examples --------- - -Despite the previous explanations that show the different forms of -variable definitions, it can be hard to work out exactly what happens -when variable operators, conditional overrides, and unconditional -overrides are combined. This section presents some common scenarios -along with explanations for variable interactions that typically confuse -users. - -There is often confusion concerning the order in which overrides and -various "append" operators take effect. Recall that an append or prepend -operation using ":append" and ":prepend" does not result in an immediate -assignment as would "+=", ".=", "=+", or "=.". 
Consider the following -example:: - - OVERRIDES = "foo" - A = "Z" - A:foo:append = "X" - -For this case, -``A`` is unconditionally set to "Z" and "X" is unconditionally and -immediately appended to the variable ``A:foo``. Because overrides have -not been applied yet, ``A:foo`` is set to "X" due to the append and -``A`` simply equals "Z". - -Applying overrides, however, changes things. Since "foo" is listed in -:term:`OVERRIDES`, the conditional variable ``A`` is replaced with the "foo" -version, which is equal to "X". So effectively, ``A:foo`` replaces -``A``. - -This next example changes the order of the override and the append:: - - OVERRIDES = "foo" - A = "Z" - A:append:foo = "X" - -For this case, before -overrides are handled, ``A`` is set to "Z" and ``A:append:foo`` is set -to "X". Once the override for "foo" is applied, however, ``A`` gets -appended with "X". Consequently, ``A`` becomes "ZX". Notice that spaces -are not appended. - -This next example has the order of the appends and overrides reversed -back as in the first example:: - - OVERRIDES = "foo" - A = "Y" - A:foo:append = "Z" - A:foo:append = "X" - -For this case, before any overrides are resolved, -``A`` is set to "Y" using an immediate assignment. After this immediate -assignment, ``A:foo`` is set to "Z", and then further appended with "X" -leaving the variable set to "ZX". Finally, applying the override for -"foo" results in the conditional variable ``A`` becoming "ZX" (i.e. -``A`` is replaced with ``A:foo``). - -This final example mixes in some varying operators:: - - A = "1" - A:append = "2" - A:append = "3" - A += "4" - A .= "5" - -For this case, the type of append -operators are affecting the order of assignments as BitBake passes -through the code multiple times. Initially, ``A`` is set to "1 45" -because of the three statements that use immediate operators. After -these assignments are made, BitBake applies the ":append" operations. -Those operations result in ``A`` becoming "1 4523". - -Sharing Functionality -===================== - -BitBake allows for metadata sharing through include files (``.inc``) and -class files (``.bbclass``). For example, suppose you have a piece of -common functionality such as a task definition that you want to share -between more than one recipe. In this case, creating a ``.bbclass`` file -that contains the common functionality and then using the ``inherit`` -directive in your recipes to inherit the class would be a common way to -share the task. - -This section presents the mechanisms BitBake provides to allow you to -share functionality between recipes. Specifically, the mechanisms -include ``include``, ``inherit``, :term:`INHERIT`, and ``require`` -directives. There is also a higher-level abstraction called -``configuration fragments`` that is enabled with ``addfragments`` -directive. - -.. _ref-bitbake-user-manual-metadata-inherit: - -``inherit`` Directive ---------------------- - -When writing a recipe or class file, you can use the ``inherit`` -directive to inherit the functionality of a class (``.bbclass``). -BitBake only supports this directive when used within recipe and class -files (i.e. ``.bb`` and ``.bbclass``). - -The ``inherit`` directive is a rudimentary means of specifying -functionality contained in class files that your recipes require. For -example, you can easily abstract out the tasks involved in building a -package that uses Autoconf and Automake and put those tasks into a class -file and then have your recipe inherit that class file. 
- -As an example, your recipes could use the following directive to inherit -an ``autotools.bbclass`` file. The class file would contain common -functionality for using Autotools that could be shared across recipes:: - - inherit autotools - -In this case, BitBake would search for the directory -``classes/autotools.bbclass`` in :term:`BBPATH`. - -.. note:: - - You can override any values and functions of the inherited class - within your recipe by doing so after the "inherit" statement. - -If you want to use the directive to inherit multiple classes, separate -them with spaces. The following example shows how to inherit both the -``buildhistory`` and ``rm_work`` classes:: - - inherit buildhistory rm_work - -An advantage with the inherit directive as compared to both the -:ref:`include ` and :ref:`require ` -directives is that you can inherit class files conditionally. You can -accomplish this by using a variable expression after the ``inherit`` -statement. - -For inheriting classes conditionally, using the :ref:`inherit_defer -` directive is advised as -:ref:`inherit_defer ` is -evaluated at the end of parsing. - -.. _ref-bitbake-user-manual-metadata-inherit-defer: - -``inherit_defer`` Directive -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The :ref:`inherit_defer ` -directive works like the :ref:`inherit -` directive, except that it is only -evaluated at the end of parsing. Its usage is recommended when a conditional -expression is used. - -This allows conditional expressions to be evaluated "late", meaning changes to -the variable after the line is parsed will take effect. With the :ref:`inherit -` directive this is not the case. - -Here is an example:: - - inherit_defer ${VARNAME} - -If ``VARNAME`` is -going to be set, it needs to be set before the ``inherit_defer`` statement is -parsed. One way to achieve a conditional inherit in this case is to use -overrides:: - - VARIABLE = "" - VARIABLE:someoverride = "myclass" - -Another method is by using :ref:`anonymous Python -`. -Here is an example:: - - python () { - if condition == value: - d.setVar('VARIABLE', 'myclass') - else: - d.setVar('VARIABLE', '') - } - -Alternatively, you could use an inline Python expression in the -following form:: - - inherit_defer ${@'classname' if condition else ''} - -Or:: - - inherit_defer ${@bb.utils.contains('VARIABLE', 'something', 'classname', '', d)} - -In all cases, if the expression evaluates to an -empty string, the statement does not trigger a syntax error because it -becomes a no-op. - -See also :term:`BB_DEFER_BBCLASSES` for automatically promoting classes -``inherit`` calls to ``inherit_defer``. - -.. _ref-include-directive: - -``include`` Directive ---------------------- - -The ``include`` directive causes BitBake to parse a given file, -and to include that file's contents at the location of the -``include`` statement. This directive is similar to its equivalent -in Make, except that if the path specified on the BitBake ``include`` -line is a relative path, BitBake will search for it on the path designated -by :term:`BBPATH` and will include *only the first matching file*. - -The ``include`` directive is a more generic method of including -functionality as compared to the :ref:`inherit ` -directive, which is restricted to class (i.e. ``.bbclass``) files. The -``include`` directive is applicable for any other kind of shared or -encapsulated functionality or configuration that does not suit a -``.bbclass`` file. 
-
-For example, if you needed a recipe to include some self-test definitions,
-you might write::
-
-   include test_defs.inc
-
-The ``include`` directive does not produce an error if the specified file
-cannot be found. If the included file *must* exist, then you should
-use :ref:`require <require-inclusion>` instead, which will generate an error
-if the file cannot be found.
-
-.. note::
-
-   Note well that the ``include`` directive will include the first matching
-   file and nothing further (which is almost always the behaviour you want).
-   If you need to include all matching files, you need to use the
-   ``include_all`` directive, explained below.
-
-.. _ref-include-all-directive:
-
-``include_all`` Directive
--------------------------
-
-The ``include_all`` directive works like the :ref:`include
-<ref-include-directive>`
-directive but will include *all* of the files that match the specified path in
-the enabled layers (layers part of :term:`BBLAYERS`).
-
-.. note::
-
-   This behaviour is rarely what you want in normal operation, and should
-   be reserved for only those situations when you explicitly want to parse
-   and include all matching files found across all layers, as the following
-   example shows.
-
-As a realistic example of this directive, imagine that all of your active
-layers contain a file ``conf/distro/include/maintainers.inc``, containing
-maintainer information for the recipes in that layer, and you wanted to
-collect all of the content from all of those files across all of those layers.
-You could use the statement::
-
-   include_all conf/distro/include/maintainers.inc
-
-In this case, BitBake will iterate through all of the directories in
-the colon-separated :term:`BBPATH` (from left to right) and collect the
-contents of all matching files, so you end up with the maintainer
-information of all of your active layers, not just the first one.
-
-As the ``include_all`` directive uses the ``include`` directive in the
-background, as with ``include``, no error is produced if no files are matched.
-
-.. _require-inclusion:
-
-``require`` Directive
----------------------
-
-BitBake understands the ``require`` directive. This directive behaves
-just like the ``include`` directive with the exception that BitBake
-raises a parsing error if the file to be included cannot be found. Thus,
-any file you require is inserted into the file that is being parsed at
-the location of the directive.
-
-The require directive, like the include directive previously described,
-is a more generic method of including functionality as compared to the
-:ref:`inherit <ref-bitbake-user-manual-metadata-inherit>` directive, which
-is restricted to class (i.e. ``.bbclass``) files. The require directive is
-applicable for any other kind of shared or encapsulated functionality or
-configuration that does not suit a ``.bbclass`` file.
-
-Similar to how BitBake handles :ref:`include <ref-include-directive>`, if
-the path specified on the require line is a relative path, BitBake
-locates the first file it can find within :term:`BBPATH`.
-
-As an example, suppose you have two versions of a recipe (e.g.
-``foo_1.2.2.bb`` and ``foo_2.0.0.bb``) where each version contains some
-identical functionality that could be shared. You could create an
-include file named ``foo.inc`` that contains the common definitions
-needed to build "foo". You need to be sure ``foo.inc`` is located in the
-same directory as your two recipe files as well.
Once these conditions -are set up, you can share the functionality using a ``require`` -directive from within each recipe:: - - require foo.inc - -``INHERIT`` Configuration Directive ------------------------------------ - -When creating a configuration file (``.conf``), you can use the -:term:`INHERIT` configuration directive to inherit a -class. BitBake only supports this directive when used within a -configuration file. - -As an example, suppose you needed to inherit a class file called -``abc.bbclass`` from a configuration file as follows:: - - INHERIT += "abc" - -This configuration directive causes the named class to be inherited at -the point of the directive during parsing. As with the ``inherit`` -directive, the ``.bbclass`` file must be located in a "classes" -subdirectory in one of the directories specified in :term:`BBPATH`. - -.. note:: - - Because .conf files are parsed first during BitBake's execution, using - INHERIT to inherit a class effectively inherits the class globally (i.e. for - all recipes). - -If you want to use the directive to inherit multiple classes, you can -provide them on the same line in the ``local.conf`` file. Use spaces to -separate the classes. The following example shows how to inherit both -the ``autotools`` and ``pkgconfig`` classes:: - - INHERIT += "autotools pkgconfig" - -``addfragments`` Directive --------------------------- - -This directive allows fine-tuning local configurations with configuration -snippets contained in layers in a structured, controlled way. Typically it would -go into ``bitbake.conf``, for example:: - - addfragments conf/fragments OE_FRAGMENTS OE_FRAGMENTS_METADATA_VARS OE_FRAGMENTS_BUILTIN - -``addfragments`` takes four parameters: - -- path prefix for fragment files inside the layer file tree that bitbake - uses to construct full paths to the fragment files - -- name of variable that holds the list of enabled fragments in an - active build - -- name of variable that contains a list of variable names containing - fragment-specific metadata (such as descriptions) - -- name of variable that contains definitions for built-in fragments - -This allows listing enabled configuration fragments in ``OE_FRAGMENTS`` -variable like this:: - - OE_FRAGMENTS = "core/domain/somefragment core/someotherfragment anotherlayer/anotherdomain/anotherfragment" - -Fragment names listed in this variable must be prefixed by the layer name -where a fragment file is located, defined by :term:`BBFILE_COLLECTIONS` in ``layer.conf``. - -The implementation then expands this list into -:ref:`require ` -directives with full paths to respective layers:: - - require /path/to/core-layer/conf/fragments/domain/somefragment.conf - require /path/to/core-layer/conf/fragments/someotherfragment.conf - require /path/to/another-layer/conf/fragments/anotherdomain/anotherfragment.conf - -The variable containing a list of fragment metadata variables could look like this:: - - OE_FRAGMENTS_METADATA_VARS = "BB_CONF_FRAGMENT_SUMMARY BB_CONF_FRAGMENT_DESCRIPTION" - -The implementation will add a flag containing the fragment name to each of those variables -when parsing fragments, so that the variables are namespaced by fragment name, and do not override -each other when several fragments are enabled. 
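-
-As an illustrative sketch (the ``SOMEFEATURE`` variable here is
-hypothetical), a fragment file ``conf/fragments/domain/somefragment.conf``
-in a layer whose collection name is "core" might combine its metadata
-with ordinary configuration settings::
-
-   BB_CONF_FRAGMENT_SUMMARY = "Enable some optional feature"
-   BB_CONF_FRAGMENT_DESCRIPTION = "A longer description of the feature"
-   SOMEFEATURE = "1"
-
-Enabling it is then a matter of adding "core/domain/somefragment" to
-``OE_FRAGMENTS``, after which the two metadata variables are namespaced
-by fragment name as described above.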
-
-The variable containing built-in fragment definitions could look like this::
-
-   OE_FRAGMENTS_BUILTIN = "someprefix:SOMEVARIABLE anotherprefix:ANOTHERVARIABLE"
-
-and then if 'someprefix/somevalue' is added to the variable that holds the list
-of enabled fragments::
-
-   OE_FRAGMENTS = "... someprefix/somevalue"
-
-BitBake will treat that as a direct value assignment in its configuration::
-
-   SOMEVARIABLE = "somevalue"
-
-Locating Include Files
-----------------------
-
-BitBake uses the :term:`BBPATH` variable to locate needed include files.
-Additionally, BitBake searches the current directory for :ref:`include
-<ref-include-directive>` and :ref:`require <require-inclusion>` directives.
-
-.. note::
-
-   The BBPATH variable is analogous to the environment variable PATH.
-
-For these two directives, BitBake includes the first file it finds.
-
-.. note::
-
-   It is also possible to include *all* occurrences of a file with the same name
-   with the :ref:`include_all <ref-include-all-directive>` directive.
-
-Let's consider the following statement called from a recipe file located in
-``/layers/meta-custom2/recipes-example/example/example_0.1.bb``::
-
-   require myfile.inc
-
-Where ``myfile.inc`` is located in ``/layers/meta-custom2/recipes-example/example/``.
-
-And let's assume that the value of :term:`BBPATH` is
-``/layers/meta-custom1:/layers/meta-custom2``. Then BitBake will try to find
-``myfile.inc`` in this order::
-
-   /layers/meta-custom2/recipes-example/example/myfile.inc
-   /layers/meta-custom1/myfile.inc
-   /layers/meta-custom2/myfile.inc
-
-In this case the first path of the list matches and BitBake includes this file
-in ``example_0.1.bb``.
-
-Another common example would be::
-
-   require recipes-other/other/otherfile.inc
-
-Where ``otherfile.inc`` is located in
-``/layers/meta-custom1/recipes-other/other/``.
-
-In this case, the following paths would be searched::
-
-   /layers/meta-custom2/recipes-example/example/recipes-other/other/otherfile.inc
-   /layers/meta-custom1/recipes-other/other/otherfile.inc
-   /layers/meta-custom2/recipes-other/other/otherfile.inc
-
-This time, the second item of this list would be matched.
-
-.. note::
-
-   In the above examples, the exact same search order applies for the
-   :ref:`include <ref-include-directive>` directive.
-
-Locating Class Files
---------------------
-
-Like include files, class files are located using the :term:`BBPATH` variable.
-The classes can be included in the ``classes-recipe``, ``classes-global`` and
-``classes`` directories, as explained in the
-:ref:`bitbake-user-manual/bitbake-user-manual-intro:Class types` section of the
-BitBake User Manual. Like for the :ref:`include <ref-include-directive>` and
-:ref:`require <require-inclusion>` directives, BitBake stops and inherits the
-first class that it finds.
-
-For classes inherited with the :ref:`inherit
-<ref-bitbake-user-manual-metadata-inherit>` directive, BitBake will try to
-locate the class under each ``classes-recipe`` directory for each path in
-:term:`BBPATH`, and then do the same for each ``classes`` directory for each
-path in :term:`BBPATH`.
-
-For example, if the value :term:`BBPATH` is
-``/layers/meta-custom1:/layers/meta-custom2`` then the ``hello`` class
-would be searched in this order::
-
-   /layers/meta-custom1/classes-recipe/hello.bbclass
-   /layers/meta-custom2/classes-recipe/hello.bbclass
-   /layers/meta-custom1/classes/hello.bbclass
-   /layers/meta-custom2/classes/hello.bbclass
-
-.. note::
-
-   Note that the order of the list above does not depend on where the class is
-   inherited from.
-
-Likewise, for classes inherited with the :term:`INHERIT` variable, the
-``classes-global`` directory is searched first, and the ``classes`` directory is
-searched second. Taking the above example, this would result in the following
-list::
-
-   /layers/meta-custom1/classes-global/hello.bbclass
-   /layers/meta-custom2/classes-global/hello.bbclass
-   /layers/meta-custom1/classes/hello.bbclass
-   /layers/meta-custom2/classes/hello.bbclass
-
-Functions
-=========
-
-As with most languages, functions are the building blocks that are used
-to build up operations into tasks. BitBake supports these types of
-functions:
-
-- *Shell Functions:* Functions written in shell script and executed
-  either directly as functions, tasks, or both. They can also be called
-  by other shell functions.
-
-- *BitBake-Style Python Functions:* Functions written in Python and
-  executed by BitBake or other Python functions using
-  ``bb.build.exec_func()``.
-
-- *Python Functions:* Functions written in Python and executed by
-  Python.
-
-- *Anonymous Python Functions:* Python functions executed automatically
-  during parsing.
-
-Regardless of the type of function, you can only define them in class
-(``.bbclass``) and recipe (``.bb`` or ``.inc``) files.
-
-Shell Functions
----------------
-
-Functions written in shell script are executed either directly as
-functions, tasks, or both. They can also be called by other shell
-functions. Here is an example shell function definition::
-
-   some_function () {
-      echo "Hello World"
-   }
-
-When you create these types of functions in
-your recipe or class files, you need to follow the shell programming
-rules. The scripts are executed by ``/bin/sh``, which may not be a bash
-shell but might be something such as ``dash``. You should not use
-Bash-specific scripting (bashisms).
-
-Overrides and override-style operators like ``:append`` and ``:prepend``
-can also be applied to shell functions. Most commonly, this application
-would be used in a ``.bbappend`` file to modify functions in the main
-recipe. It can also be used to modify functions inherited from classes.
-
-As an example, consider the following::
-
-   do_foo() {
-      bbplain first
-      fn
-   }
-
-   fn:prepend() {
-      bbplain second
-   }
-
-   fn() {
-      bbplain third
-   }
-
-   do_foo:append() {
-      bbplain fourth
-   }
-
-Running ``do_foo`` prints the following::
-
-   recipename do_foo: first
-   recipename do_foo: second
-   recipename do_foo: third
-   recipename do_foo: fourth
-
-.. note::
-
-   Overrides and override-style operators can be applied to any shell
-   function, not just :ref:`tasks <bitbake-user-manual/bitbake-user-manual-metadata:tasks>`.
-
-You can use the ``bitbake -e recipename`` command to view the final
-assembled function after all overrides have been applied.
-
-BitBake-Style Python Functions
-------------------------------
-
-These functions are written in Python and executed by BitBake or other
-Python functions using ``bb.build.exec_func()``.
-
-An example BitBake function is::
-
-   python some_python_function () {
-       d.setVar("TEXT", "Hello World")
-       print(d.getVar("TEXT"))
-   }
-
-Because the
-Python "bb" and "os" modules are already imported, you do not need to
-import these modules. Also in these types of functions, the datastore
-("d") is a global variable and is always automatically available.
-
-.. note::
-
-   Variable expressions (e.g. ``${X}``) are no longer expanded within Python
-   functions. This behavior is intentional in order to allow you to freely set
-   variable values to expandable expressions without having them expanded
-   prematurely.
If you do wish to expand a variable within a Python function, - use ``d.getVar("X")`` . Or, for more complicated expressions, use ``d.expand()``. - -Similar to shell functions, you can also apply overrides and -override-style operators to BitBake-style Python functions. - -As an example, consider the following:: - - python do_foo:prepend() { - bb.plain("first") - } - - python do_foo() { - bb.plain("second") - } - - python do_foo:append() { - bb.plain("third") - } - -Running ``do_foo`` prints the following:: - - recipename do_foo: first - recipename do_foo: second - recipename do_foo: third - -You can use the ``bitbake -e recipename`` command to view -the final assembled function after all overrides have been applied. - -Python Functions ----------------- - -These functions are written in Python and are executed by other Python -code. Examples of Python functions are utility functions that you intend -to call from in-line Python or from within other Python functions. Here -is an example:: - - def get_depends(d): - if d.getVar('SOMECONDITION'): - return "dependencywithcond" - else: - return "dependency" - - SOMECONDITION = "1" - DEPENDS = "${@get_depends(d)}" - -This would result in :term:`DEPENDS` containing ``dependencywithcond``. - -Here are some things to know about Python functions: - -- Python functions can take parameters. - -- The BitBake datastore is not automatically available. Consequently, - you must pass it in as a parameter to the function. - -- The "bb" and "os" Python modules are automatically available. You do - not need to import them. - -BitBake-Style Python Functions Versus Python Functions ------------------------------------------------------- - -Following are some important differences between BitBake-style Python -functions and regular Python functions defined with "def": - -- Only BitBake-style Python functions can be :ref:`tasks `. - -- Overrides and override-style operators can only be applied to - BitBake-style Python functions. - -- Only regular Python functions can take arguments and return values. - -- :ref:`Variable flags ` such as - ``[dirs]``, ``[cleandirs]``, and ``[lockfiles]`` can be used on BitBake-style - Python functions, but not on regular Python functions. - -- BitBake-style Python functions generate a separate - ``${``\ :term:`T`\ ``}/run.``\ function-name\ ``.``\ pid - script that is executed to run the function, and also generate a log - file in ``${T}/log.``\ function-name\ ``.``\ pid if they are executed - as tasks. - - Regular Python functions execute "inline" and do not generate any - files in ``${T}``. - -- Regular Python functions are called with the usual Python syntax. - BitBake-style Python functions are usually tasks and are called - directly by BitBake, but can also be called manually from Python code - by using the ``bb.build.exec_func()`` function. Here is an example:: - - bb.build.exec_func("my_bitbake_style_function", d) - - .. note:: - - ``bb.build.exec_func()`` can also be used to run shell functions from Python - code. If you want to run a shell function before a Python function within - the same task, then you can use a parent helper Python function that - starts by running the shell function with ``bb.build.exec_func()`` and then - runs the Python code. - - To detect errors from functions executed with - ``bb.build.exec_func()``, you can catch the ``bb.build.FuncFailed`` - exception. - - .. note:: - - Functions in metadata (recipes and classes) should not themselves raise - ``bb.build.FuncFailed``. 
Rather, ``bb.build.FuncFailed`` should be viewed as a - general indicator that the called function failed by raising an - exception. For example, an exception raised by ``bb.fatal()`` will be caught - inside ``bb.build.exec_func()``, and a ``bb.build.FuncFailed`` will be raised in - response. - -Due to their simplicity, you should prefer regular Python functions over -BitBake-style Python functions unless you need a feature specific to -BitBake-style Python functions. Regular Python functions in metadata are -a more recent invention than BitBake-style Python functions, and older -code tends to use ``bb.build.exec_func()`` more often. - -Anonymous Python Functions --------------------------- - -Sometimes it is useful to set variables or perform other operations -programmatically during parsing. To do this, you can define special -Python functions, called anonymous Python functions, that run at the end -of parsing. For example, the following conditionally sets a variable -based on the value of another variable:: - - python () { - if d.getVar('SOMEVAR') == 'value': - d.setVar('ANOTHERVAR', 'value2') - } - -An equivalent way to mark a function as an anonymous function is to give it -the name "__anonymous", rather than no name. - -Anonymous Python functions always run at the end of parsing, regardless -of where they are defined. If a recipe contains many anonymous -functions, they run in the same order as they are defined within the -recipe. As an example, consider the following snippet:: - - python () { - d.setVar('FOO', 'foo 2') - } - - FOO = "foo 1" - - python () { - d.appendVar('BAR',' bar 2') - } - - BAR = "bar 1" - -The previous example is conceptually -equivalent to the following snippet:: - - FOO = "foo 1" - BAR = "bar 1" - FOO = "foo 2" - BAR += "bar 2" - -``FOO`` ends up with the value "foo 2", and -``BAR`` with the value "bar 1 bar 2". Just as in the second snippet, the -values set for the variables within the anonymous functions become -available to tasks, which always run after parsing. - -Overrides and override-style operators such as "``:append``" are applied -before anonymous functions run. In the following example, ``FOO`` ends -up with the value "foo from anonymous":: - - FOO = "foo" - FOO:append = " from outside" - - python () { - d.setVar("FOO", "foo from anonymous") - } - -For methods -you can use with anonymous Python functions, see the -":ref:`bitbake-user-manual/bitbake-user-manual-metadata:functions you can call from within python`" -section. For a different method to run Python code during parsing, see -the ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:inline python variable expansion`" section. - -Flexible Inheritance for Class Functions ----------------------------------------- - -Through coding techniques and the use of ``EXPORT_FUNCTIONS``, BitBake -supports exporting a function from a class such that the class function -appears as the default implementation of the function, but can still be -called if a recipe inheriting the class needs to define its own version -of the function. - -To understand the benefits of this feature, consider the basic scenario -where a class defines a task function and your recipe inherits the -class. In this basic scenario, your recipe inherits the task function as -defined in the class. If desired, your recipe can add to the start and -end of the function by using the ":prepend" or ":append" operations -respectively, or it can redefine the function completely. 
However, if it -redefines the function, there is no means for it to call the class -version of the function. ``EXPORT_FUNCTIONS`` provides a mechanism that -enables the recipe's version of the function to call the original -version of the function. - -To make use of this technique, you need the following things in place: - -- The class needs to define the function as follows:: - - classname_functionname - - For example, if you have a class file - ``bar.bbclass`` and a function named ``do_foo``, the class must - define the function as follows:: - - bar_do_foo - -- The class needs to contain the ``EXPORT_FUNCTIONS`` statement as - follows:: - - EXPORT_FUNCTIONS functionname - - For example, continuing with - the same example, the statement in the ``bar.bbclass`` would be as - follows:: - - EXPORT_FUNCTIONS do_foo - -- You need to call the function appropriately from within your recipe. - Continuing with the same example, if your recipe needs to call the - class version of the function, it should call ``bar_do_foo``. - Assuming ``do_foo`` was a shell function and ``EXPORT_FUNCTIONS`` was - used as above, the recipe's function could conditionally call the - class version of the function as follows:: - - do_foo() { - if [ somecondition ] ; then - bar_do_foo - else - # Do something else - fi - } - - To call your modified version of the function as defined in your recipe, - call it as ``do_foo``. - -With these conditions met, your single recipe can freely choose between -the original function as defined in the class file and the modified -function in your recipe. If you do not set up these conditions, you are -limited to using one function or the other. - -Tasks -===== - -Tasks are BitBake execution units that make up the steps that BitBake -can run for a given recipe. Tasks are only supported in recipes and -classes (i.e. in ``.bb`` files and files included or inherited from -``.bb`` files). By convention, tasks have names that start with "do\_". - -Promoting a Function to a Task ------------------------------- - -Tasks are either :ref:`shell functions ` or -:ref:`BitBake-style Python functions ` -that have been promoted to tasks by using the ``addtask`` command. The -``addtask`` command can also optionally describe dependencies between -the task and other tasks. Here is an example that shows how to define a -task and declare some dependencies:: - - python do_printdate () { - import datetime - bb.plain('Date: %s' % (datetime.date.today())) - } - addtask printdate after do_fetch before do_build - -The first argument to ``addtask`` is the name -of the function to promote to a task. If the name does not start with -"do\_", "do\_" is implicitly added, which enforces the convention that all -task names start with "do\_". - -In the previous example, the ``do_printdate`` task becomes a dependency -of the ``do_build`` task, which is the default task (i.e. the task run -by the ``bitbake`` command unless another task is specified explicitly). -Additionally, the ``do_printdate`` task becomes dependent upon the -``do_fetch`` task. Running the ``do_build`` task results in the -``do_printdate`` task running first. - -.. note:: - - If you try out the previous example, you might see that the - ``do_printdate`` - task is only run the first time you build the recipe with the - ``bitbake`` - command. This is because BitBake considers the task "up-to-date" - after that initial run. 
If you want to force the task to always be - rerun for experimentation purposes, you can make BitBake always - consider the task "out-of-date" by using the - :ref:`[nostamp] ` - variable flag, as follows:: - - do_printdate[nostamp] = "1" - - You can also explicitly run the task and provide the - -f option as follows:: - - $ bitbake recipe -c printdate -f - - When manually selecting a task to run with the bitbake ``recipe - -c task`` command, you can omit the "do\_" prefix as part of the task - name. - -You might wonder about the practical effects of using ``addtask`` -without specifying any dependencies as is done in the following example:: - - addtask printdate - -In this example, assuming dependencies have not been -added through some other means, the only way to run the task is by -explicitly selecting it with ``bitbake`` recipe ``-c printdate``. You -can use the ``do_listtasks`` task to list all tasks defined in a recipe -as shown in the following example:: - - $ bitbake recipe -c listtasks - -For more information on task dependencies, see the -":ref:`bitbake-user-manual/bitbake-user-manual-execution:dependencies`" section. - -See the ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:variable flags`" section for information -on variable flags you can use with tasks. - -.. note:: - - While it's infrequent, it's possible to define multiple tasks as - dependencies when calling ``addtask``. For example, here's a snippet - from the OpenEmbedded class file ``package_tar.bbclass``:: - - addtask package_write_tar before do_build after do_packagedata do_package - - Note how the ``package_write_tar`` task has to wait until both of - ``do_packagedata`` and ``do_package`` complete. - -Deleting a Task ---------------- - -As well as being able to add tasks, you can delete them. Simply use the -``deltask`` command to delete a task. For example, to delete the example -task used in the previous sections, you would use:: - - deltask printdate - -If you delete a task using the ``deltask`` command and the task has -dependencies, the dependencies are not reconnected. For example, suppose -you have three tasks named ``do_a``, ``do_b``, and ``do_c``. -Furthermore, ``do_c`` is dependent on ``do_b``, which in turn is -dependent on ``do_a``. Given this scenario, if you use ``deltask`` to -delete ``do_b``, the implicit dependency relationship between ``do_c`` -and ``do_a`` through ``do_b`` no longer exists, and ``do_c`` -dependencies are not updated to include ``do_a``. Thus, ``do_c`` is free -to run before ``do_a``. - -If you want dependencies such as these to remain intact, use the -``[noexec]`` varflag to disable the task instead of using the -``deltask`` command to delete it:: - - do_b[noexec] = "1" - -Passing Information Into the Build Task Environment ---------------------------------------------------- - -When running a task, BitBake tightly controls the shell execution -environment of the build tasks to make sure unwanted contamination from -the build machine cannot influence the build. - -.. note:: - - By default, BitBake cleans the environment to include only those - things exported or listed in its passthrough list to ensure that the - build environment is reproducible and consistent. You can prevent this - "cleaning" by setting the :term:`BB_PRESERVE_ENV` variable. - -Consequently, if you do want something to get passed into the build task -environment, you must take these two steps: - -#. Tell BitBake to load what you want from the environment into the - datastore. 
You can do so through the
-   :term:`BB_ENV_PASSTHROUGH` and
-   :term:`BB_ENV_PASSTHROUGH_ADDITIONS` variables. For
-   example, assume you want to prevent the build system from accessing
-   your ``$HOME/.ccache`` directory. The following command adds the
-   environment variable ``CCACHE_DIR`` to BitBake's passthrough
-   list to allow that variable into the datastore::
-
-      export BB_ENV_PASSTHROUGH_ADDITIONS="$BB_ENV_PASSTHROUGH_ADDITIONS CCACHE_DIR"
-
-#. Tell BitBake to export what you have loaded into the datastore to the
-   task environment of every running task. Loading something from the
-   environment into the datastore (previous step) only makes it
-   available in the datastore. To export it to the task environment of
-   every running task, use a command similar to the following in your
-   local configuration file ``local.conf`` or your distribution
-   configuration file::
-
-      export CCACHE_DIR
-
-   .. note::
-
-      A side effect of the previous steps is that BitBake records the
-      variable as a dependency of the build process in things like the
-      setscene checksums. If doing so results in unnecessary rebuilds of
-      tasks, you can also flag the variable so that the setscene code
-      ignores the dependency when it creates checksums.
-
-Sometimes, it is useful to be able to obtain information from the
-original execution environment. BitBake saves a copy of the original
-environment into a special variable named :term:`BB_ORIGENV`.
-
-The :term:`BB_ORIGENV` variable returns a datastore object that can be
-queried using the standard datastore operators such as
-``getVar("X", False)``. The datastore object is useful, for example, to
-find the original ``DISPLAY`` variable. Here is an example::
-
-   origenv = d.getVar("BB_ORIGENV", False)
-   bar = origenv.getVar("BAR", False)
-
-The previous example returns ``BAR`` from the original execution
-environment.
-
-Variable Flags
-==============
-
-Variable flags (varflags) help control a task's functionality and
-dependencies. BitBake reads and writes varflags to the datastore using
-the following command forms::
-
-   variable = d.getVarFlags("variable")
-   self.d.setVarFlags("FOO", {"func": True})
-
-When working with varflags, the same syntax, with the exception of
-overrides, applies. In other words, you can set, append, and prepend
-varflags just like variables. See the
-":ref:`bitbake-user-manual/bitbake-user-manual-metadata:variable flag syntax`" section for details.
-
-BitBake has a defined set of varflags available for recipes and classes.
-Tasks support a number of these flags which control various
-functionality of the task:
-
--  ``[cleandirs]``: Empty directories that should be created before
-   the task runs. Directories that already exist are removed and
-   recreated to empty them.
-
--  ``[depends]``: Controls inter-task dependencies. See the
-   :term:`DEPENDS` variable and the
-   ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:inter-task
-   dependencies`" section for more information.
-
--  ``[deptask]``: Controls task build-time dependencies. See the
-   :term:`DEPENDS` variable and the ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:build dependencies`" section for more information.
-
--  ``[dirs]``: Directories that should be created before the task
-   runs. Directories that already exist are left as is. The last
-   directory listed is used as the current working directory for the
-   task.
-
--  ``[file-checksums]``: Controls the file dependencies for a task. The
-   baseline file list is the set of files associated with
-   :term:`SRC_URI`.
It may be used to set additional dependencies on
-   files not associated with :term:`SRC_URI`.
-
-   The value set to the list is a file-boolean pair, where the first
-   value is the file name and the second is whether or not it
-   physically exists on the filesystem::
-
-      do_configure[file-checksums] += "${MY_DIRPATH}/my-file.txt:True"
-
-   It is important to record any paths which the task looked at and
-   which didn't exist. This means that if these do exist at a later
-   time, the task can be rerun with the new additional files. The
-   "exists" True or False value after the path allows this to be
-   handled.
-
--  ``[lockfiles]``: Specifies one or more lockfiles to lock while the
-   task executes. Only one task may hold a lockfile, and any task that
-   attempts to lock an already locked file will block until the lock is
-   released. You can use this variable flag to accomplish mutual
-   exclusion.
-
--  ``[network]``: When set to "1", allows a task to access the network. By
-   default, only the ``do_fetch`` task is granted network access. Recipes
-   shouldn't access the network outside of ``do_fetch`` as it usually
-   undermines fetcher source mirroring, image and licence manifests, software
-   auditing and supply chain security.
-
--  ``[noexec]``: When set to "1", marks the task as being empty, with
-   no execution required. You can use the ``[noexec]`` flag to set up
-   tasks as dependency placeholders, or to disable tasks defined
-   elsewhere that are not needed in a particular recipe.
-
--  ``[nostamp]``: When set to "1", tells BitBake to not generate a
-   stamp file for a task, which implies the task should always be
-   executed.
-
-   .. caution::
-
-      Any task that depends (possibly indirectly) on a ``[nostamp]`` task will
-      always be executed as well. This can cause unnecessary rebuilding if you
-      are not careful.
-
--  ``[number_threads]``: Limits tasks to a specific number of
-   simultaneous threads during execution. This varflag is useful when
-   your build host has a large number of cores but certain tasks need to
-   be rate-limited due to various kinds of resource constraints (e.g. to
-   avoid network throttling). ``number_threads`` works similarly to the
-   :term:`BB_NUMBER_THREADS` variable but is task-specific.
-
-   Set the value globally. For example, the following makes sure the
-   ``do_fetch`` task uses no more than two simultaneous execution
-   threads::
-
-      do_fetch[number_threads] = "2"
-
-   .. warning::
-
-      -  Setting the varflag in individual recipes rather than globally
-         can result in unpredictable behavior.
-
-      -  Setting the varflag to a value greater than the value used in
-         the :term:`BB_NUMBER_THREADS` variable causes ``number_threads`` to
-         have no effect.
-
--  ``[postfuncs]``: List of functions to call after the completion of
-   the task.
-
--  ``[prefuncs]``: List of functions to call before the task executes.
-
--  ``[rdepends]``: Controls inter-task runtime dependencies. See the
-   :term:`RDEPENDS` variable, the
-   :term:`RRECOMMENDS` variable, and the
-   ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:inter-task dependencies`" section for
-   more information.
-
--  ``[rdeptask]``: Controls task runtime dependencies. See the
-   :term:`RDEPENDS` variable, the
-   :term:`RRECOMMENDS` variable, and the
-   ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:runtime dependencies`" section for more
-   information.
-
--  ``[recideptask]``: When set in conjunction with ``recrdeptask``,
-   specifies a task that should be inspected for additional
-   dependencies.
-
--  ``[recrdeptask]``: Controls task recursive runtime dependencies.
-   See the :term:`RDEPENDS` variable, the
-   :term:`RRECOMMENDS` variable, and the
-   ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:recursive dependencies`" section for
-   more information.
-
--  ``[stamp-extra-info]``: Extra stamp information to append to the
-   task's stamp. As an example, OpenEmbedded uses this flag to allow
-   machine-specific tasks.
-
--  ``[umask]``: The umask to run the task under.
-
-Several varflags are useful for controlling how signatures are
-calculated for variables. For more information on this process, see the
-":ref:`bitbake-user-manual/bitbake-user-manual-execution:checksums (signatures)`" section.
-
--  ``[vardeps]``: Specifies a space-separated list of additional
-   variables to add to a variable's dependencies for the purposes of
-   calculating its signature. Adding variables to this list is useful,
-   for example, when a function refers to a variable in a manner that
-   does not allow BitBake to automatically determine that the variable
-   is referred to.
-
--  ``[vardepsexclude]``: Specifies a space-separated list of variables
-   that should be excluded from a variable's dependencies for the
-   purposes of calculating its signature.
-
--  ``[vardepvalue]``: If set, instructs BitBake to ignore the actual
-   value of the variable and instead use the specified value when
-   calculating the variable's signature.
-
--  ``[vardepvalueexclude]``: Specifies a pipe-separated list of
-   strings to exclude from the variable's value when calculating the
-   variable's signature.
-
-Events
-======
-
-BitBake allows installation of event handlers within recipe and class
-files. Events are triggered at certain points during operation, such as
-the beginning of operation against a given recipe (i.e. ``*.bb``), the
-start of a given task, a task failure, a task success, and so forth. The
-intent is to make it easy to do things like email notification on build
-failures.
-
-Following is an example event handler that prints the name of the event
-and the content of the :term:`FILE` variable::
-
-   addhandler myclass_eventhandler
-   python myclass_eventhandler() {
-       from bb.event import getName
-       print("The name of the Event is %s" % getName(e))
-       print("The file we run for is %s" % d.getVar('FILE'))
-   }
-   myclass_eventhandler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted"
-
-In the previous example, an eventmask has been
-set so that the handler only sees the "BuildStarted" and
-"BuildCompleted" events. This event handler gets called every time an
-event matching the eventmask is triggered. A global variable "e" is
-defined, which represents the current event. With the ``getName(e)``
-method, you can get the name of the triggered event. The global
-datastore is available as "d". In legacy code, you might see "e.data"
-used to get the datastore. However, realize that "e.data" is deprecated
-and you should use "d" going forward.
-
-The context of the datastore is appropriate to the event in question.
-For example, "BuildStarted" and "BuildCompleted" events run before any
-tasks are executed so would be in the global configuration datastore
-namespace. No recipe-specific metadata exists in that namespace. The
-"BuildStarted" and "BuildCompleted" events also run in the main
-cooker/server process rather than any worker context. Thus, any changes
-made to the datastore would be seen by other cooker/server events within
-the current build but not seen outside of that build or in any worker
-context.
Task events run in the actual tasks in question consequently -have recipe-specific and task-specific contents. These events run in the -worker context and are discarded at the end of task execution. - -During a standard build, the following common events might occur. The -following events are the most common kinds of events that most metadata -might have an interest in viewing: - -- ``bb.event.ConfigParsed()``: Fired when the base configuration; which - consists of ``bitbake.conf``, ``base.bbclass`` and any global - :term:`INHERIT` statements; has been parsed. You can see multiple such - events when each of the workers parse the base configuration or if - the server changes configuration and reparses. Any given datastore - only has one such event executed against it, however. If - :term:`BB_INVALIDCONF` is set in the datastore by the event - handler, the configuration is reparsed and a new event triggered, - allowing the metadata to update configuration. - -- ``bb.event.HeartbeatEvent()``: Fires at regular time intervals of one - second. You can configure the interval time using the - ``BB_HEARTBEAT_EVENT`` variable. The event's "time" attribute is the - ``time.time()`` value when the event is triggered. This event is - useful for activities such as system state monitoring. - -- ``bb.event.ParseStarted()``: Fired when BitBake is about to start - parsing recipes. This event's "total" attribute represents the number - of recipes BitBake plans to parse. - -- ``bb.event.ParseProgress()``: Fired as parsing progresses. This - event's "current" attribute is the number of recipes parsed as well - as the "total" attribute. - -- ``bb.event.ParseCompleted()``: Fired when parsing is complete. This - event's "cached", "parsed", "skipped", "virtuals", "masked", and - "errors" attributes provide statistics for the parsing results. - -- ``bb.event.BuildStarted()``: Fired when a new build starts. BitBake - fires multiple "BuildStarted" events (one per configuration) when - multiple configuration (multiconfig) is enabled. - -- ``bb.build.TaskStarted()``: Fired when a task starts. This event's - "taskfile" attribute points to the recipe from which the task - originates. The "taskname" attribute, which is the task's name, - includes the ``do_`` prefix, and the "logfile" attribute point to - where the task's output is stored. Finally, the "time" attribute is - the task's execution start time. - -- ``bb.build.TaskInvalid()``: Fired if BitBake tries to execute a task - that does not exist. - -- ``bb.build.TaskFailedSilent()``: Fired for setscene tasks that fail - and should not be presented to the user verbosely. - -- ``bb.build.TaskFailed()``: Fired for normal tasks that fail. - -- ``bb.build.TaskSucceeded()``: Fired when a task successfully - completes. - -- ``bb.event.BuildCompleted()``: Fired when a build finishes. - -- ``bb.cooker.CookerExit()``: Fired when the BitBake server/cooker - shuts down. This event is usually only seen by the UIs as a sign they - should also shutdown. - -This next list of example events occur based on specific requests to the -server. 
These events are often used to communicate larger pieces of -information from the BitBake server to other parts of BitBake such as -user interfaces: - -- ``bb.event.TreeDataPreparationStarted()`` -- ``bb.event.TreeDataPreparationProgress()`` -- ``bb.event.TreeDataPreparationCompleted()`` -- ``bb.event.DepTreeGenerated()`` -- ``bb.event.CoreBaseFilesFound()`` -- ``bb.event.ConfigFilePathFound()`` -- ``bb.event.FilesMatchingFound()`` -- ``bb.event.ConfigFilesFound()`` -- ``bb.event.TargetsTreeGenerated()`` - -.. _variants-class-extension-mechanism: - -Variants --- Class Extension Mechanism -====================================== - -BitBake supports multiple incarnations of a recipe file via the -:term:`BBCLASSEXTEND` variable. - -The :term:`BBCLASSEXTEND` variable is a space separated list of classes used -to "extend" the recipe for each variant. Here is an example that results in a -second incarnation of the current recipe being available. This second -incarnation will have the "native" class inherited. :: - - BBCLASSEXTEND = "native" - -.. note:: - - The mechanism for this class extension is extremely specific to the - implementation. Usually, the recipe's :term:`PROVIDES` , :term:`PN` , and - :term:`DEPENDS` variables would need to be modified by the extension - class. For specific examples, see the OE-Core native , nativesdk , and - multilib classes. - -Dependencies -============ - -To allow for efficient parallel processing, BitBake handles dependencies -at the task level. Dependencies can exist both between tasks within a -single recipe and between tasks in different recipes. Following are -examples of each: - -- For tasks within a single recipe, a recipe's ``do_configure`` task - might need to complete before its ``do_compile`` task can run. - -- For tasks in different recipes, one recipe's ``do_configure`` task - might require another recipe's ``do_populate_sysroot`` task to finish - first such that the libraries and headers provided by the other - recipe are available. - -This section describes several ways to declare dependencies. Remember, -even though dependencies are declared in different ways, they are all -simply dependencies between tasks. - -.. _dependencies-internal-to-the-bb-file: - -Dependencies Internal to the ``.bb`` File ------------------------------------------ - -BitBake uses the ``addtask`` directive to manage dependencies that are -internal to a given recipe file. You can use the ``addtask`` directive -to indicate when a task is dependent on other tasks or when other tasks -depend on that recipe. Here is an example:: - - addtask printdate after do_fetch before do_build - -In this example, the ``do_printdate`` task -depends on the completion of the ``do_fetch`` task, and the ``do_build`` -task depends on the completion of the ``do_printdate`` task. - -.. note:: - - For a task to run, it must be a direct or indirect dependency of some - other task that is scheduled to run. - - For illustration, here are some examples: - - - The directive ``addtask mytask before do_configure`` causes - ``do_mytask`` to run before ``do_configure`` runs. Be aware that - ``do_mytask`` still only runs if its :ref:`input - checksum ` has changed since the last time it was - run. Changes to the input checksum of ``do_mytask`` also - indirectly cause ``do_configure`` to run. - - - The directive ``addtask mytask after do_configure`` by itself - never causes ``do_mytask`` to run. 
``do_mytask`` can still be run - manually as follows:: - - $ bitbake recipe -c mytask - - Declaring ``do_mytask`` as a dependency of some other task that is - scheduled to run also causes it to run. Regardless, the task runs after - ``do_configure``. - -Build Dependencies ------------------- - -BitBake uses the :term:`DEPENDS` variable to manage -build time dependencies. The ``[deptask]`` varflag for tasks signifies -the task of each item listed in :term:`DEPENDS` that must complete before -that task can be executed. Here is an example:: - - do_configure[deptask] = "do_populate_sysroot" - -In this example, the ``do_populate_sysroot`` task -of each item in :term:`DEPENDS` must complete before ``do_configure`` can -execute. - -Runtime Dependencies --------------------- - -BitBake uses the :term:`PACKAGES`, :term:`RDEPENDS`, and :term:`RRECOMMENDS` -variables to manage runtime dependencies. - -The :term:`PACKAGES` variable lists runtime packages. Each of those packages -can have :term:`RDEPENDS` and :term:`RRECOMMENDS` runtime dependencies. The -``[rdeptask]`` flag for tasks is used to signify the task of each item -runtime dependency which must have completed before that task can be -executed. :: - - do_package_qa[rdeptask] = "do_packagedata" - -In the previous -example, the ``do_packagedata`` task of each item in :term:`RDEPENDS` must -have completed before ``do_package_qa`` can execute. -Although :term:`RDEPENDS` contains entries from the -runtime dependency namespace, BitBake knows how to map them back -to the build-time dependency namespace, in which the tasks are defined. - -Recursive Dependencies ----------------------- - -BitBake uses the ``[recrdeptask]`` flag to manage recursive task -dependencies. BitBake looks through the build-time and runtime -dependencies of the current recipe, looks through the task's inter-task -dependencies, and then adds dependencies for the listed task. Once -BitBake has accomplished this, it recursively works through the -dependencies of those tasks. Iterative passes continue until all -dependencies are discovered and added. - -The ``[recrdeptask]`` flag is most commonly used in high-level recipes -that need to wait for some task to finish "globally". For example, -``image.bbclass`` has the following:: - - do_rootfs[recrdeptask] += "do_packagedata" - -This statement says that the ``do_packagedata`` task of -the current recipe and all recipes reachable (by way of dependencies) -from the image recipe must run before the ``do_rootfs`` task can run. - -BitBake allows a task to recursively depend on itself by -referencing itself in the task list:: - - do_a[recrdeptask] = "do_a do_b" - -In the same way as before, this means that the ``do_a`` -and ``do_b`` tasks of the current recipe and all -recipes reachable (by way of dependencies) from the recipe -must run before the ``do_a`` task can run. In this -case BitBake will ignore the current recipe's ``do_a`` -task circular dependency on itself. - -Inter-Task Dependencies ------------------------ - -BitBake uses the ``[depends]`` flag in a more generic form to manage -inter-task dependencies. This more generic form allows for -inter-dependency checks for specific tasks rather than checks for the -data in :term:`DEPENDS`. Here is an example:: - - do_patch[depends] = "quilt-native:do_populate_sysroot" - -In this example, the ``do_populate_sysroot`` task of the target ``quilt-native`` -must have completed before the ``do_patch`` task can execute. 
- -The ``[rdepends]`` flag works in a similar way but takes targets in the -runtime namespace instead of the build-time dependency namespace. - -Functions You Can Call From Within Python -========================================= - -BitBake provides many functions you can call from within Python -functions. This section lists the most commonly used functions, and -mentions where to find others. - -Functions for Accessing Datastore Variables -------------------------------------------- - -It is often necessary to access variables in the BitBake datastore using -Python functions. The BitBake datastore has an API that allows you this -access. Here is a list of available operations: - -.. list-table:: - :widths: auto - :header-rows: 1 - - * - *Operation* - - *Description* - * - ``d.getVar("X", expand)`` - - Returns the value of variable "X". Using "expand=True" expands the - value. Returns "None" if the variable "X" does not exist. - * - ``d.setVar("X", "value")`` - - Sets the variable "X" to "value" - * - ``d.appendVar("X", "value")`` - - Adds "value" to the end of the variable "X". Acts like ``d.setVar("X", - "value")`` if the variable "X" does not exist. - * - ``d.prependVar("X", "value")`` - - Adds "value" to the start of the variable "X". Acts like - ``d.setVar("X","value")`` if the variable "X" does not exist. - * - ``d.delVar("X")`` - - Deletes the variable "X" from the datastore. Does nothing if the variable - "X" does not exist. - * - ``d.renameVar("X", "Y")`` - - Renames the variable "X" to "Y". Does nothing if the variable "X" does - not exist. - * - ``d.getVarFlag("X", flag, expand)`` - - Returns the value of variable "X". Using "expand=True" expands the - value. Returns "None" if either the variable "X" or the named flag does - not exist. - * - ``d.setVarFlag("X", flag, "value")`` - - Sets the named flag for variable "X" to "value". - * - ``d.appendVarFlag("X", flag, "value")`` - - Appends "value" to the named flag on the variable "X". Acts like - ``d.setVarFlag("X", flag, "value")`` if the named flag does not exist. - * - ``d.prependVarFlag("X", flag, "value")`` - - Prepends "value" to the named flag on the variable "X". Acts like - ``d.setVarFlag("X", flag, "value")`` if the named flag does not exist. - * - ``d.delVarFlag("X", flag)`` - - Deletes the named flag on the variable "X" from the datastore. - * - ``d.setVarFlags("X", flagsdict)`` - - Sets the flags specified in the ``flagsdict()`` - parameter. ``setVarFlags`` does not clear previous flags. Think of this - operation as ``addVarFlags``. - * - ``d.getVarFlags("X")`` - - Returns a ``flagsdict`` of the flags for the variable "X". Returns "None" - if the variable "X" does not exist. - * - ``d.delVarFlags("X")`` - - Deletes all the flags for the variable "X". Does nothing if the variable - "X" does not exist. - * - ``d.expand(expression)`` - - Expands variable references in the specified string - expression. References to variables that do not exist are left as is. For - example, ``d.expand("foo ${X}")`` expands to the literal string "foo - ${X}" if the variable "X" does not exist. - -Other Functions ---------------- - -Other functions are documented in the -:doc:`/bitbake-user-manual/bitbake-user-manual-library-functions` document. - -Extending Python Library Code ------------------------------ - -If you wish to add your own Python library code (e.g. to provide -functions/classes you can use from Python functions in the metadata) -you can do so from any layer using the ``addpylib`` directive. 
-
-This directive is typically added to your layer configuration
-(``conf/layer.conf``) although it will be handled in any ``.conf`` file.
-
-Usage is of the form::
-
-   addpylib <directory> <namespace>
-
-Where <directory> specifies the directory to add to the library path.
-The specified <namespace> is imported automatically, and if the imported
-module specifies an attribute named ``BBIMPORTS``, that list of
-sub-modules is iterated and imported too.
-
-Testing and Debugging BitBake Python code
------------------------------------------
-
-The OpenEmbedded build system implements a convenient ``pydevshell`` target which
-you can use to access the BitBake datastore and experiment with your own Python
-code. See :yocto_docs:`Using a Python Development Shell
-` in the Yocto
-Project manual for details.
-
-Task Checksums and Setscene
-===========================
-
-BitBake uses checksums (or signatures) along with the setscene to
-determine if a task needs to be run. This section describes the process.
-To help understand how BitBake does this, the section assumes an
-OpenEmbedded metadata-based example.
-
-These checksums are stored in :term:`STAMP`. You can
-examine the checksums using the following BitBake command::
-
-   $ bitbake-dumpsig
-
-This command returns the signature data in a readable
-format that allows you to examine the inputs used when the OpenEmbedded
-build system generates signatures. For example, using
-``bitbake-dumpsig`` allows you to examine the ``do_compile`` task's
-"sigdata" for a C application (e.g. ``bash``). Running the command also
-reveals that the "CC" variable is part of the inputs that are hashed.
-Any changes to this variable would invalidate the stamp and cause the
-``do_compile`` task to run.
-
-The following list describes related variables:
-
--  :term:`BB_HASHCHECK_FUNCTION`:
-   Specifies the name of the function to call during the "setscene" part
-   of the task's execution in order to validate the list of task hashes.
-
--  :term:`BB_SETSCENE_DEPVALID`:
-   Specifies a function BitBake calls that determines whether BitBake
-   requires a setscene dependency to be met.
-
--  :term:`BB_TASKHASH`: Within an executing task,
-   this variable holds the hash of the task as returned by the currently
-   enabled signature generator.
-
--  :term:`STAMP`: The base path to create stamp files.
-
--  :term:`STAMPCLEAN`: Again, the base path to
-   create stamp files but can use wildcards for matching a range of
-   files for clean operations.
-
-Wildcard Support in Variables
-=============================
-
-Support for wildcard use in variables varies depending on the context in
-which it is used. For example, some variables and filenames allow
-limited use of wildcards through the "``%``" and "``*``" characters.
-Other variables or names support Python's
-`glob <https://docs.python.org/3/library/glob.html>`_ syntax,
-`fnmatch <https://docs.python.org/3/library/fnmatch.html>`_
-syntax, or
-`Regular Expression (re) <https://docs.python.org/3/library/re.html>`_
-syntax.
-
-For variables that have wildcard support, the documentation describes
-which form of wildcard, its use, and its limitations.
diff --git a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-ref-variables-context.rst b/bitbake/doc/bitbake-user-manual/bitbake-user-manual-ref-variables-context.rst
deleted file mode 100644
index e9c454ba11..0000000000
--- a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-ref-variables-context.rst
+++ /dev/null
@@ -1,91 +0,0 @@
-.. SPDX-License-Identifier: CC-BY-2.5
-
-================
-Variable Context
-================
-
-|
-
-Variables might only have an impact or be usable in certain contexts.
Some -should only be used in global files like ``.conf``, while others are intended only -for local files like ``.bb``. This chapter aims to describe some important variable -contexts. - -.. _ref-varcontext-configuration: - -BitBake's own configuration -=========================== - -Variables starting with ``BB_`` usually configure the behaviour of BitBake itself. -For example, one could configure: - -- System resources, like disk space to be used (:term:`BB_DISKMON_DIRS`), - or the number of tasks to be run in parallel by BitBake (:term:`BB_NUMBER_THREADS`). - -- How the fetchers shall behave, e.g., :term:`BB_FETCH_PREMIRRORONLY` is used - by BitBake to determine if BitBake's fetcher shall search only - :term:`PREMIRRORS` for files. - -Those variables are usually configured globally. - -BitBake configuration -===================== - -There are variables: - -- Like :term:`B` or :term:`T`, that are used to specify directories used by - BitBake during the build of a particular recipe. Those variables are - specified in ``bitbake.conf``. Some, like :term:`B`, are quite often - overwritten in recipes. - -- Starting with ``FAKEROOT``, to configure how the ``fakeroot`` command is - handled. Those are usually set by ``bitbake.conf`` and might get adapted in a - ``bbclass``. - -- Detailing where BitBake will store and fetch information from, for - data reuse between build runs like :term:`CACHE`, :term:`DL_DIR` or - :term:`PERSISTENT_DIR`. Those are usually global. - - -Layers and files -================ - -Variables starting with ``LAYER`` configure how BitBake handles layers. -Additionally, variables starting with ``BB`` configure how layers and files are -handled. For example: - -- :term:`LAYERDEPENDS` is used to configure on which layers a given layer - depends. - -- The configured layers are contained in :term:`BBLAYERS` and files in - :term:`BBFILES`. - -Those variables are often used in the files ``layer.conf`` and ``bblayers.conf``. - -Recipes and packages -==================== - -Variables handling recipes and packages can be split into: - -- :term:`PN`, :term:`PV` or :term:`PF` for example, contain information about - the name or revision of a recipe or package. Usually, the default set in - ``bitbake.conf`` is used, but those are from time to time overwritten in - recipes. - -- :term:`SUMMARY`, :term:`DESCRIPTION`, :term:`LICENSE` or :term:`HOMEPAGE` - contain the expected information and should be set specifically for every - recipe. - -- In recipes, variables are also used to control build and runtime - dependencies between recipes/packages with other recipes/packages. The - most common should be: :term:`PROVIDES`, :term:`RPROVIDES`, :term:`DEPENDS`, - and :term:`RDEPENDS`. - -- There are further variables starting with ``SRC`` that specify the sources in - a recipe like :term:`SRC_URI` or :term:`SRCDATE`. Those are also usually set - in recipes. - -- Which version or provider of a recipe should be given preference when - multiple recipes would provide the same item, is controlled by variables - starting with ``PREFERRED_``. Those are normally set in the configuration - files of a ``MACHINE`` or ``DISTRO``. diff --git a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-ref-variables.rst b/bitbake/doc/bitbake-user-manual/bitbake-user-manual-ref-variables.rst deleted file mode 100644 index 734c7858a2..0000000000 --- a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-ref-variables.rst +++ /dev/null @@ -1,1728 +0,0 @@ -.. 
SPDX-License-Identifier: CC-BY-2.5
-
-==================
-Variables Glossary
-==================
-
-|
-
-This chapter lists common variables used by BitBake and gives an
-overview of their function and contents.
-
-.. note::
-
-   Following are some points regarding the variables listed in this
-   glossary:
-
-   -  The variables listed in this glossary are specific to BitBake.
-      Consequently, the descriptions are limited to that context.
-
-   -  Also, variables exist in other systems that use BitBake (e.g. the
-      Yocto Project and OpenEmbedded) that have names identical to those
-      found in this glossary. For such cases, the variables in those
-      systems extend the functionality of the variable as it is
-      described here in this glossary.
-
-.. glossary::
-   :sorted:
-
-   :term:`ASSUME_PROVIDED`
-      Lists recipe names (:term:`PN` values) BitBake does not
-      attempt to build. Instead, BitBake assumes these recipes have already
-      been built.
-
-      In OpenEmbedded-Core, :term:`ASSUME_PROVIDED` mostly specifies native
-      tools that should not be built. An example is ``git-native``, which
-      when specified allows for the Git binary from the host to be used
-      rather than building ``git-native``.
-
-   :term:`AUTOREV`
-      This is a special variable used during fetching. When :term:`SRCREV` is
-      set to the value of this variable, the latest revision from the version
-      controlled source code repository is used.
-      It should be set as follows::
-
-         SRCREV = "${AUTOREV}"
-
-   :term:`AZ_SAS`
-      Azure Storage Shared Access Signature, when using the
-      :ref:`Azure Storage fetcher
-      `.
-      This variable can be defined to be used by the fetcher to authenticate
-      and gain access to non-public artifacts::
-
-         AZ_SAS = "se=2021-01-01&sp=r&sv=2018-11-09&sr=c&skoid=<skoid>&sig=<signature>"
-
-      For more information see Microsoft's Azure Storage documentation at
-      https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview
-
-   :term:`B`
-      The directory in which BitBake executes functions during a recipe's
-      build process.
-
-   :term:`BB_ALLOWED_NETWORKS`
-      Specifies a space-delimited list of hosts that the fetcher is allowed
-      to use to obtain the required source code. Following are
-      considerations surrounding this variable:
-
-      -  This host list is only used if
-         :term:`BB_NO_NETWORK` is either not set or
-         set to "0".
-
-      -  Limited support for the "``*``" wildcard character for matching
-         against the beginning of host names exists. For example, the
-         following setting matches ``git.gnu.org``, ``ftp.gnu.org``, and
-         ``foo.git.gnu.org``. ::
-
-            BB_ALLOWED_NETWORKS = "\*.gnu.org"
-
-         .. important::
-
-            The use of the "``*``" character only works at the beginning of
-            a host name and it must be isolated from the remainder of the
-            host name. You cannot use the wildcard character in any other
-            location of the name or combined with the front part of the
-            name.
-
-            For example, ``*.foo.bar`` is supported, while ``*aa.foo.bar``
-            is not.
-
-      -  Mirrors not in the host list are skipped and logged in debug.
-
-      -  Attempts to access networks not in the host list cause a failure.
-
-      Using :term:`BB_ALLOWED_NETWORKS` in conjunction with
-      :term:`PREMIRRORS` is very useful. Adding the
-      host you want to use to :term:`PREMIRRORS` results in the source code
-      being fetched from an allowed location and avoids raising an error
-      when a host that is not allowed is in a
-      :term:`SRC_URI` statement. This is because the
-      fetcher does not attempt to use the host listed in :term:`SRC_URI` after
-      a successful fetch from the :term:`PREMIRRORS` occurs.
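-
-      For illustration, this combination might look like the following
-      sketch, where the mirror URL is hypothetical but must itself match
-      an allowed host::
-
-         BB_ALLOWED_NETWORKS = "\*.gnu.org"
-         PREMIRRORS = "https?://.*/.* https://mirror.gnu.org/sources/"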
-
-   :term:`BB_BASEHASH_IGNORE_VARS`
-      Lists variables that are excluded from checksum and dependency data.
-      Variables that are excluded can therefore change without affecting
-      the checksum mechanism. A common example would be the variable for
-      the path of the build. BitBake's output should not (and usually does
-      not) depend on the directory in which it was built.
-
-   :term:`BB_CACHEDIR`
-      Specifies the code parser cache directory (distinct from :term:`CACHE`
-      and :term:`PERSISTENT_DIR` although they can be set to the same value
-      if desired). The default value is "${TOPDIR}/cache".
-
-   :term:`BB_CHECK_SSL_CERTS`
-      Specifies if SSL certificates should be checked when fetching. The default
-      value is ``1`` and certificates are not checked if the value is set to ``0``.
-
-   :term:`BB_HASH_CODEPARSER_VALS`
-      Specifies values for variables to use when populating the codeparser cache.
-      This can be used selectively to set dummy values for variables to avoid
-      the codeparser cache growing on every parse. Variables that would typically
-      be included are those where the value is not significant for where the
-      codeparser cache is used (i.e. when calculating variable dependencies for
-      code fragments). The value is space-separated without quoting values, for
-      example::
-
-         BB_HASH_CODEPARSER_VALS = "T=/ WORKDIR=/ DATE=1234 TIME=1234"
-
-   :term:`BB_CONSOLELOG`
-      Specifies the path to a log file into which BitBake's user interface
-      writes output during the build.
-
-   :term:`BB_CURRENTTASK`
-      Contains the name of the currently running task. The name does not
-      include the ``do_`` prefix.
-
-   :term:`BB_CURRENT_MC`
-      Contains the name of the current multiconfig a task is being run under.
-      The name is taken from the multiconfig configuration file (a file
-      ``mc1.conf`` would make this variable equal to ``mc1``).
-
-   :term:`BB_DEFAULT_TASK`
-      The default task to use when none is specified (e.g. with the ``-c``
-      command line option). The task name specified should not include the
-      ``do_`` prefix.
-
-   :term:`BB_DEFAULT_UMASK`
-      The default umask to apply to tasks if specified and no task specific
-      umask flag is set.
-
-   :term:`BB_DEFER_BBCLASSES`
-      The classes listed in this variable have their :ref:`inherit
-      ` calls automatically promoted
-      to deferred inherits. See :ref:`inherit_defer
-      ` for more information on
-      deferred inherits.
-
-      This means that if :term:`BB_DEFER_BBCLASSES` is set as follows::
-
-         BB_DEFER_BBCLASSES = "foo"
-
-      The following statement::
-
-         inherit foo
-
-      Will automatically be equal to calling::
-
-         inherit_defer foo
-
-   :term:`BB_DISKMON_DIRS`
-      Monitors disk space and available inodes during the build and allows
-      you to control the build based on these parameters.
-
-      Disk space monitoring is disabled by default. When setting this
-      variable, use the following form::
-
-         BB_DISKMON_DIRS = "<action>,<dir>,<threshold> [...]"
-
-         where:
-
-            <action> is:
-               HALT: Immediately halt the build when
-                  a threshold is broken.
-               STOPTASKS: Stop the build after the currently
-                  executing tasks have finished when
-                  a threshold is broken.
-               WARN: Issue a warning but continue the
-                  build when a threshold is broken.
-                  Subsequent warnings are issued as
-                  defined by the
-                  BB_DISKMON_WARNINTERVAL variable,
-                  which must be defined.
-
-            <dir> is:
-               Any directory you choose. You can specify one or
-               more directories to monitor by separating the
-               groupings with a space. If two directories are
-               on the same device, only the first directory
-               is monitored.
-
-            <threshold> is:
-               Either the minimum available disk space,
-               the minimum number of free inodes, or
-               both. You must specify at least one. To
-               omit one or the other, simply omit the value.
-               Specify the threshold using G, M, K for Gbytes,
-               Mbytes, and Kbytes, respectively. If you do
-               not specify G, M, or K, Kbytes is assumed by
-               default. Do not use GB, MB, or KB.
-
-      Here are some examples::
-
-         BB_DISKMON_DIRS = "HALT,${TMPDIR},1G,100K WARN,${SSTATE_DIR},1G,100K"
-         BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},1G"
-         BB_DISKMON_DIRS = "HALT,${TMPDIR},,100K"
-
-      The first example works only if you also set the
-      :term:`BB_DISKMON_WARNINTERVAL`
-      variable. This example causes the build system to immediately halt
-      when either the disk space in ``${TMPDIR}`` drops below 1 Gbyte or
-      the available free inodes drops below 100 Kbytes. Because two
-      directories are provided with the variable, the build system also
-      issues a warning when the disk space in the ``${SSTATE_DIR}``
-      directory drops below 1 Gbyte or the number of free inodes drops
-      below 100 Kbytes. Subsequent warnings are issued during intervals as
-      defined by the :term:`BB_DISKMON_WARNINTERVAL` variable.
-
-      The second example stops the build after all currently executing
-      tasks complete when the minimum disk space in the ``${TMPDIR}``
-      directory drops below 1 Gbyte. No disk monitoring occurs for the free
-      inodes in this case.
-
-      The final example immediately halts the build when the number of
-      free inodes in the ``${TMPDIR}`` directory drops below 100 Kbytes. No
-      disk space monitoring for the directory itself occurs in this case.
-
-   :term:`BB_DISKMON_WARNINTERVAL`
-      Defines the disk space and free inode warning intervals.
-
-      If you are going to use the :term:`BB_DISKMON_WARNINTERVAL` variable, you
-      must also use the :term:`BB_DISKMON_DIRS`
-      variable and define its action as "WARN". During the build,
-      subsequent warnings are issued each time disk space or number of free
-      inodes further reduces by the respective interval.
-
-      If you do not provide a :term:`BB_DISKMON_WARNINTERVAL` variable and you
-      do use :term:`BB_DISKMON_DIRS` with the "WARN" action, the disk
-      monitoring interval defaults to the following::
-
-         BB_DISKMON_WARNINTERVAL = "50M,5K"
-
-      When specifying the variable in your configuration file, use the
-      following form::
-
-         BB_DISKMON_WARNINTERVAL = "<disk_space_interval>,<disk_inode_interval>"
-
-         where:
-
-            <disk_space_interval> is:
-               An interval of disk space expressed in either
-               G, M, or K for Gbytes, Mbytes, or Kbytes,
-               respectively. You cannot use GB, MB, or KB.
-
-            <disk_inode_interval> is:
-               An interval of free inodes expressed in either
-               G, M, or K for Gbytes, Mbytes, or Kbytes,
-               respectively. You cannot use GB, MB, or KB.
-
-      Here is an example::
-
-         BB_DISKMON_DIRS = "WARN,${SSTATE_DIR},1G,100K"
-         BB_DISKMON_WARNINTERVAL = "50M,5K"
-
-      These variables cause BitBake to
-      issue subsequent warnings each time the available disk space further
-      reduces by 50 Mbytes or the number of free inodes further reduces by
-      5 Kbytes in the ``${SSTATE_DIR}`` directory. Subsequent warnings
-      based on the interval occur each time a respective interval is
-      reached beyond the initial warning (i.e. 1 Gbytes and 100 Kbytes).
-
-   :term:`BB_ENV_PASSTHROUGH`
-      Specifies the internal list of variables to allow through from
-      the external environment into BitBake's datastore. If the value of
-      this variable is not specified (which is the default), the following
-      list is used: :term:`BBPATH`, :term:`BB_PRESERVE_ENV`,
-      :term:`BB_ENV_PASSTHROUGH`, and :term:`BB_ENV_PASSTHROUGH_ADDITIONS`.
-
-      ..
note:: - - You must set this variable in the external environment in order - for it to work. - - :term:`BB_ENV_PASSTHROUGH_ADDITIONS` - Specifies an additional set of variables to allow through from the - external environment into BitBake's datastore. This list of variables - are on top of the internal list set in - :term:`BB_ENV_PASSTHROUGH`. - - .. note:: - - You must set this variable in the external environment in order - for it to work. - - :term:`BB_FETCH_PREMIRRORONLY` - When set to "1", causes BitBake's fetcher module to only search - :term:`PREMIRRORS` for files. BitBake will not - search the main :term:`SRC_URI` or - :term:`MIRRORS`. - - :term:`BB_FILENAME` - Contains the filename of the recipe that owns the currently running - task. For example, if the ``do_fetch`` task that resides in the - ``my-recipe.bb`` is executing, the :term:`BB_FILENAME` variable contains - "/foo/path/my-recipe.bb". - - :term:`BB_GENERATE_MIRROR_TARBALLS` - Causes tarballs of the Git repositories, including the Git metadata, - to be placed in the :term:`DL_DIR` directory. Anyone - wishing to create a source mirror would want to enable this variable. - - For performance reasons, creating and placing tarballs of the Git - repositories is not the default action by BitBake. :: - - BB_GENERATE_MIRROR_TARBALLS = "1" - - :term:`BB_GENERATE_SHALLOW_TARBALLS` - Setting this variable to "1" when :term:`BB_GIT_SHALLOW` is also set to - "1" causes bitbake to generate shallow mirror tarballs when fetching git - repositories. The number of commits included in the shallow mirror - tarballs is controlled by :term:`BB_GIT_SHALLOW_DEPTH`. - - If both :term:`BB_GIT_SHALLOW` and :term:`BB_GENERATE_MIRROR_TARBALLS` are - enabled, bitbake will generate shallow mirror tarballs by default for git - repositories. This separate variable exists so that shallow tarball - generation can be enabled without needing to also enable normal mirror - generation if it is not desired. - - For example usage, see :term:`BB_GIT_SHALLOW`. - - :term:`BB_GIT_DEFAULT_DESTSUFFIX` - The default destination directory where the :ref:`Git fetcher - ` unpacks the source code. If this variable is not set, the - source code is unpacked in a directory named "git". - - :term:`BB_GIT_SHALLOW` - Setting this variable to "1" enables the support for fetching, using and - generating mirror tarballs of `shallow git repositories `_. - The external `git-make-shallow `_ - script is used for shallow mirror tarball creation. - - When :term:`BB_GIT_SHALLOW` is enabled, bitbake will attempt to fetch a shallow - mirror tarball. If the shallow mirror tarball cannot be fetched, it will - try to fetch the full mirror tarball and use that. - - This setting causes an initial shallow clone instead of an initial full bare clone. - The amount of data transferred during the initial clone will be significantly reduced. - - However, every time the source revision (referenced in :term:`SRCREV`) - changes, regardless of whether the cache within the download directory - (defined by :term:`DL_DIR`) has been cleaned up or not, - the data transfer may be significantly higher because entirely - new shallow clones are required for each source revision change. - - Over time, numerous shallow clones may cumulatively transfer - the same amount of data as an initial full bare clone. - This is especially the case with very large repositories. - - Existing initial full bare clones, created without this setting, - will still be utilized. 
- - If the Git error "Server does not allow request for unadvertised object" - occurs, an initial full bare clone is fetched automatically. - This may happen if the Git server does not allow the request - or if the Git client has issues with this functionality. - - See also :term:`BB_GIT_SHALLOW_DEPTH` and - :term:`BB_GENERATE_SHALLOW_TARBALLS`. - - Example usage:: - - BB_GIT_SHALLOW ?= "1" - - # Keep only the top commit - BB_GIT_SHALLOW_DEPTH ?= "1" - - # This defaults to enabled if both BB_GIT_SHALLOW and - # BB_GENERATE_MIRROR_TARBALLS are enabled - BB_GENERATE_SHALLOW_TARBALLS ?= "1" - - :term:`BB_GIT_SHALLOW_DEPTH` - When used with :term:`BB_GENERATE_SHALLOW_TARBALLS`, this variable sets - the number of commits to include in generated shallow mirror tarballs. - With a depth of 1, only the commit referenced in :term:`SRCREV` is - included in the shallow mirror tarball. Increasing the depth includes - additional parent commits, working back through the commit history. - - If this variable is unset, bitbake will default to a depth of 1 when - generating shallow mirror tarballs. - - For example usage, see :term:`BB_GIT_SHALLOW`. - - :term:`BB_GLOBAL_PYMODULES` - Specifies the list of Python modules to place in the global namespace. - It is intended that only the core layer should set this and it is meant - to be a very small list, typically just ``os`` and ``sys``. - :term:`BB_GLOBAL_PYMODULES` is expected to be set before the first - ``addpylib`` directive. - See also ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:extending python library code`". - - :term:`BB_HASHCHECK_FUNCTION` - Specifies the name of the function to call during the "setscene" part - of the task's execution in order to validate the list of task hashes. - The function returns the list of setscene tasks that should be - executed. - - At this point in the execution of the code, the objective is to - quickly verify if a given setscene function is likely to work or not. - It's easier to check the list of setscene functions in one pass than - to call many individual tasks. The returned list need not be - completely accurate. A given setscene task can still later fail. - However, the more accurate the data returned, the more efficient the - build will be. - - :term:`BB_HASHCONFIG_IGNORE_VARS` - Lists variables that are excluded from base configuration checksum, - which is used to determine if the cache can be reused. - - One of the ways BitBake determines whether to re-parse the main - metadata is through checksums of the variables in the datastore of - the base configuration data. There are variables that you typically - want to exclude when checking whether or not to re-parse and thus - rebuild the cache. As an example, you would usually exclude ``TIME`` - and ``DATE`` because these variables are always changing. If you did - not exclude them, BitBake would never reuse the cache. - - :term:`BB_HASHSERVE` - Specifies the Hash Equivalence server to use. - - If set to ``auto``, BitBake automatically starts its own server - over a UNIX domain socket. An option is to connect this server - to an upstream one, by setting :term:`BB_HASHSERVE_UPSTREAM`. - - If set to ``unix://path``, BitBake will connect to an existing - hash server available over a UNIX domain socket. - - If set to ``host:port``, BitBake will connect to a remote server on the - specified host. This allows multiple clients to share the same - hash equivalence data. 
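-
-      As a sketch of the three accepted forms (the socket path and host name
-      below are illustrative, not defaults)::
-
-         BB_HASHSERVE = "auto"
-         # or an existing local server over a UNIX domain socket:
-         # BB_HASHSERVE = "unix://path/to/hashserv.sock"
-         # or a remote server shared between multiple clients:
-         # BB_HASHSERVE = "hashserv.example.com:8686"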
-
-      The remote server can be started manually through
-      the ``bin/bitbake-hashserv`` script provided by BitBake,
-      which supports UNIX domain sockets too. This script also allows
-      you to start the server in read-only mode, to avoid accepting
-      equivalences that correspond to Shared State caches that are
-      only available on specific clients.
-
-   :term:`BB_HASHSERVE_UPSTREAM`
-      Specifies an upstream Hash Equivalence server.
-
-      This optional setting is only useful when a local Hash Equivalence
-      server is started (setting :term:`BB_HASHSERVE` to ``auto``),
-      and you wish the local server to query an upstream server for
-      Hash Equivalence data.
-
-      Example usage::
-
-         BB_HASHSERVE_UPSTREAM = "hashserv.yoctoproject.org:8686"
-
-   :term:`BB_INVALIDCONF`
-      Used in combination with the ``ConfigParsed`` event to trigger
-      re-parsing the base metadata (i.e. all the recipes). The
-      ``ConfigParsed`` event can set the variable to trigger the re-parse.
-      You must be careful to avoid recursive loops with this functionality.
-
-   :term:`BB_LOADFACTOR_MAX`
-      Setting this to a value will cause BitBake to check the system load
-      average before executing new tasks. If the load average is above the
-      number of CPUs multiplied by this factor, no new task will be started
-      unless there is no task executing. A value of "1.5" has been found to
-      work reasonably well. This is helpful for systems which don't have
-      pressure regulation enabled; pressure regulation is more granular, and
-      pressure values take precedence over the load factor.
-
-   :term:`BB_LOGCONFIG`
-      Specifies the name of a config file that contains the user logging
-      configuration. See
-      :ref:`bitbake-user-manual/bitbake-user-manual-execution:logging`
-      for additional information.
-
-   :term:`BB_LOGFMT`
-      Specifies the name of the log files saved into
-      ``${``\ :term:`T`\ ``}``. By default, the :term:`BB_LOGFMT`
-      variable is undefined and the log filenames get created using the
-      following form::
-
-         log.{task}.{pid}
-
-      If you want to force log files to take a specific name, you can set this
-      variable in a configuration file.
-
-   :term:`BB_MULTI_PROVIDER_ALLOWED`
-      Allows you to suppress BitBake warnings caused when building two
-      separate recipes that provide the same output.
-
-      BitBake normally issues a warning when building two different recipes
-      where each provides the same output. This scenario is usually
-      something the user does not want. However, cases do exist where it
-      makes sense, particularly in the ``virtual/*`` namespace. You can use
-      this variable to suppress BitBake's warnings.
-
-      To use the variable, list provider names (e.g. recipe names,
-      ``virtual/kernel``, and so forth).
-
-   :term:`BB_NICE_LEVEL`
-      Allows BitBake to run at a specific priority (i.e. nice level).
-      System permissions usually mean that BitBake can reduce its priority
-      but not raise it again. See :term:`BB_TASK_NICE_LEVEL` for
-      additional information.
-
-   :term:`BB_NO_NETWORK`
-      Disables network access in the BitBake fetcher modules. With this
-      access disabled, any command that attempts to access the network
-      becomes an error.
-
-      Disabling network access is useful for testing source mirrors,
-      running builds when not connected to the Internet, and when operating
-      in certain kinds of firewall environments.
-
-   :term:`BB_NUMBER_PARSE_THREADS`
-      Sets the number of threads BitBake uses when parsing. By default, the
-      number of threads is equal to the number of cores on the system.
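-
-      As an illustration, a line like the following in ``conf/local.conf``
-      (the value is arbitrary, chosen here for a shared build host) would
-      cap parsing at four threads::
-
-         BB_NUMBER_PARSE_THREADS = "4"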
- - :term:`BB_NUMBER_THREADS` - The maximum number of tasks BitBake should run in parallel at any one - time. If your host development system supports multiple cores, a good - rule of thumb is to set this variable to twice the number of cores. - - :term:`BB_ORIGENV` - Contains a copy of the original external environment in which BitBake - was run. The copy is taken before any variable values configured to - pass through from the external environment are filtered into BitBake's - datastore. - - .. note:: - - The contents of this variable is a datastore object that can be - queried using the normal datastore operations. - - :term:`BB_PRESERVE_ENV` - Disables environment filtering and instead allows all variables through - from the external environment into BitBake's datastore. - - .. note:: - - You must set this variable in the external environment in order - for it to work. - - :term:`BB_PRESSURE_MAX_CPU` - Specifies a maximum CPU pressure threshold, above which BitBake's - scheduler will not start new tasks (providing there is at least - one active task). If no value is set, CPU pressure is not - monitored when starting tasks. - - The pressure data is calculated based upon what Linux kernels since - version 4.20 expose under ``/proc/pressure``. The threshold represents - the difference in "total" pressure from the previous second. The - minimum value is 1.0 (extremely slow builds) and the maximum is - 1000000 (a pressure value unlikely to ever be reached). See - https://docs.kernel.org/accounting/psi.html for more information. - - A default value to limit the CPU pressure to be set in ``conf/local.conf`` - could be:: - - BB_PRESSURE_MAX_CPU = "15000" - - Multiple values should be tested on the build host to determine what suits - best, depending on the need for performance versus load average during - the build. - - .. note:: - - You may see numerous messages printed by BitBake in case the - :term:`BB_PRESSURE_MAX_CPU` is too low:: - - Pressure status changed to CPU: True, IO: False, Mem: False (CPU: 1105.9/2.0, IO: 0.0/2.0, Mem: 0.0/2.0) - using 1/64 bitbake threads - - This means that the :term:`BB_PRESSURE_MAX_CPU` should be increased to - a reasonable value for limiting the CPU pressure on the system. - Monitor the varying value after ``CPU:`` above to set a sensible value. - - :term:`BB_PRESSURE_MAX_IO` - Specifies a maximum I/O pressure threshold, above which BitBake's - scheduler will not start new tasks (providing there is at least - one active task). If no value is set, I/O pressure is not - monitored when starting tasks. - - The pressure data is calculated based upon what Linux kernels since - version 4.20 expose under ``/proc/pressure``. The threshold represents - the difference in "total" pressure from the previous second. The - minimum value is 1.0 (extremely slow builds) and the maximum is - 1000000 (a pressure value unlikely to ever be reached). See - https://docs.kernel.org/accounting/psi.html for more information. - - At this point in time, experiments show that IO pressure tends to - be short-lived and regulating just the CPU with - :term:`BB_PRESSURE_MAX_CPU` can help to reduce it. - - A default value to limit the IO pressure to be set in ``conf/local.conf`` - could be:: - - BB_PRESSURE_MAX_IO = "15000" - - Multiple values should be tested on the build host to determine what suits - best, depending on the need for performance versus I/O usage during the - build. - - .. 
note:: - - You may see numerous messages printed by BitBake in case the - :term:`BB_PRESSURE_MAX_IO` is too low:: - - Pressure status changed to CPU: None, IO: True, Mem: False (CPU: 2236.0/None, IO: 153.6/2.0, Mem: 0.0/2.0) - using 19/64 bitbake threads - - This means that the :term:`BB_PRESSURE_MAX_IO` should be increased to - a reasonable value for limiting the I/O pressure on the system. - Monitor the varying value after ``IO:`` above to set a sensible value. - - :term:`BB_PRESSURE_MAX_MEMORY` - Specifies a maximum memory pressure threshold, above which BitBake's - scheduler will not start new tasks (providing there is at least - one active task). If no value is set, memory pressure is not - monitored when starting tasks. - - The pressure data is calculated based upon what Linux kernels since - version 4.20 expose under ``/proc/pressure``. The threshold represents - the difference in "total" pressure from the previous second. The - minimum value is 1.0 (extremely slow builds) and the maximum is - 1000000 (a pressure value unlikely to ever be reached). See - https://docs.kernel.org/accounting/psi.html for more information. - - Memory pressure is experienced when time is spent swapping, - refaulting pages from the page cache or performing direct reclaim. - This is why memory pressure is rarely seen, but setting this variable - might be useful as a last resort to prevent OOM errors if they are - occurring during builds. - - A default value to limit the memory pressure to be set in - ``conf/local.conf`` could be:: - - BB_PRESSURE_MAX_MEMORY = "15000" - - Multiple values should be tested on the build host to determine what suits - best, depending on the need for performance versus memory consumption - during the build. - - .. note:: - - You may see numerous messages printed by BitBake in case the - :term:`BB_PRESSURE_MAX_MEMORY` is too low:: - - Pressure status changed to CPU: None, IO: False, Mem: True (CPU: 29.5/None, IO: 0.0/2.0, Mem: 2553.3/2.0) - using 17/64 bitbake threads - - This means that the :term:`BB_PRESSURE_MAX_MEMORY` should be increased to - a reasonable value for limiting the memory pressure on the system. - Monitor the varying value after ``Mem:`` above to set a sensible value. - - :term:`BB_RUNFMT` - Specifies the name of the executable script files (i.e. run files) - saved into ``${``\ :term:`T`\ ``}``. By default, the - :term:`BB_RUNFMT` variable is undefined and the run filenames get - created using the following form:: - - run.{func}.{pid} - - If you want to force run files to take a specific name, you can set this - variable in a configuration file. - - :term:`BB_RUNTASK` - Contains the name of the currently executing task. The value includes - the "do\_" prefix. For example, if the currently executing task is - ``do_config``, the value is "do_config". - - :term:`BB_SCHEDULER` - Selects the name of the scheduler to use for the scheduling of - BitBake tasks. Three options exist: - - - *basic* --- the basic framework from which everything derives. Using - this option causes tasks to be ordered numerically as they are - parsed. - - - *speed* --- executes tasks first that have more tasks depending on - them. The "speed" option is the default. - - - *completion* --- causes the scheduler to try to complete a given - recipe once its build has started. - - :term:`BB_SCHEDULERS` - Defines custom schedulers to import. Custom schedulers need to be - derived from the ``RunQueueScheduler`` class. 
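-
-      As a rough sketch (the module name, class name and ``name`` value below
-      are hypothetical, and this is an untested illustration rather than a
-      known-working scheduler)::
-
-         # lib/myscheduler.py in a layer (hypothetical location)
-         from bb.runqueue import RunQueueScheduler
-
-         class RunQueueSchedulerReverse(RunQueueScheduler):
-             """Illustrative only: prefer tasks in reverse parse order."""
-             name = "reverse"
-
-             def __init__(self, runqueue, rqdata):
-                 super().__init__(runqueue, rqdata)
-                 # prio_map holds task ids in scheduling priority order;
-                 # reversing it inverts the "basic" numeric ordering.
-                 self.prio_map.reverse()
-
-      Such a class would then be selected with something like
-      ``BB_SCHEDULERS = "myscheduler.RunQueueSchedulerReverse"`` together
-      with ``BB_SCHEDULER = "reverse"`` (both names assumed for illustration).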
-
-      For information on how to select a scheduler, see the
-      :term:`BB_SCHEDULER` variable.
-
-   :term:`BB_SETSCENE_DEPVALID`
-      Specifies a function BitBake calls that determines whether BitBake
-      requires a setscene dependency to be met.
-
-      When running a setscene task, BitBake needs to know which
-      dependencies of that setscene task also need to be run. Whether
-      dependencies also need to be run is highly dependent on the metadata.
-      The function specified by this variable returns a "True" or "False"
-      depending on whether the dependency needs to be met.
-
-   :term:`BB_SIGNATURE_EXCLUDE_FLAGS`
-      Lists variable flags (varflags) that can be safely excluded from
-      checksum and dependency data for keys in the datastore. When
-      generating checksum or dependency data for keys in the datastore, the
-      flags set against that key are normally included in the checksum.
-
-      For more information on varflags, see the
-      ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:variable flags`"
-      section.
-
-   :term:`BB_SIGNATURE_HANDLER`
-      Defines the name of the signature handler BitBake uses. The signature
-      handler defines the way stamp files are created and handled, if and
-      how the signature is incorporated into the stamps, and how the
-      signature itself is generated.
-
-      A new signature handler can be added by injecting a class derived
-      from the ``SignatureGenerator`` class into the global namespace.
-
-   :term:`BB_SRCREV_POLICY`
-      Defines the behavior of the fetcher when it interacts with source
-      control systems and dynamic source revisions. The
-      :term:`BB_SRCREV_POLICY` variable is useful when working without a
-      network.
-
-      The variable can be set using one of two policies:
-
-      -  *cache* --- retains the value the system obtained previously rather
-         than querying the source control system each time.
-
-      -  *clear* --- queries the source control system every time. With this
-         policy, there is no cache. The "clear" policy is the default.
-
-   :term:`BB_STRICT_CHECKSUM`
-      Sets a stricter checksum mechanism for non-local URLs. Setting
-      this variable to a value causes BitBake to report an error if it
-      encounters a non-local URL that does not have at least one checksum
-      specified.
-
-   :term:`BB_TASK_IONICE_LEVEL`
-      Allows adjustment of a task's Input/Output priority. During
-      Autobuilder testing, random failures can occur for tasks due to I/O
-      starvation. These failures occur during various QEMU runtime
-      timeouts. You can use the :term:`BB_TASK_IONICE_LEVEL` variable to adjust
-      the I/O priority of these tasks.
-
-      .. note::
-
-         This variable works similarly to the :term:`BB_TASK_NICE_LEVEL`
-         variable except with a task's I/O priorities.
-
-      Set the variable as follows::
-
-         BB_TASK_IONICE_LEVEL = "class.prio"
-
-      For *class*, the default value is "2", which is a best effort. You can use
-      "1" for realtime and "3" for idle. If you want to use realtime, you
-      must have superuser privileges.
-
-      For *prio*, you can use any value from "0", which is the highest
-      priority, to "7", which is the lowest. The default value is "4". You
-      do not need any special privileges to use this range of priority
-      values.
-
-      .. note::
-
-         In order for your I/O priority settings to take effect, you need the
-         Budget Fair Queuing (BFQ) Scheduler selected for the backing block
-         device. To select the scheduler, use the following command form where
-         device is the device (e.g.
-         sda, sdb, and so forth)::
-
-            $ sudo sh -c "echo bfq > /sys/block/device/queue/scheduler"
-
-   :term:`BB_TASK_NICE_LEVEL`
-      Allows specific tasks to change their priority (i.e. nice level).
-
-      You can use this variable in combination with task overrides to raise
-      or lower priorities of specific tasks. For example, on the Yocto
-      Project autobuilder, QEMU emulation
-      in images is given a higher priority as compared to build tasks to
-      ensure that images do not suffer timeouts on loaded systems.
-
-   :term:`BB_TASKHASH`
-      Within an executing task, this variable holds the hash of the task as
-      returned by the currently enabled signature generator.
-
-   :term:`BB_USE_HOME_NPMRC`
-      Controls whether or not the npm fetcher uses the user's ``.npmrc``
-      file from their home directory. This can be used for authentication
-      of private NPM registries, among other uses. This is turned off by
-      default and requires the user to explicitly set it to "1" to enable it.
-
-   :term:`BB_VERBOSE_LOGS`
-      Controls how verbose BitBake is during builds. If set, shell scripts
-      echo commands and shell script output appears on standard out
-      (stdout).
-
-   :term:`BB_WORKERCONTEXT`
-      Specifies if the current context is executing a task. BitBake sets
-      this variable to "1" when a task is being executed. The value is not
-      set when the task is in server context during parsing or event
-      handling.
-
-   :term:`BBCLASSEXTEND`
-      Allows you to extend a recipe so that it builds variants of the
-      software. Some examples of these variants for recipes from the
-      OpenEmbedded-Core metadata are "natives" such as ``quilt-native``,
-      which is a copy of Quilt built to run on the build system; "crosses"
-      such as ``gcc-cross``, which is a compiler built to run on the build
-      machine but produces binaries that run on the target ``MACHINE``;
-      "nativesdk", which targets the SDK machine instead of ``MACHINE``;
-      and "multilibs" in the form "``multilib:``\ multilib_name".
-
-      To build a different variant of the recipe with a minimal amount of
-      code, it is usually as simple as adding the variable to your recipe.
-      Here are two examples. The "native" variants are from the
-      OpenEmbedded-Core metadata::
-
-         BBCLASSEXTEND =+ "native nativesdk"
-         BBCLASSEXTEND =+ "multilib:multilib_name"
-
-      .. note::
-
-         Internally, the :term:`BBCLASSEXTEND` mechanism generates recipe
-         variants by rewriting variable values and applying overrides such
-         as ``_class-native``. For example, to generate a native version of
-         a recipe, a :term:`DEPENDS` on "foo" is
-         rewritten to a :term:`DEPENDS` on "foo-native".
-
-         Even when using :term:`BBCLASSEXTEND`, the recipe is only parsed once.
-         Parsing once adds some limitations. For example, it is not
-         possible to include a different file depending on the variant,
-         since ``include`` statements are processed when the recipe is
-         parsed.
-
-   :term:`BBDEBUG`
-      Sets the BitBake debug output level to a specific value as
-      incremented by the ``-D`` command line option.
-
-      .. note::
-
-         You must set this variable in the external environment in order
-         for it to work.
-
-   :term:`BBFILE_COLLECTIONS`
-      Lists the names of configured layers. These names are used to find
-      the other ``BBFILE_*`` variables. Typically, each layer appends its
-      name to this variable in its ``conf/layer.conf`` file.
-
-   :term:`BBFILE_PATTERN`
-      Variable that expands to match files from
-      :term:`BBFILES` in a particular layer.
-      This variable is used in the ``conf/layer.conf`` file and must be
-      suffixed with the name of the specific layer (e.g.
-      ``BBFILE_PATTERN_emenlow``).
-
-   :term:`BBFILE_PRIORITY`
-      Assigns the priority for recipe files in each layer.
-
-      This variable is used in the ``conf/layer.conf`` file and must be
-      suffixed with a ``_`` followed by the name of the specific layer
-      (e.g. ``BBFILE_PRIORITY_emenlow``). Using a colon as the separator
-      is not supported.
-
-      This variable is useful in situations where the same recipe appears
-      in more than one layer. Setting this variable allows you to
-      prioritize a layer against other layers that contain the same recipe
-      --- effectively letting you control the precedence for the multiple
-      layers. The precedence established through this variable stands
-      regardless of a recipe's version (:term:`PV` variable).
-      For example, a layer that has a recipe with a higher :term:`PV` value but
-      for which the :term:`BBFILE_PRIORITY` is set to have a lower precedence
-      still has a lower precedence.
-
-      A larger value for the :term:`BBFILE_PRIORITY` variable results in a
-      higher precedence. For example, the value 6 has a higher precedence
-      than the value 5. If not specified, the :term:`BBFILE_PRIORITY` variable
-      is set based on layer dependencies (see the :term:`LAYERDEPENDS` variable
-      for more information). The default priority, if unspecified for a
-      layer with no dependencies, is the lowest defined priority + 1 (or 1
-      if no priorities are defined).
-
-      .. tip::
-
-         You can use the command ``bitbake-layers show-layers`` to list all
-         configured layers along with their priorities.
-
-   :term:`BBFILES`
-      A space-separated list of recipe files BitBake uses to build
-      software.
-
-      When specifying recipe files, you can pattern match using Python's
-      ``glob`` syntax. For details on the syntax, see the documentation for
-      the Python ``glob`` module.
-
-   :term:`BBFILES_DYNAMIC`
-      Activates content depending on presence of identified layers. You
-      identify the layers by the collections that the layers define.
-
-      Use the :term:`BBFILES_DYNAMIC` variable to avoid ``.bbappend`` files whose
-      corresponding ``.bb`` file is in a layer that attempts to modify other
-      layers through ``.bbappend`` but does not want to introduce a hard
-      dependency on those other layers.
-
-      Additionally you can prefix the rule with "!" to add ``.bbappend`` and
-      ``.bb`` files in case a layer is not present. Use this to avoid a hard
-      dependency on those other layers.
-
-      Use the following form for :term:`BBFILES_DYNAMIC`::
-
-         collection_name:filename_pattern
-
-      The following example identifies two collection names and two filename
-      patterns::
-
-         BBFILES_DYNAMIC += "\
-             clang-layer:${LAYERDIR}/bbappends/meta-clang/*/*/*.bbappend \
-             core:${LAYERDIR}/bbappends/openembedded-core/meta/*/*/*.bbappend \
-         "
-
-      When the collection name is prefixed with "!" it will add the file pattern in case
-      the layer is absent::
-
-         BBFILES_DYNAMIC += "\
-             !clang-layer:${LAYERDIR}/backfill/meta-clang/*/*/*.bb \
-         "
-
-      This next example shows an error message that occurs because invalid
-      entries are found, which cause parsing to fail::
-
-         ERROR: BBFILES_DYNAMIC entries must be of the form {!}<collection name>:<filename pattern>, not:
-             /work/my-layer/bbappends/meta-security-isafw/*/*/*.bbappend
-             /work/my-layer/bbappends/openembedded-core/meta/*/*/*.bbappend
-
-   :term:`BBINCLUDED`
-      Contains a space-separated list of all files that BitBake's
-      parser included during parsing of the current file.
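-
-      As an illustrative sketch (not one of the manual's own examples), an
-      anonymous Python function in a recipe could log the list at parse
-      time::
-
-         python () {
-             bb.note("Included so far: %s" % d.getVar("BBINCLUDED"))
-         }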
-
-   :term:`BBINCLUDELOGS`
-      If set to a value, enables printing the task log when reporting a
-      failed task.
-
-   :term:`BBINCLUDELOGS_LINES`
-      If :term:`BBINCLUDELOGS` is set, specifies
-      the maximum number of lines from the task log file to print when
-      reporting a failed task. If you do not set :term:`BBINCLUDELOGS_LINES`,
-      the entire log is printed.
-
-   :term:`BBLAYERS`
-      Lists the layers to enable during the build. This variable is defined
-      in the ``bblayers.conf`` configuration file in the build directory.
-      Here is an example::
-
-         BBLAYERS = " \
-             /home/scottrif/poky/meta \
-             /home/scottrif/poky/meta-yocto \
-             /home/scottrif/poky/meta-yocto-bsp \
-             /home/scottrif/poky/meta-mykernel \
-             "
-
-      This example enables four layers, one of which is a custom, user-defined
-      layer named ``meta-mykernel``.
-
-   :term:`BBLAYERS_FETCH_DIR`
-      Sets the base location where layers are stored. This setting is used
-      in conjunction with ``bitbake-layers layerindex-fetch`` and tells
-      ``bitbake-layers`` where to place the fetched layers.
-
-   :term:`BBMASK`
-      Prevents BitBake from processing recipes and recipe append files.
-
-      You can use the :term:`BBMASK` variable to "hide" these ``.bb`` and
-      ``.bbappend`` files. BitBake ignores any recipe or recipe append
-      files that match any of the expressions. It is as if BitBake does not
-      see them at all. Consequently, matching files are not parsed or
-      otherwise used by BitBake.
-
-      The values you provide are passed to Python's regular expression
-      compiler. Consequently, the syntax follows Python's Regular
-      Expression (re) syntax. The expressions are compared against the full
-      paths to the files. For complete syntax information, see Python's
-      documentation at http://docs.python.org/3/library/re.html.
-
-      The following example uses a complete regular expression to tell
-      BitBake to ignore all recipe and recipe append files in the
-      ``meta-ti/recipes-misc/`` directory::
-
-         BBMASK = "meta-ti/recipes-misc/"
-
-      If you want to mask out multiple directories or recipes, you can
-      specify multiple regular expression fragments. This next example
-      masks out multiple directories and individual recipes::
-
-         BBMASK += "/meta-ti/recipes-misc/ meta-ti/recipes-ti/packagegroup/"
-         BBMASK += "/meta-oe/recipes-support/"
-         BBMASK += "/meta-foo/.*/openldap"
-         BBMASK += "opencv.*\.bbappend"
-         BBMASK += "lzma"
-
-      .. note::
-
-         When specifying a directory name, use the trailing slash character
-         to ensure you match just that directory name.
-
-   :term:`BBMULTICONFIG`
-      Enables BitBake to perform multiple configuration builds and lists
-      each separate configuration (multiconfig). You can use this variable
-      to cause BitBake to build multiple targets where each target has a
-      separate configuration. Define :term:`BBMULTICONFIG` in your
-      ``conf/local.conf`` configuration file.
-
-      As an example, the following line specifies three multiconfigs, each
-      having a separate configuration file::
-
-         BBMULTICONFIG = "configA configB configC"
-
-      Each configuration file you use must reside in the
-      build directory within a directory named ``conf/multiconfig`` (e.g.
-      build_directory\ ``/conf/multiconfig/configA.conf``).
-
-      For information on how to use :term:`BBMULTICONFIG` in an environment
-      that supports building targets with multiple configurations, see the
-      ":ref:`bitbake-user-manual/bitbake-user-manual-intro:executing a multiple configuration build`"
-      section.
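-
-      As a sketch of how such a build is then invoked (``core-image-minimal``
-      is an illustrative target name), each multiconfig is selected with the
-      ``mc:`` prefix on the command line::
-
-         $ bitbake mc:configA:core-image-minimal mc:configB:core-image-minimal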
-
-   :term:`BBPATH`
-      A colon-separated list used by BitBake to locate class (``.bbclass``)
-      and configuration (``.conf``) files. This variable is analogous to the
-      ``PATH`` variable.
-
-      If you run BitBake from a directory outside of the build directory,
-      you must be sure to set :term:`BBPATH` to point to the build directory.
-      Set the variable as you would any environment variable and then run
-      BitBake::
-
-         $ BBPATH="build_directory"
-         $ export BBPATH
-         $ bitbake target
-
-   :term:`BBSERVER`
-      Points to the server that runs memory-resident BitBake. The variable
-      is only used when you employ memory-resident BitBake.
-
-   :term:`BBTARGETS`
-      Allows you to use a configuration file to add to the list of
-      command-line target recipes you want to build.
-
-   :term:`BITBAKE_UI`
-      Used to specify the UI module to use when running BitBake. Using this
-      variable is equivalent to using the ``-u`` command-line option.
-
-      .. note::
-
-         You must set this variable in the external environment in order
-         for it to work.
-
-   :term:`BUILDNAME`
-      A name assigned to the build. The name defaults to a datetime stamp
-      of when the build was started but can be defined by the metadata.
-
-   :term:`BZRDIR`
-      The directory in which files checked out of a Bazaar system are
-      stored.
-
-   :term:`CACHE`
-      Specifies the directory BitBake uses to store a cache of the metadata
-      so it does not need to be parsed every time BitBake is started.
-
-   :term:`CVSDIR`
-      The directory in which files checked out under the CVS system are
-      stored.
-
-   :term:`DEFAULT_PREFERENCE`
-      Specifies a weak bias for recipe selection priority.
-
-      The most common usage of this variable is to set it to "-1" within
-      a recipe for a development version of a piece of software. Using the
-      variable in this way causes the stable version of the recipe to build
-      by default in the absence of :term:`PREFERRED_VERSION` being used to
-      build the development version.
-
-      .. note::
-
-         The bias provided by :term:`DEFAULT_PREFERENCE` is weak and is overridden by
-         :term:`BBFILE_PRIORITY` if that variable is different between two
-         layers that contain different versions of the same recipe.
-
-   :term:`DEPENDS`
-      Lists a recipe's build-time dependencies (i.e. other recipe files).
-
-      Consider this simple example for two recipes named "a" and "b" that
-      produce similarly named packages. In this example, the :term:`DEPENDS`
-      statement appears in the "a" recipe::
-
-         DEPENDS = "b"
-
-      Here, the dependency is such that the ``do_configure`` task for recipe "a"
-      depends on the ``do_populate_sysroot`` task of recipe "b". This means
-      anything that recipe "b" puts into sysroot is available when recipe "a" is
-      configuring itself.
-
-      For information on runtime dependencies, see the :term:`RDEPENDS`
-      variable.
-
-   :term:`DESCRIPTION`
-      A long description for the recipe.
-
-   :term:`DL_DIR`
-      The central download directory used by the build process to store
-      downloads. By default, :term:`DL_DIR` gets files suitable for mirroring for
-      everything except Git repositories. If you want tarballs of Git
-      repositories, use the :term:`BB_GENERATE_MIRROR_TARBALLS` variable.
-
-   :term:`EXCLUDE_FROM_WORLD`
-      Directs BitBake to exclude a recipe from world builds (i.e.
-      ``bitbake world``). During world builds, BitBake locates, parses and
-      builds all recipes found in every layer exposed in the
-      ``bblayers.conf`` configuration file.
-
-      To exclude a recipe from a world build using this variable, set the
-      variable to "1" in the recipe. Set it to "0" to add it back to the
-      world build.
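-
-      For example, in a recipe::
-
-         EXCLUDE_FROM_WORLD = "1"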
-
-      .. note::
-
-         Recipes added to :term:`EXCLUDE_FROM_WORLD` may still be built during a world
-         build in order to satisfy dependencies of other recipes. Adding a
-         recipe to :term:`EXCLUDE_FROM_WORLD` only ensures that the recipe is not
-         explicitly added to the list of build targets in a world build.
-
-   :term:`FAKEROOT`
-      Contains the command to use when running a shell script in a fakeroot
-      environment. The :term:`FAKEROOT` variable is obsolete and has been
-      replaced by the other ``FAKEROOT*`` variables. See these entries in
-      the glossary for more information.
-
-   :term:`FAKEROOTBASEENV`
-      Lists environment variables to set when executing the command defined
-      by :term:`FAKEROOTCMD` that starts the
-      bitbake-worker process in the fakeroot environment.
-
-   :term:`FAKEROOTCMD`
-      Contains the command that starts the bitbake-worker process in the
-      fakeroot environment.
-
-   :term:`FAKEROOTDIRS`
-      Lists directories to create before running a task in the fakeroot
-      environment.
-
-   :term:`FAKEROOTENV`
-      Lists environment variables to set when running a task in the
-      fakeroot environment. For additional information on environment
-      variables and the fakeroot environment, see the
-      :term:`FAKEROOTBASEENV` variable.
-
-   :term:`FAKEROOTNOENV`
-      Lists environment variables to set when running a task that is not in
-      the fakeroot environment. For additional information on environment
-      variables and the fakeroot environment, see the
-      :term:`FAKEROOTENV` variable.
-
-   :term:`FETCHCMD`
-      Defines the command the BitBake fetcher module executes when running
-      fetch operations. You need to use an override suffix when you use the
-      variable (e.g. ``FETCHCMD_git`` or ``FETCHCMD_svn``).
-
-   :term:`FILE`
-      Points at the current file. BitBake sets this variable during the
-      parsing process to identify the file being parsed. BitBake also sets
-      this variable when a recipe is being executed to identify the recipe
-      file.
-
-   :term:`FILESPATH`
-      Specifies directories BitBake uses when searching for patches and
-      files. The "local" fetcher module uses these directories when
-      handling ``file://`` URLs. The variable behaves like a shell ``PATH``
-      environment variable. The value is a colon-separated list of
-      directories that are searched left-to-right in order.
-
-   :term:`FILE_LAYERNAME`
-      During parsing and task execution, this is set to the name of the
-      layer containing the recipe file. Code can use this to identify which
-      layer a recipe is from.
-
-   :term:`GITDIR`
-      The directory in which a local copy of a Git repository is stored
-      when it is cloned.
-
-   :term:`HGDIR`
-      The directory in which files checked out of a Mercurial system are
-      stored.
-
-   :term:`HOMEPAGE`
-      Website where more information about the software the recipe is
-      building can be found.
-
-   :term:`INHERIT`
-      Causes the named class or classes to be inherited globally. Anonymous
-      functions in the class or classes are executed in two disjoint situations:
-
-      -  When only the base configuration is parsed. For example, as a
-         result of the following BitBake invocation::
-
-            $ bitbake -e
-
-      -  When recipes are parsed --- then for each parsed recipe.
-
-      BitBake ignores changes to :term:`INHERIT` in individual recipes.
-
-      For more information on :term:`INHERIT`, see the
-      ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:\`\`inherit\`\` configuration directive`"
-      section.
-
-   :term:`LAYERDEPENDS`
-      Lists the layers, separated by spaces, upon which this layer
-      depends.
-      Optionally, you can specify a specific layer version for a
-      dependency by adding it to the end of the layer name with a colon
-      (e.g. "anotherlayer:3" to be compared against
-      :term:`LAYERVERSION`\ ``_anotherlayer`` in
-      this case). BitBake produces an error if any dependency is missing or
-      the version numbers do not match exactly (if specified).
-
-      You use this variable in the ``conf/layer.conf`` file. You must also
-      use the specific layer name as a suffix to the variable (e.g.
-      ``LAYERDEPENDS_mylayer``).
-
-   :term:`LAYERDIR`
-      When used inside the ``layer.conf`` configuration file, this variable
-      provides the path of the current layer. This variable is not
-      available outside of ``layer.conf`` and references are expanded
-      immediately when parsing of the file completes.
-
-   :term:`LAYERDIR_RE`
-      When used inside the ``layer.conf`` configuration file, this variable
-      provides the path of the current layer, escaped for use in a regular
-      expression (:term:`BBFILE_PATTERN`). This
-      variable is not available outside of ``layer.conf`` and references
-      are expanded immediately when parsing of the file completes.
-
-   :term:`LAYERSERIES_COMPAT`
-      Lists the versions of the OpenEmbedded-Core (OE-Core) for which
-      a layer is compatible. Using the :term:`LAYERSERIES_COMPAT` variable
-      allows the layer maintainer to indicate which combinations of the
-      layer and OE-Core can be expected to work. The variable gives the
-      system a way to detect when a layer has not been tested with new
-      releases of OE-Core (e.g. the layer is not maintained).
-
-      To specify the OE-Core versions for which a layer is compatible, use
-      this variable in your layer's ``conf/layer.conf`` configuration file.
-      For the list, use the Yocto Project release name (e.g. "kirkstone",
-      "mickledore"). To specify multiple OE-Core versions for the layer, use
-      a space-separated list::
-
-         LAYERSERIES_COMPAT_layer_root_name = "kirkstone mickledore"
-
-      .. note::
-
-         Setting :term:`LAYERSERIES_COMPAT` is required by the Yocto Project
-         Compatible version 2 standard.
-         The OpenEmbedded build system produces a warning if the variable
-         is not set for any given layer.
-
-   :term:`LAYERVERSION`
-      Optionally specifies the version of a layer as a single number. You
-      can use this variable within
-      :term:`LAYERDEPENDS` for another layer in
-      order to depend on a specific version of the layer.
-
-      You use this variable in the ``conf/layer.conf`` file. You must also
-      use the specific layer name as a suffix to the variable (e.g.
-      ``LAYERVERSION_mylayer``).
-
-   :term:`LICENSE`
-      The list of source licenses for the recipe.
-
-   :term:`MIRRORS`
-      Specifies additional paths from which BitBake gets source code. When
-      the build system searches for source code, it first tries the local
-      download directory. If that location fails, the build system tries
-      locations defined by :term:`PREMIRRORS`, the
-      upstream source, and then locations specified by :term:`MIRRORS` in that
-      order.
-
-   :term:`OVERRIDES`
-      A colon-separated list that BitBake uses to control what variables are
-      overridden after BitBake parses recipes and configuration files.
-
-      Following is a simple example that uses an overrides list based on
-      machine architectures::
-
-         OVERRIDES = "arm:x86:mips:powerpc"
-
-      You can find information on how to use :term:`OVERRIDES` in the
-      ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:conditional syntax
-      (overrides)`" section.
-
-   :term:`P4DIR`
-      The directory in which a local copy of a Perforce depot is stored
-      when it is fetched.
-
-   :term:`PACKAGES`
-      The list of packages the recipe creates.
-
-   :term:`PACKAGES_DYNAMIC`
-      A promise that your recipe satisfies runtime dependencies for
-      optional modules that are found in other recipes.
-      :term:`PACKAGES_DYNAMIC` does not actually satisfy the dependencies, it
-      only states that they should be satisfied. For example, if a hard,
-      runtime dependency (:term:`RDEPENDS`) of another
-      package is satisfied during the build through the
-      :term:`PACKAGES_DYNAMIC` variable, but a package with the module name is
-      never actually produced, then the other package will be broken.
-
-   :term:`PE`
-      The epoch of the recipe. By default, this variable is unset. The
-      variable is used to make upgrades possible when the versioning scheme
-      changes in some backwards incompatible way.
-
-   :term:`PERSISTENT_DIR`
-      Specifies the directory BitBake uses to store data that should be
-      preserved between builds. In particular, the data stored is the data
-      that uses BitBake's persistent data API, the data used by the PR
-      Server and PR Service, and the default location of the Hash Equivalence
-      database (when :term:`BB_HASHSERVE` is set to ``auto``).
-
-      This directory should not be shared between different builds. If you need
-      to share the Hash Equivalence database, you should set up a Hash
-      Equivalence server instead.
-
-   :term:`PF`
-      Specifies the recipe or package name and includes all version and
-      revision numbers (i.e. ``eglibc-2.13-r20+svnr15508/`` and
-      ``bash-4.2-r1/``).
-
-   :term:`PN`
-      The recipe name.
-
-   :term:`PR`
-      The revision of the recipe.
-
-   :term:`PREFERRED_PROVIDER`
-      Determines which recipe should be given preference when multiple
-      recipes provide the same item. You should always suffix the variable
-      with the name of the provided item, and you should set it to the
-      :term:`PN` of the recipe to which you want to give
-      precedence. Some examples::
-
-         PREFERRED_PROVIDER_virtual/kernel ?= "linux-yocto"
-         PREFERRED_PROVIDER_virtual/xserver = "xserver-xf86"
-         PREFERRED_PROVIDER_virtual/libgl ?= "mesa"
-
-   :term:`PREFERRED_PROVIDERS`
-      Determines which recipe should be given preference for cases where
-      multiple recipes provide the same item. Functionally,
-      :term:`PREFERRED_PROVIDERS` is identical to
-      :term:`PREFERRED_PROVIDER`. However, the :term:`PREFERRED_PROVIDERS` variable
-      lets you define preferences for multiple situations using the following
-      form::
-
-         PREFERRED_PROVIDERS = "xxx:yyy aaa:bbb ..."
-
-      This form is a convenient replacement for the following::
-
-         PREFERRED_PROVIDER_xxx = "yyy"
-         PREFERRED_PROVIDER_aaa = "bbb"
-
-   :term:`PREFERRED_VERSION`
-      If there are multiple versions of a recipe available, this variable
-      determines which version should be given preference. You must always
-      suffix the variable with the :term:`PN` you want to
-      select, and you should set :term:`PV` accordingly for
-      precedence.
-
-      The :term:`PREFERRED_VERSION` variable supports limited wildcard use
-      through the "``%``" character. You can use the character to match any
-      number of characters, which can be useful when specifying versions
-      that contain long revision numbers that potentially change. Here are
-      two examples::
-
-         PREFERRED_VERSION_python = "2.7.3"
-         PREFERRED_VERSION_linux-yocto = "4.12%"
-
-      .. important::
-
-         The use of the "``%``" character is limited in that it only works at the
-         end of the string. You cannot use the wildcard character in any other
-         location of the string.
-
-      If a recipe with the specified version is not available, a warning
-      message will be shown. See :term:`REQUIRED_VERSION` if you want this
-      to be an error instead.
-
-   :term:`PREMIRRORS`
-      Specifies additional paths from which BitBake gets source code. When
-      the build system searches for source code, it first tries the local
-      download directory. If that location fails, the build system tries
-      locations defined by :term:`PREMIRRORS`, the upstream source, and then
-      locations specified by :term:`MIRRORS` in that order.
-
-      Typically, you would add a specific server for the build system to
-      attempt before any others by adding something like the following to
-      your configuration::
-
-         PREMIRRORS:prepend = "\
-             git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
-             ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
-             http://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
-             https://.*/.* http://downloads.yoctoproject.org/mirror/sources/"
-
-      These changes cause the build system to intercept Git, FTP, HTTP, and
-      HTTPS requests and direct them to the ``http://`` sources mirror. You can
-      use ``file://`` URLs to point to local directories or network shares as
-      well.
-
-   :term:`PROVIDES`
-      A list of aliases by which a particular recipe can be known. By
-      default, a recipe's own :term:`PN` is implicitly already in its
-      :term:`PROVIDES` list. If a recipe uses :term:`PROVIDES`, the additional
-      aliases are synonyms for the recipe and can be useful for satisfying
-      dependencies of other recipes during the build as specified by
-      :term:`DEPENDS`.
-
-      Consider the following example :term:`PROVIDES` statement from a recipe
-      file ``libav_0.8.11.bb``::
-
-         PROVIDES += "libpostproc"
-
-      The :term:`PROVIDES` statement results in the "libav" recipe also being known
-      as "libpostproc".
-
-      In addition to providing recipes under alternate names, the
-      :term:`PROVIDES` mechanism is also used to implement virtual targets. A
-      virtual target is a name that corresponds to some particular
-      functionality (e.g. a Linux kernel). Recipes that provide the
-      functionality in question list the virtual target in :term:`PROVIDES`.
-      Recipes that depend on the functionality in question can include the
-      virtual target in :term:`DEPENDS` to leave the
-      choice of provider open.
-
-      Conventionally, virtual targets have names of the form
-      "virtual/function" (e.g. "virtual/kernel"). The slash is simply part
-      of the name and has no syntactical significance.
-
-   :term:`PRSERV_HOST`
-      The network-based :term:`PR` service host and port.
-
-      Following is an example of how the :term:`PRSERV_HOST` variable is set::
-
-         PRSERV_HOST = "localhost:0"
-
-      You must set the variable if you want to automatically start a local PR
-      service. You can set :term:`PRSERV_HOST` to other values to use a remote PR
-      service.
-
-   :term:`PV`
-      The version of the recipe.
-
-   :term:`RDEPENDS`
-      Lists a package's runtime dependencies (i.e. other packages) that
-      must be installed in order for the built package to run correctly. If
-      a package in this list cannot be found during the build, you will get
-      a build error.
-
-      Because the :term:`RDEPENDS` variable applies to packages being built,
-      you should always use the variable in a form with an attached package
-      name. For example, suppose you are building a development package
-      that depends on the ``perl`` package.
In this case, you would use the - following :term:`RDEPENDS` statement:: - - RDEPENDS:${PN}-dev += "perl" - - In the example, the development package depends on the ``perl`` package. - Thus, the :term:`RDEPENDS` variable has the ``${PN}-dev`` package name as part - of the variable. - - BitBake supports specifying versioned dependencies. Although the - syntax varies depending on the packaging format, BitBake hides these - differences from you. Here is the general syntax to specify versions - with the :term:`RDEPENDS` variable:: - - RDEPENDS:${PN} = "package (operator version)" - - For ``operator``, you can specify the following:: - - = - < - > - <= - >= - - For example, the following sets up a dependency on version 1.2 or - greater of the package ``foo``:: - - RDEPENDS:${PN} = "foo (>= 1.2)" - - For information on build-time dependencies, see the :term:`DEPENDS` - variable. - - :term:`REPODIR` - The directory in which a local copy of a ``google-repo`` directory is - stored when it is synced. - - :term:`REQUIRED_VERSION` - If there are multiple versions of a recipe available, this variable - determines which version should be given preference. :term:`REQUIRED_VERSION` - works in exactly the same manner as :term:`PREFERRED_VERSION`, except - that if the specified version is not available then an error message - is shown and the build fails immediately. - - If both :term:`REQUIRED_VERSION` and :term:`PREFERRED_VERSION` are set for - the same recipe, the :term:`REQUIRED_VERSION` value applies. - - :term:`RPROVIDES` - A list of package name aliases that a package also provides. These - aliases are useful for satisfying runtime dependencies of other - packages both during the build and on the target (as specified by - :term:`RDEPENDS`). - - As with all package-controlling variables, you must always use the - variable in conjunction with a package name override. Here is an - example:: - - RPROVIDES:${PN} = "widget-abi-2" - - :term:`RRECOMMENDS` - A list of packages that extends the usability of a package being - built. The package being built does not depend on this list of - packages in order to successfully build, but needs them for the - extended usability. To specify runtime dependencies for packages, see - the :term:`RDEPENDS` variable. - - BitBake supports specifying versioned recommends. Although the syntax - varies depending on the packaging format, BitBake hides these - differences from you. Here is the general syntax to specify versions - with the :term:`RRECOMMENDS` variable:: - - RRECOMMENDS:${PN} = "package (operator version)" - - For ``operator``, you can specify the following:: - - = - < - > - <= - >= - - For example, the following sets up a recommend on version - 1.2 or greater of the package ``foo``:: - - RRECOMMENDS:${PN} = "foo (>= 1.2)" - - :term:`SECTION` - The section in which packages should be categorized. - - :term:`SRC_URI` - The list of source files --- local or remote. This variable tells - BitBake which bits to pull for the build and how to pull them. For - example, if the recipe or append file needs to fetch a single tarball - from the Internet, the recipe or append file uses a :term:`SRC_URI` - entry that specifies that tarball. On the other hand, if the recipe or - append file needs to fetch a tarball, apply two patches, and include - a custom file, the recipe or append file needs an :term:`SRC_URI` - variable that specifies all those sources. - - The following list explains the available URI protocols. 
-      URI protocols are highly dependent on particular BitBake Fetcher
-      submodules. Depending on the fetcher BitBake uses, various URL
-      parameters are employed. For specifics on the supported Fetchers, see
-      the :ref:`bitbake-user-manual/bitbake-user-manual-fetching:fetchers`
-      section.
-
-      -  ``az://``: Fetches files from an Azure Storage account using HTTPS.
-
-      -  ``bzr://``: Fetches files from a Bazaar revision control
-         repository.
-
-      -  ``ccrc://``: Fetches files from a ClearCase repository.
-
-      -  ``cvs://``: Fetches files from a CVS revision control
-         repository.
-
-      -  ``file://``: Fetches files, which are usually files shipped
-         with the Metadata, from the local machine.
-         The path is relative to the :term:`FILESPATH`
-         variable. Thus, the build system searches, in order, from the
-         following directories, which are assumed to be subdirectories of
-         the directory in which the recipe file (``.bb``) or append file
-         (``.bbappend``) resides:
-
-         -  ``${BPN}``: the base recipe name without any special suffix
-            or version numbers.
-
-         -  ``${BP}`` - ``${BPN}-${PV}``: the base recipe name and
-            version but without any special package name suffix.
-
-         -  ``files``: files within a directory, which is named ``files``
-            and is also alongside the recipe or append file.
-
-      -  ``ftp://``: Fetches files from the Internet using FTP.
-
-      -  ``git://``: Fetches files from a Git revision control
-         repository.
-
-      -  ``gitsm://``: Fetches submodules from a Git revision control
-         repository.
-
-      -  ``hg://``: Fetches files from a Mercurial (``hg``) revision
-         control repository.
-
-      -  ``http://``: Fetches files from the Internet using HTTP.
-
-      -  ``https://``: Fetches files from the Internet using HTTPS.
-
-      -  ``npm://``: Fetches JavaScript modules from a registry.
-
-      -  ``osc://``: Fetches files from an OSC (OpenSUSE Build service)
-         revision control repository.
-
-      -  ``p4://``: Fetches files from a Perforce (``p4``) revision
-         control repository.
-
-      -  ``repo://``: Fetches files from a repo (Git) repository.
-
-      -  ``ssh://``: Fetches files from a secure shell.
-
-      -  ``svn://``: Fetches files from a Subversion (``svn``) revision
-         control repository.
-
-      Here are some additional options worth mentioning:
-
-      -  ``downloadfilename``: Specifies the filename used when storing
-         the downloaded file.
-
-      -  ``name``: Specifies a name to be used for association with
-         :term:`SRC_URI` checksums or :term:`SRCREV` when you have more than one
-         file or source control repository specified in :term:`SRC_URI`.
-         For example::
-
-            SRC_URI = "git://example.com/foo.git;branch=main;name=first \
-                       git://example.com/bar.git;branch=main;name=second \
-                       http://example.com/file.tar.gz;name=third"
-
-            SRCREV_first = "f1d2d2f924e986ac86fdf7b36c94bcdf32beec15"
-            SRCREV_second = "e242ed3bffccdf271b7fbaf34ed72d089537b42f"
-            SRC_URI[third.sha256sum] = "13550350a8681c84c861aac2e5b440161c2b33a3e4f302ac680ca5b686de48de"
-
-      -  ``subdir``: Places the file (or extracts its contents) into the
-         specified subdirectory. This option is useful for unusual tarballs
-         or other archives that do not have their files already in a
-         subdirectory within the archive. This path can be further modified
-         by fetcher specific parameters.
-
-      -  ``subpath``: Limits the checkout to a specific subpath of the
-         tree when the Git fetcher is used.
-
-      -  ``unpack``: Controls whether or not to unpack the file if it is
-         an archive. The default action is to unpack the file.
-
-   :term:`SRCDATE`
-      The date of the source code used to build the package.
-      This variable applies only if the source was fetched from a Source
-      Code Manager (SCM).
-
-   :term:`SRCREV`
-      The revision of the source code used to build the package. This
-      variable applies only when using Subversion, Git, Mercurial and
-      Bazaar. If you want to build a fixed revision and you want to avoid
-      performing a query on the remote repository every time BitBake parses
-      your recipe, you should specify a :term:`SRCREV` that is a full revision
-      identifier and not just a tag.
-
-   :term:`SRCREV_FORMAT`
-      Helps construct a valid package version string when
-      multiple source controlled URLs are used in
-      :term:`SRC_URI`.
-
-      The system needs help constructing this value under these
-      circumstances. Each component in the :term:`SRC_URI` is assigned a name
-      and these are referenced in the :term:`SRCREV_FORMAT` variable. Consider
-      an example with URLs named "machine" and "meta". In this case,
-      :term:`SRCREV_FORMAT` could look like "machine_meta" and those names
-      would have the SCM versions substituted into each position. Only one
-      ``AUTOINC`` placeholder is added, if needed, and it is placed at the
-      start of the returned string.
-
-      The :term:`SRCREV_FORMAT` can also take the form "_component2".
-      This assumes that there is a component in the :term:`SRC_URI` that does not
-      have a name assigned. While this is not considered good practice, it can be
-      useful if a ``.bbappend`` file needs to extend the :term:`SRC_URI` with
-      an additional repository.
-
-   :term:`STAMP`
-      Specifies the base path used to create recipe stamp files. The path
-      to an actual stamp file is constructed by evaluating this string and
-      then appending additional information.
-
-   :term:`STAMPCLEAN`
-      Specifies the base path used to create recipe stamp files. Unlike the
-      :term:`STAMP` variable, :term:`STAMPCLEAN` can contain
-      wildcards to match the range of files a clean operation should
-      remove. BitBake uses a clean operation to remove any other stamps it
-      should be removing when creating a new stamp.
-
-   :term:`SUMMARY`
-      A short summary for the recipe, which is 72 characters or less.
-
-   :term:`SVNDIR`
-      The directory in which files checked out of a Subversion system are
-      stored.
-
-   :term:`T`
-      Points to a directory where BitBake places temporary files, which
-      consist mostly of task logs and scripts, when building a particular
-      recipe.
-
-   :term:`TOPDIR`
-      Points to the build directory. BitBake automatically sets this
-      variable.
diff --git a/bitbake/doc/bitbake-user-manual/figures/bb_multiconfig_files.png b/bitbake/doc/bitbake-user-manual/figures/bb_multiconfig_files.png
deleted file mode 100644
index 041f06403b..0000000000
Binary files a/bitbake/doc/bitbake-user-manual/figures/bb_multiconfig_files.png and /dev/null differ
diff --git a/bitbake/doc/bitbake-user-manual/figures/bitbake-title.png b/bitbake/doc/bitbake-user-manual/figures/bitbake-title.png
deleted file mode 100644
index cb290154da..0000000000
Binary files a/bitbake/doc/bitbake-user-manual/figures/bitbake-title.png and /dev/null differ
diff --git a/bitbake/doc/bitbake.1 b/bitbake/doc/bitbake.1
deleted file mode 100644
index 7fc1652ecd..0000000000
--- a/bitbake/doc/bitbake.1
+++ /dev/null
@@ -1,142 +0,0 @@
-.\" Hey, EMACS: -*- nroff -*-
-.\" First parameter, NAME, should be all caps
-.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection
-.\" other parameters are allowed: see man(7), man(1)
-.TH BITBAKE 1 "November 19, 2006"
-.\" Please adjust this date whenever revising the manpage.
-.\" -.\" Some roff macros, for reference: -.\" .nh disable hyphenation -.\" .hy enable hyphenation -.\" .ad l left justify -.\" .ad b justify to both left and right margins -.\" .nf disable filling -.\" .fi enable filling -.\" .br insert line break -.\" .sp insert n+1 empty lines -.\" for manpage-specific macros, see man(7) -.SH NAME -BitBake \- simple tool for the execution of tasks -.SH SYNOPSIS -.B bitbake -.RI [ options ] " packagenames" -.br -.SH DESCRIPTION -This manual page documents briefly the -.B bitbake -command. -.PP -.\" TeX users may be more comfortable with the \fB\fP and -.\" \fI\fP escape sequences to invode bold face and italics, -.\" respectively. -\fBbitbake\fP is a program that executes the specified task (default is 'build') -for a given set of BitBake files. -.br -It expects that BBFILES is defined, which is a space separated list of files to -be executed. BBFILES does support wildcards. -.br -Default BBFILES are the .bb files in the current directory. -.SH OPTIONS -This program follow the usual GNU command line syntax, with long -options starting with two dashes (`-'). -.TP -.B \-h, \-\-help -Show summary of options. -.TP -.B \-\-version -Show version of program. -.TP -.B \-bBUILDFILE, \-\-buildfile=BUILDFILE -execute the task against this .bb file, rather than a package from BBFILES. -.TP -.B \-k, \-\-continue -continue as much as possible after an error. While the target that failed, and -those that depend on it, cannot be remade, the other dependencies of these -targets can be processed all the same. -.TP -.B \-a, \-\-tryaltconfigs -continue with builds by trying to use alternative providers where possible. -.TP -.B \-f, \-\-force -force run of specified cmd, regardless of stamp status -.TP -.B \-i, \-\-interactive -drop into the interactive mode also called the BitBake shell. -.TP -.B \-cCMD, \-\-cmd=CMD -Specify task to execute. Note that this only executes the specified task for -the providee and the packages it depends on, i.e. 'compile' does not implicitly -call stage for the dependencies (IOW: use only if you know what you are doing). -Depending on the base.bbclass a listtasks task is defined and will show -available tasks. -.TP -.B \-rFILE, \-\-read=FILE -read the specified file before bitbake.conf -.TP -.B \-v, \-\-verbose -output more chit-chat to the terminal -.TP -.B \-D, \-\-debug -Increase the debug level. You can specify this more than once. -.TP -.B \-n, \-\-dry-run -don't execute, just go through the motions -.TP -.B \-p, \-\-parse-only -quit after parsing the BB files (developers only) -.TP -.B \-s, \-\-show-versions -show current and preferred versions of all packages -.TP -.B \-e, \-\-environment -show the global or per-recipe environment (this is what used to be bbread) -.TP -.B \-g, \-\-graphviz -emit the dependency trees of the specified packages in the dot syntax -.TP -.B \-IIGNORED\_DOT\_DEPS, \-\-ignore-deps=IGNORED_DOT_DEPS -Stop processing at the given list of dependencies when generating dependency -graphs. This can help to make the graph more appealing -.TP -.B \-lDEBUG_DOMAINS, \-\-log-domains=DEBUG_DOMAINS -Show debug logging for the specified logging domains -.TP -.B \-P, \-\-profile -profile the command and print a report -.TP -.B \-uUI, \-\-ui=UI -User interface to use. Currently, knotty, taskexp or ncurses can be specified as UI. -.TP -.B \-tSERVERTYPE, \-\-servertype=SERVERTYPE -Choose which server to use, none, process or xmlrpc. 
-.TP -.B \-\-revisions-changed -Set the exit code depending on whether upstream floating revisions have changed or not. -.TP -.B \-\-server-only -Run bitbake without a UI; the frontend can connect to the bitbake server itself. -.TP -.B \-BBIND, \-\-bind=BIND -The name/address for the bitbake server to bind to. -.TP -.B \-\-no\-setscene -Do not run any setscene tasks, forcing builds. - -.SH ENVIRONMENT VARIABLES -bitbake uses the following environment variables to control its -operation: -.TP -.B BITBAKE_UI -The bitbake user interface; overridden by the \fB-u\fP commandline option. - -.SH AUTHORS -BitBake was written by -Phil Blundell, -Holger Freyther, -Chris Larson, -Mickey Lauer, -Richard Purdie, -Holger Schurig -.PP -This manual page was written by Marcin Juszkiewicz -for the Debian project (but may be used by others). diff --git a/bitbake/doc/conf.py b/bitbake/doc/conf.py deleted file mode 100644 index bce386624e..0000000000 --- a/bitbake/doc/conf.py +++ /dev/null @@ -1,109 +0,0 @@ -# Configuration file for the Sphinx documentation builder. -# -# This file only contains a selection of the most common options. For a full -# list see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -# import os -# import sys -# sys.path.insert(0, os.path.abspath('.')) - -import sys -import datetime - -from pathlib import Path - -current_version = "dev" - -# String used in sidebar -version = 'Version: ' + current_version -if current_version == 'dev': - version = 'Version: Current Development' -# Version seen in documentation_options.js and hence in js switchers code -release = current_version - -# -- Project information ----------------------------------------------------- - -project = 'Bitbake' -copyright = '2004-%s, Richard Purdie, Chris Larson, and Phil Blundell' \ - % datetime.datetime.now().year -author = 'Richard Purdie, Chris Larson, and Phil Blundell' - -# external links and substitutions -extlinks = { - 'yocto_docs': ('https://docs.yoctoproject.org%s', None), - 'oe_lists': ('https://lists.openembedded.org%s', None), - 'wikipedia': ('https://en.wikipedia.org/wiki/%s', None), -} - -# -- General configuration --------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'sphinx.ext.autosectionlabel', - 'sphinx.ext.extlinks', - 'sphinx.ext.autodoc', -] -autosectionlabel_prefix_document = True - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] - -# master document name. The default changed from contents to index, so better -# set it ourselves. -master_doc = 'index' - -# create substitution for project configuration variables -rst_prolog = """ -.. |project_name| replace:: %s -.. |copyright| replace:: %s -.. 
|author| replace:: %s -""" % (project, copyright, author) - -# -- Options for HTML output ------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -try: - import sphinx_rtd_theme - html_theme = 'sphinx_rtd_theme' -except ImportError: - sys.stderr.write("The Sphinx sphinx_rtd_theme HTML theme was not found.\ - \nPlease make sure to install the sphinx_rtd_theme python package.\n") - sys.exit(1) - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['sphinx-static'] - -# Add custom CSS and JS files -html_css_files = ['theme_overrides.css'] -html_js_files = ['switchers.js'] - -# Hide 'Created using Sphinx' text -html_show_sphinx = False - -# Add 'Last updated' on each page -html_last_updated_fmt = '%b %d, %Y' - -# Remove the trailing 'dot' in section numbers -html_secnumber_suffix = " " - -# autodoc needs the modules available to auto-generate documentation from the -# code -sys.path.insert(0, str(Path('..', 'lib').resolve())) diff --git a/bitbake/doc/genindex.rst b/bitbake/doc/genindex.rst deleted file mode 100644 index a4af06f656..0000000000 --- a/bitbake/doc/genindex.rst +++ /dev/null @@ -1,3 +0,0 @@ -===== -Index -===== diff --git a/bitbake/doc/index.rst b/bitbake/doc/index.rst deleted file mode 100644 index 9f2a9067d3..0000000000 --- a/bitbake/doc/index.rst +++ /dev/null @@ -1,41 +0,0 @@ -.. SPDX-License-Identifier: CC-BY-2.5 - -=================== -BitBake User Manual -=================== - -| - -.. toctree:: - :caption: Table of Contents - :numbered: - - bitbake-user-manual/bitbake-user-manual-intro - bitbake-user-manual/bitbake-user-manual-execution - bitbake-user-manual/bitbake-user-manual-environment-setup - bitbake-user-manual/bitbake-user-manual-metadata - bitbake-user-manual/bitbake-user-manual-ref-variables-context - bitbake-user-manual/bitbake-user-manual-fetching - bitbake-user-manual/bitbake-user-manual-ref-variables - bitbake-user-manual/bitbake-user-manual-library-functions - bitbake-user-manual/bitbake-user-manual-hello - -.. toctree:: - :maxdepth: 1 - :hidden: - - genindex - releases - ----- - -.. include:: - -| BitBake Community -| Copyright |copy| |copyright| -| - -This work is licensed under the Creative Commons Attribution License. To view a -copy of this license, visit http://creativecommons.org/licenses/by/2.5/ or send -a letter to Creative Commons, 444 Castro Street, Suite 900, Mountain View, -California 94041, USA. diff --git a/bitbake/doc/releases.rst b/bitbake/doc/releases.rst deleted file mode 100644 index 676db66ec5..0000000000 --- a/bitbake/doc/releases.rst +++ /dev/null @@ -1,193 +0,0 @@ -.. 
SPDX-License-Identifier: CC-BY-2.5 - -================================= -BitBake Supported Release Manuals -================================= - -****************************** -Release Series 5.2 (walnascar) -****************************** - -- :yocto_docs:`BitBake 2.12 User Manual ` - -******************************* -Release Series 5.0 (scarthgap) -******************************* - -- :yocto_docs:`BitBake 2.8 User Manual ` - -****************************** -Release Series 4.0 (kirkstone) -****************************** - -- :yocto_docs:`BitBake 2.0 User Manual ` - -================================ -BitBake Outdated Release Manuals -================================ - -**************************** -Release Series 5.1 (styhead) -**************************** - -- :yocto_docs:`BitBake 2.10 User Manual ` - -******************************* -Release Series 4.3 (nanbield) -******************************* - -- :yocto_docs:`BitBake 2.6 User Manual ` - -******************************* -Release Series 4.2 (mickledore) -******************************* - -- :yocto_docs:`BitBake 2.4 User Manual ` - -***************************** -Release Series 4.1 (langdale) -***************************** - -- :yocto_docs:`BitBake 2.2 User Manual ` - -****************************** -Release Series 3.4 (honister) -****************************** - -- :yocto_docs:`BitBake 1.52 User Manual ` - -****************************** -Release Series 3.3 (hardknott) -****************************** - -- :yocto_docs:`BitBake 1.50 User Manual ` - -******************************* -Release Series 3.2 (gatesgarth) -******************************* - -- :yocto_docs:`BitBake 1.48 User Manual ` - -**************************** -Release Series 3.1 (dunfell) -**************************** - -- :yocto_docs:`BitBake 1.46 User Manual ` -- :yocto_docs:`3.1 BitBake User Manual ` -- :yocto_docs:`3.1.1 BitBake User Manual ` -- :yocto_docs:`3.1.2 BitBake User Manual ` -- :yocto_docs:`3.1.3 BitBake User Manual ` - -************************* -Release Series 3.0 (zeus) -************************* - -- :yocto_docs:`3.0 BitBake User Manual ` -- :yocto_docs:`3.0.1 BitBake User Manual ` -- :yocto_docs:`3.0.2 BitBake User Manual ` -- :yocto_docs:`3.0.3 BitBake User Manual ` -- :yocto_docs:`3.0.4 BitBake User Manual ` - -**************************** -Release Series 2.7 (warrior) -**************************** - -- :yocto_docs:`2.7 BitBake User Manual ` -- :yocto_docs:`2.7.1 BitBake User Manual ` -- :yocto_docs:`2.7.2 BitBake User Manual ` -- :yocto_docs:`2.7.3 BitBake User Manual ` -- :yocto_docs:`2.7.4 BitBake User Manual ` - -************************* -Release Series 2.6 (thud) -************************* - -- :yocto_docs:`2.6 BitBake User Manual ` -- :yocto_docs:`2.6.1 BitBake User Manual ` -- :yocto_docs:`2.6.2 BitBake User Manual ` -- :yocto_docs:`2.6.3 BitBake User Manual ` -- :yocto_docs:`2.6.4 BitBake User Manual ` - -************************* -Release Series 2.5 (sumo) -************************* - -- :yocto_docs:`2.5 Documentation ` -- :yocto_docs:`2.5.1 Documentation ` -- :yocto_docs:`2.5.2 Documentation ` -- :yocto_docs:`2.5.3 Documentation ` - -************************** -Release Series 2.4 (rocko) -************************** - -- :yocto_docs:`2.4 BitBake User Manual ` -- :yocto_docs:`2.4.1 BitBake User Manual ` -- :yocto_docs:`2.4.2 BitBake User Manual ` -- :yocto_docs:`2.4.3 BitBake User Manual ` -- :yocto_docs:`2.4.4 BitBake User Manual ` - -************************* -Release Series 2.3 (pyro) -************************* - -- :yocto_docs:`2.3 BitBake 
User Manual ` -- :yocto_docs:`2.3.1 BitBake User Manual ` -- :yocto_docs:`2.3.2 BitBake User Manual ` -- :yocto_docs:`2.3.3 BitBake User Manual ` -- :yocto_docs:`2.3.4 BitBake User Manual ` - -************************** -Release Series 2.2 (morty) -************************** - -- :yocto_docs:`2.2 BitBake User Manual ` -- :yocto_docs:`2.2.1 BitBake User Manual ` -- :yocto_docs:`2.2.2 BitBake User Manual ` -- :yocto_docs:`2.2.3 BitBake User Manual ` - -**************************** -Release Series 2.1 (krogoth) -**************************** - -- :yocto_docs:`2.1 BitBake User Manual ` -- :yocto_docs:`2.1.1 BitBake User Manual ` -- :yocto_docs:`2.1.2 BitBake User Manual ` -- :yocto_docs:`2.1.3 BitBake User Manual ` - -*************************** -Release Series 2.0 (jethro) -*************************** - -- :yocto_docs:`1.9 BitBake User Manual ` -- :yocto_docs:`2.0 BitBake User Manual ` -- :yocto_docs:`2.0.1 BitBake User Manual ` -- :yocto_docs:`2.0.2 BitBake User Manual ` -- :yocto_docs:`2.0.3 BitBake User Manual ` - -************************* -Release Series 1.8 (fido) -************************* - -- :yocto_docs:`1.8 BitBake User Manual ` -- :yocto_docs:`1.8.1 BitBake User Manual ` -- :yocto_docs:`1.8.2 BitBake User Manual ` - -************************** -Release Series 1.7 (dizzy) -************************** - -- :yocto_docs:`1.7 BitBake User Manual ` -- :yocto_docs:`1.7.1 BitBake User Manual ` -- :yocto_docs:`1.7.2 BitBake User Manual ` -- :yocto_docs:`1.7.3 BitBake User Manual ` - -************************** -Release Series 1.6 (daisy) -************************** - -- :yocto_docs:`1.6 BitBake User Manual ` -- :yocto_docs:`1.6.1 BitBake User Manual ` -- :yocto_docs:`1.6.2 BitBake User Manual ` -- :yocto_docs:`1.6.3 BitBake User Manual ` - diff --git a/bitbake/doc/sphinx-static/switchers.js b/bitbake/doc/sphinx-static/switchers.js deleted file mode 100644 index 32113cfa96..0000000000 --- a/bitbake/doc/sphinx-static/switchers.js +++ /dev/null @@ -1,233 +0,0 @@ -(function() { - 'use strict'; - - var all_versions = { - 'dev': 'dev (3.2)', - '3.1.2': '3.1.2', - '3.0.3': '3.0.3', - '2.7.4': '2.7.4', - }; - - var all_doctypes = { - 'single': 'Individual Webpages', - 'mega': "All-in-one 'Mega' Manual", - }; - - // Simple version comparision - // Return 1 if a > b - // Return -1 if a < b - // Return 0 if a == b - function ver_compare(a, b) { - if (a == "dev") { - return 1; - } - - if (a === b) { - return 0; - } - - var a_components = a.split("."); - var b_components = b.split("."); - - var len = Math.min(a_components.length, b_components.length); - - // loop while the components are equal - for (var i = 0; i < len; i++) { - // A bigger than B - if (parseInt(a_components[i]) > parseInt(b_components[i])) { - return 1; - } - - // B bigger than A - if (parseInt(a_components[i]) < parseInt(b_components[i])) { - return -1; - } - } - - // If one's a prefix of the other, the longer one is greater. - if (a_components.length > b_components.length) { - return 1; - } - - if (a_components.length < b_components.length) { - return -1; - } - - // Otherwise they are the same. - return 0; - } - - function build_version_select(current_series, current_version) { - var buf = [''); - return buf.join(''); - } - - function build_doctype_select(current_doctype) { - var buf = [''); - return buf.join(''); - } - - function navigate_to_first_existing(urls) { - // Navigate to the first existing URL in urls. 
- var url = urls.shift(); - - // Web browsers won't redirect file:// urls to file urls using ajax but - // its useful for local testing - if (url.startsWith("file://")) { - window.location.href = url; - return; - } - - if (urls.length == 0) { - window.location.href = url; - return; - } - $.ajax({ - url: url, - success: function() { - window.location.href = url; - }, - error: function() { - navigate_to_first_existing(urls); - } - }); - } - - function get_docroot_url() { - var url = window.location.href; - var root = DOCUMENTATION_OPTIONS.URL_ROOT; - - var urlarray = url.split('/'); - // Trim off anything after '/' - urlarray.pop(); - var depth = (root.match(/\.\.\//g) || []).length; - for (var i = 0; i < depth; i++) { - urlarray.pop(); - } - - return urlarray.join('/') + '/'; - } - - function on_version_switch() { - var selected_version = $(this).children('option:selected').attr('value'); - var url = window.location.href; - var current_version = DOCUMENTATION_OPTIONS.VERSION; - var docroot = get_docroot_url() - - var new_versionpath = selected_version + '/'; - if (selected_version == "dev") - new_versionpath = ''; - - // dev versions have no version prefix - if (current_version == "dev") { - var new_url = docroot + new_versionpath + url.replace(docroot, ""); - var fallback_url = docroot + new_versionpath; - } else { - var new_url = url.replace('/' + current_version + '/', '/' + new_versionpath); - var fallback_url = new_url.replace(url.replace(docroot, ""), ""); - } - - console.log(get_docroot_url()) - console.log(url + " to url " + new_url); - console.log(url + " to fallback " + fallback_url); - - if (new_url != url) { - navigate_to_first_existing([ - new_url, - fallback_url, - 'https://www.yoctoproject.org/docs/', - ]); - } - } - - function on_doctype_switch() { - var selected_doctype = $(this).children('option:selected').attr('value'); - var url = window.location.href; - if (selected_doctype == 'mega') { - var docroot = get_docroot_url() - var current_version = DOCUMENTATION_OPTIONS.VERSION; - // Assume manuals before 3.2 are using old docbook mega-manual - if (ver_compare(current_version, "3.2") < 0) { - var new_url = docroot + "mega-manual/mega-manual.html"; - } else { - var new_url = docroot + "singleindex.html"; - } - } else { - var new_url = url.replace("singleindex.html", "index.html") - } - - if (new_url != url) { - navigate_to_first_existing([ - new_url, - 'https://www.yoctoproject.org/docs/', - ]); - } - } - - // Returns the current doctype based upon the url - function doctype_segment_from_url(url) { - if (url.includes("singleindex") || url.includes("mega-manual")) - return "mega"; - return "single"; - } - - $(document).ready(function() { - var release = DOCUMENTATION_OPTIONS.VERSION; - var current_doctype = doctype_segment_from_url(window.location.href); - var current_series = release.substr(0, 3); - var version_select = build_version_select(current_series, release); - - $('.version_switcher_placeholder').html(version_select); - $('.version_switcher_placeholder select').bind('change', on_version_switch); - - var doctype_select = build_doctype_select(current_doctype); - - $('.doctype_switcher_placeholder').html(doctype_select); - $('.doctype_switcher_placeholder select').bind('change', on_doctype_switch); - - if (ver_compare(release, "3.1") < 0) { - $('#outdated-warning').html('Version ' + release + ' of the project is now considered obsolete, please select and use a more recent version'); - $('#outdated-warning').css('padding', '.5em'); - } else if (release != "dev") { - 
$.each(all_versions, function(version, title) { - var series = version.substr(0, 3); - if (series == current_series && version != release) { - $('#outdated-warning').html('This document is for outdated version ' + release + ', you should select the latest release version in this series, ' + version + '.'); - $('#outdated-warning').css('padding', '.5em'); - } - }); - } - }); -})(); diff --git a/bitbake/doc/sphinx-static/theme_overrides.css b/bitbake/doc/sphinx-static/theme_overrides.css deleted file mode 100644 index e362677a7f..0000000000 --- a/bitbake/doc/sphinx-static/theme_overrides.css +++ /dev/null @@ -1,162 +0,0 @@ -/* - SPDX-License-Identifier: CC-BY-2.0-UK -*/ - -body { - font-family: Verdana, Sans, sans-serif; - margin: 0em auto; - color: #333; -} - -h1,h2,h3,h4,h5,h6,h7 { - font-family: Arial, Sans; - color: #00557D; - clear: both; -} - -h1 { - font-size: 2em; - text-align: left; - padding: 0em 0em 0em 0em; - margin: 2em 0em 0em 0em; -} - -h2.subtitle { - margin: 0.10em 0em 3.0em 0em; - padding: 0em 0em 0em 0em; - font-size: 1.8em; - padding-left: 20%; - font-weight: normal; - font-style: italic; -} - -h2 { - margin: 2em 0em 0.66em 0em; - padding: 0.5em 0em 0em 0em; - font-size: 1.5em; - font-weight: bold; -} - -h3.subtitle { - margin: 0em 0em 1em 0em; - padding: 0em 0em 0em 0em; - font-size: 142.14%; - text-align: right; -} - -h3 { - margin: 1em 0em 0.5em 0em; - padding: 1em 0em 0em 0em; - font-size: 140%; - font-weight: bold; -} - -h4 { - margin: 1em 0em 0.5em 0em; - padding: 1em 0em 0em 0em; - font-size: 120%; - font-weight: bold; -} - -h5 { - margin: 1em 0em 0.5em 0em; - padding: 1em 0em 0em 0em; - font-size: 110%; - font-weight: bold; -} - -h6 { - margin: 1em 0em 0em 0em; - padding: 1em 0em 0em 0em; - font-size: 110%; - font-weight: bold; -} - -em { - font-weight: bold; -} - -.pre { - font-size: medium; - font-family: Courier, monospace; -} - -.wy-nav-content a { - text-decoration: underline; - color: #444; - background: transparent; -} - -.wy-nav-content a:hover { - text-decoration: underline; - background-color: #dedede; -} - -.wy-nav-content a:visited { - color: #444; -} - -[alt='Permalink'] { color: #eee; } -[alt='Permalink']:hover { color: black; } - -@media screen { - /* content column - * - * RTD theme's default is 800px as max width for the content, but we have - * tables with tons of columns, which need the full width of the view-port. 
- */ - - .wy-nav-content{max-width: none; } - - /* inline literal: drop the borderbox, padding and red color */ - code, .rst-content tt, .rst-content code { - color: inherit; - border: none; - padding: unset; - background: inherit; - font-size: 85%; - } - - .rst-content tt.literal,.rst-content tt.literal,.rst-content code.literal { - color: inherit; - } - - /* Admonition should be gray, not blue or green */ - .rst-content .note .admonition-title, - .rst-content .tip .admonition-title, - .rst-content .warning .admonition-title, - .rst-content .caution .admonition-title, - .rst-content .important .admonition-title { - background: #f0f0f2; - color: #00557D; - - } - - .rst-content .note, - .rst-content .tip, - .rst-content .important, - .rst-content .warning, - .rst-content .caution { - background: #f0f0f2; - } - - /* Remove the icon in front of note/tip element, and before the logo */ - .icon-home:before, .rst-content .admonition-title:before { - display: none - } - - /* a custom informalexample container is used in some doc */ - .informalexample { - border: 1px solid; - border-color: #aaa; - margin: 1em 0em; - padding: 1em; - page-break-inside: avoid; - } - - /* Remove the blue background in the top left corner, around the logo */ - .wy-side-nav-search { - background: inherit; - } - -} diff --git a/bitbake/doc/template/Vera.ttf b/bitbake/doc/template/Vera.ttf deleted file mode 100644 index 58cd6b5e61..0000000000 Binary files a/bitbake/doc/template/Vera.ttf and /dev/null differ diff --git a/bitbake/doc/template/VeraMoBd.ttf b/bitbake/doc/template/VeraMoBd.ttf deleted file mode 100644 index 9be6547ed6..0000000000 Binary files a/bitbake/doc/template/VeraMoBd.ttf and /dev/null differ diff --git a/bitbake/doc/template/VeraMono.ttf b/bitbake/doc/template/VeraMono.ttf deleted file mode 100644 index 139f0b4311..0000000000 Binary files a/bitbake/doc/template/VeraMono.ttf and /dev/null differ diff --git a/bitbake/doc/template/draft.png b/bitbake/doc/template/draft.png deleted file mode 100644 index 53051a9ddd..0000000000 Binary files a/bitbake/doc/template/draft.png and /dev/null differ diff --git a/bitbake/lib/bb/COW.py b/bitbake/lib/bb/COW.py deleted file mode 100644 index 4af03c54ad..0000000000 --- a/bitbake/lib/bb/COW.py +++ /dev/null @@ -1,199 +0,0 @@ -# -# This is a copy on write dictionary and set which abuses classes to try and be nice and fast. -# -# Copyright (C) 2006 Tim Ansell -# -# SPDX-License-Identifier: GPL-2.0-only -# -# Please Note: -# Be careful when using mutable types (ie Dict and Lists) - operations involving these are SLOW. -# Assign a file to __warn__ to get warnings about slow operations. 
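# A minimal usage sketch (an illustration assuming the classes defined below;
# not part of the original file): copies are cheap class layers, and writes to
# a child never disturb the parent.
#
#   from bb.COW import COWDictBase
#
#   base = COWDictBase.copy()
#   base['x'] = 1           # stored on the base level
#   child = base.copy()     # O(1): creates a new class level, copies no data
#   child['x'] = 2          # shadows the parent's value on the child level
#   assert base['x'] == 1   # the parent is unchanged
#   assert child['x'] == 2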
-# - - -import copy - -ImmutableTypes = ( - bool, - complex, - float, - int, - tuple, - frozenset, - str -) - -MUTABLE = "__mutable__" - - -class COWMeta(type): - pass - - -class COWDictMeta(COWMeta): - __warn__ = False - __hasmutable__ = False - __marker__ = tuple() - - def __str__(cls): - ignored_keys = set(["__count__", "__doc__", "__module__", "__firstlineno__", "__static_attributes__"]) - keys = set(cls.__dict__.keys()) - ignored_keys - return "<COWDict Level: %i Current Keys: %i>" % (cls.__count__, len(keys)) - - __repr__ = __str__ - - def cow(cls): - class C(cls): - __count__ = cls.__count__ + 1 - - return C - - copy = cow - __call__ = cow - - def __setitem__(cls, key, value): - if value is not None and not isinstance(value, ImmutableTypes): - if not isinstance(value, COWMeta): - cls.__hasmutable__ = True - key += MUTABLE - setattr(cls, key, value) - - def __getmutable__(cls, key, readonly=False): - nkey = key + MUTABLE - try: - return cls.__dict__[nkey] - except KeyError: - pass - - value = getattr(cls, nkey) - if readonly: - return value - - if cls.__warn__ is not False and not isinstance(value, COWMeta): - print("Warning: Doing a copy because %s is a mutable type." % key, file=cls.__warn__) - try: - value = value.copy() - except AttributeError as e: - value = copy.copy(value) - setattr(cls, nkey, value) - return value - - __getmarker__ = [] - - def __getreadonly__(cls, key, default=__getmarker__): - """ - Get a value (even if mutable) which you promise not to change. - """ - return cls.__getitem__(key, default, True) - - def __getitem__(cls, key, default=__getmarker__, readonly=False): - try: - try: - value = getattr(cls, key) - except AttributeError: - value = cls.__getmutable__(key, readonly) - - # This is for values which have been deleted - if value is cls.__marker__: - raise AttributeError("key %s does not exist." % key) - - return value - except AttributeError as e: - if default is not cls.__getmarker__: - return default - - raise KeyError(str(e)) - - def __delitem__(cls, key): - cls.__setitem__(key, cls.__marker__) - - def __revertitem__(cls, key): - if key not in cls.__dict__: - key += MUTABLE - delattr(cls, key) - - def __contains__(cls, key): - return cls.has_key(key) - - def has_key(cls, key): - value = cls.__getreadonly__(key, cls.__marker__) - if value is cls.__marker__: - return False - return True - - def iter(cls, type, readonly=False): - for key in dir(cls): - if key.startswith("__"): - continue - - if key.endswith(MUTABLE): - key = key[:-len(MUTABLE)] - - if type == "keys": - yield key - - try: - if readonly: - value = cls.__getreadonly__(key) - else: - value = cls[key] - except KeyError: - continue - - if type == "values": - yield value - if type == "items": - yield (key, value) - return - - def iterkeys(cls): - return cls.iter("keys") - - def itervalues(cls, readonly=False): - if cls.__warn__ is not False and cls.__hasmutable__ and readonly is False: - print("Warning: If you aren't going to change any of the values call with True.", file=cls.__warn__) - return cls.iter("values", readonly) - - def iteritems(cls, readonly=False): - if cls.__warn__ is not False and cls.__hasmutable__ and readonly is False: - print("Warning: If you aren't going to change any of the values call with True.", file=cls.__warn__) - return cls.iter("items", readonly) - - -class COWSetMeta(COWDictMeta): - def __str__(cls): - ignored_keys = set(["__count__", "__doc__", "__module__", "__firstlineno__", "__static_attributes__"]) - keys = set(cls.__dict__.keys()) - ignored_keys - return "<COWSet Level: %i Current Keys: %i>" % (cls.__count__, len(keys)) - - __repr__ = __str__ - - def cow(cls): - class C(cls): - __count__ = cls.__count__ + 1 - - return C - - def add(cls, value): - COWDictMeta.__setitem__(cls, repr(hash(value)), value) - - def remove(cls, value): - COWDictMeta.__delitem__(cls, repr(hash(value))) - - def __in__(cls, value): - return repr(hash(value)) in COWDictMeta - - def iterkeys(cls): - raise TypeError("sets don't have keys") - - def iteritems(cls): - raise TypeError("sets don't have 'items'") - - -# These are the actual classes you use! -class COWDictBase(metaclass=COWDictMeta): - __count__ = 0 - - -class COWSetBase(metaclass=COWSetMeta): - __count__ = 0 diff --git a/bitbake/lib/bb/__init__.py b/bitbake/lib/bb/__init__.py deleted file mode 100644 index 407c4509d4..0000000000 --- a/bitbake/lib/bb/__init__.py +++ /dev/null @@ -1,312 +0,0 @@ -# -# BitBake Build System Python Library -# -# Copyright (C) 2003 Holger Schurig -# Copyright (C) 2003, 2004 Chris Larson -# -# Based on Gentoo's portage.py. -# -# SPDX-License-Identifier: GPL-2.0-only -# - -__version__ = "2.15.2" - -import sys -if sys.version_info < (3, 9, 0): - raise RuntimeError("Sorry, python 3.9.0 or later is required for this version of bitbake") - -if sys.version_info < (3, 10, 0): - # With python 3.8 and 3.9, we see errors of "libgcc_s.so.1 must be installed for pthread_cancel to work" - # https://stackoverflow.com/questions/64797838/libgcc-s-so-1-must-be-installed-for-pthread-cancel-to-work - # https://bugs.ams1.psf.io/issue42888 - # so ensure libgcc_s is loaded early on - import ctypes - libgcc_s = ctypes.CDLL('libgcc_s.so.1') - -class BBHandledException(Exception): - """ - The big dilemma for generic bitbake code is what information to give the user - when an exception occurs.
Any exception inheriting this base exception class - has already provided information to the user via some 'fired' message type such as - an explicitly fired event using bb.fire, or a bb.error message. If bitbake - encounters an exception derived from this class, no backtrace or other information - will be given to the user, its assumed the earlier event provided the relevant information. - """ - pass - -import os -import logging -from collections import namedtuple -import multiprocessing as mp - -# Python 3.14 changes the default multiprocessing context from "fork" to -# "forkserver". However, bitbake heavily relies on "fork" behavior to -# efficiently pass data to the child processes. Places that need this should do: -# from bb import multiprocessing -# in place of -# import multiprocessing - -class MultiprocessingContext(object): - """ - Multiprocessing proxy object that uses the "fork" context for a property if - available, otherwise goes to the main multiprocessing module. This allows - it to be a drop-in replacement for the multiprocessing module, but use the - fork context - """ - def __init__(self): - super().__setattr__("_ctx", mp.get_context("fork")) - - def __getattr__(self, name): - if hasattr(self._ctx, name): - return getattr(self._ctx, name) - return getattr(mp, name) - - def __setattr__(self, name, value): - raise AttributeError(f"Unable to set attribute {name}") - -multiprocessing = MultiprocessingContext() - - -class NullHandler(logging.Handler): - def emit(self, record): - pass - -class BBLoggerMixin(object): - def __init__(self, *args, **kwargs): - # Does nothing to allow calling super() from derived classes - pass - - def setup_bblogger(self, name): - if name.split(".")[0] == "BitBake": - self.debug = self._debug_helper - - def _debug_helper(self, *args, **kwargs): - return self.bbdebug(1, *args, **kwargs) - - def debug2(self, *args, **kwargs): - return self.bbdebug(2, *args, **kwargs) - - def debug3(self, *args, **kwargs): - return self.bbdebug(3, *args, **kwargs) - - def bbdebug(self, level, msg, *args, **kwargs): - loglevel = logging.DEBUG - level + 1 - if not bb.event.worker_pid: - if self.name in bb.msg.loggerDefaultDomains and loglevel > (bb.msg.loggerDefaultDomains[self.name]): - return - if loglevel < bb.msg.loggerDefaultLogLevel: - return - - if not isinstance(level, int) or not isinstance(msg, str): - mainlogger.warning("Invalid arguments in bbdebug: %s" % repr((level, msg,) + args)) - - return self.log(loglevel, msg, *args, **kwargs) - - def plain(self, msg, *args, **kwargs): - return self.log(logging.INFO + 1, msg, *args, **kwargs) - - def verbose(self, msg, *args, **kwargs): - return self.log(logging.INFO - 1, msg, *args, **kwargs) - - def verbnote(self, msg, *args, **kwargs): - return self.log(logging.INFO + 2, msg, *args, **kwargs) - - def warnonce(self, msg, *args, **kwargs): - return self.log(logging.WARNING - 1, msg, *args, **kwargs) - - def erroronce(self, msg, *args, **kwargs): - return self.log(logging.ERROR - 1, msg, *args, **kwargs) - - -Logger = logging.getLoggerClass() -class BBLogger(Logger, BBLoggerMixin): - def __init__(self, name, *args, **kwargs): - self.setup_bblogger(name) - super().__init__(name, *args, **kwargs) - -logging.raiseExceptions = False -logging.setLoggerClass(BBLogger) - -class BBLoggerAdapter(logging.LoggerAdapter, BBLoggerMixin): - def __init__(self, logger, *args, **kwargs): - self.setup_bblogger(logger.name) - super().__init__(logger, *args, **kwargs) - -logging.LoggerAdapter = BBLoggerAdapter - -logger = 
logging.getLogger("BitBake") -logger.addHandler(NullHandler()) -logger.setLevel(logging.DEBUG - 2) - -mainlogger = logging.getLogger("BitBake.Main") - -class PrefixLoggerAdapter(logging.LoggerAdapter): - def __init__(self, prefix, logger): - super().__init__(logger, {}) - self.__msg_prefix = prefix - - def process(self, msg, kwargs): - return "%s%s" %(self.__msg_prefix, msg), kwargs - -# This has to be imported after the setLoggerClass, as the import of bb.msg -# can result in construction of the various loggers. -import bb.msg - -from bb import fetch2 as fetch -sys.modules['bb.fetch'] = sys.modules['bb.fetch2'] - -# Messaging convenience functions -def plain(*args): - """ - Prints a message at "plain" level (higher level than a ``bb.note()``). - - Arguments: - - - ``args``: one or more strings to print. - """ - mainlogger.plain(''.join(args)) - -def debug(lvl, *args): - """ - Prints a debug message. - - Arguments: - - - ``lvl``: debug level. Higher value increases the debug level - (determined by ``bitbake -D``). - - ``args``: one or more strings to print. - """ - if isinstance(lvl, str): - mainlogger.warning("Passed invalid debug level '%s' to bb.debug", lvl) - args = (lvl,) + args - lvl = 1 - mainlogger.bbdebug(lvl, ''.join(args)) - -def note(*args): - """ - Prints a message at "note" level. - - Arguments: - - - ``args``: one or more strings to print. - """ - mainlogger.info(''.join(args)) - -def verbnote(*args): - """ - A higher priority note which will show on the console but isn't a warning. - - Use in contexts when something is happening the user should be aware of but - they probably did something to make it happen. - - Arguments: - - - ``args``: one or more strings to print. - """ - mainlogger.verbnote(''.join(args)) - -# -# Warnings - things the user likely needs to pay attention to and fix -# -def warn(*args): - """ - Prints a warning message. - - Arguments: - - - ``args``: one or more strings to print. - """ - mainlogger.warning(''.join(args)) - -def warnonce(*args): - """ - Prints a warning message like ``bb.warn()``, but only prints the message - once. - - Arguments: - - - ``args``: one or more strings to print. - """ - mainlogger.warnonce(''.join(args)) - -def error(*args, **kwargs): - """ - Prints an error message. - - Arguments: - - - ``args``: one or more strings to print. - """ - mainlogger.error(''.join(args), extra=kwargs) - -def erroronce(*args): - """ - Prints an error message like ``bb.error()``, but only prints the message - once. - - Arguments: - - - ``args``: one or more strings to print. - """ - mainlogger.erroronce(''.join(args)) - -def fatal(*args, **kwargs): - """ - Prints an error message and stops the BitBake execution. - - Arguments: - - - ``args``: one or more strings to print. - """ - mainlogger.critical(''.join(args), extra=kwargs) - raise BBHandledException() - -def deprecated(func, name=None, advice=""): - """This is a decorator which can be used to mark functions - as deprecated. It will result in a warning being emitted - when the function is used.""" - import warnings - - if advice: - advice = ": %s" % advice - if name is None: - name = func.__name__ - - def newFunc(*args, **kwargs): - warnings.warn("Call to deprecated function %s%s." 
% (name, - advice), - category=DeprecationWarning, - stacklevel=2) - return func(*args, **kwargs) - newFunc.__name__ = func.__name__ - newFunc.__doc__ = func.__doc__ - newFunc.__dict__.update(func.__dict__) - return newFunc - -# For compatibility -def deprecate_import(current, modulename, fromlist, renames = None): - """Import objects from one module into another, wrapping them with a DeprecationWarning""" - - module = __import__(modulename, fromlist = fromlist) - for position, objname in enumerate(fromlist): - obj = getattr(module, objname) - newobj = deprecated(obj, "{0}.{1}".format(current, objname), - "Please use {0}.{1} instead".format(modulename, objname)) - if renames: - newname = renames[position] - else: - newname = objname - - setattr(sys.modules[current], newname, newobj) - -TaskData = namedtuple("TaskData", [ - "pn", - "taskname", - "fn", - "deps", - "provides", - "taskhash", - "unihash", - "hashfn", - "taskhash_deps", -]) diff --git a/bitbake/lib/bb/acl.py b/bitbake/lib/bb/acl.py deleted file mode 100755 index e9dbdb617f..0000000000 --- a/bitbake/lib/bb/acl.py +++ /dev/null @@ -1,213 +0,0 @@ -#! /usr/bin/env python3 -# -# Copyright 2023 by Garmin Ltd. or its subsidiaries -# -# SPDX-License-Identifier: MIT - - -import sys -import ctypes -import os -import errno -import pwd -import grp - -libacl = ctypes.CDLL("libacl.so.1", use_errno=True) - - -ACL_TYPE_ACCESS = 0x8000 -ACL_TYPE_DEFAULT = 0x4000 - -ACL_FIRST_ENTRY = 0 -ACL_NEXT_ENTRY = 1 - -ACL_UNDEFINED_TAG = 0x00 -ACL_USER_OBJ = 0x01 -ACL_USER = 0x02 -ACL_GROUP_OBJ = 0x04 -ACL_GROUP = 0x08 -ACL_MASK = 0x10 -ACL_OTHER = 0x20 - -ACL_READ = 0x04 -ACL_WRITE = 0x02 -ACL_EXECUTE = 0x01 - -acl_t = ctypes.c_void_p -acl_entry_t = ctypes.c_void_p -acl_permset_t = ctypes.c_void_p -acl_perm_t = ctypes.c_uint - -acl_tag_t = ctypes.c_int - -libacl.acl_free.argtypes = [acl_t] - - -def acl_free(acl): - libacl.acl_free(acl) - - -libacl.acl_get_file.restype = acl_t -libacl.acl_get_file.argtypes = [ctypes.c_char_p, ctypes.c_uint] - - -def acl_get_file(path, typ): - acl = libacl.acl_get_file(os.fsencode(path), typ) - if acl is None: - err = ctypes.get_errno() - raise OSError(err, os.strerror(err), str(path)) - - return acl - - -libacl.acl_get_entry.argtypes = [acl_t, ctypes.c_int, ctypes.c_void_p] - - -def acl_get_entry(acl, entry_id): - entry = acl_entry_t() - ret = libacl.acl_get_entry(acl, entry_id, ctypes.byref(entry)) - if ret < 0: - err = ctypes.get_errno() - raise OSError(err, os.strerror(err)) - - if ret == 0: - return None - - return entry - - -libacl.acl_get_tag_type.argtypes = [acl_entry_t, ctypes.c_void_p] - - -def acl_get_tag_type(entry_d): - tag = acl_tag_t() - ret = libacl.acl_get_tag_type(entry_d, ctypes.byref(tag)) - if ret < 0: - err = ctypes.get_errno() - raise OSError(err, os.strerror(err)) - return tag.value - - -libacl.acl_get_qualifier.restype = ctypes.c_void_p -libacl.acl_get_qualifier.argtypes = [acl_entry_t] - - -def acl_get_qualifier(entry_d): - ret = libacl.acl_get_qualifier(entry_d) - if ret is None: - err = ctypes.get_errno() - raise OSError(err, os.strerror(err)) - return ctypes.c_void_p(ret) - - -libacl.acl_get_permset.argtypes = [acl_entry_t, ctypes.c_void_p] - - -def acl_get_permset(entry_d): - permset = acl_permset_t() - ret = libacl.acl_get_permset(entry_d, ctypes.byref(permset)) - if ret < 0: - err = ctypes.get_errno() - raise OSError(err, os.strerror(err)) - - return permset - - -libacl.acl_get_perm.argtypes = [acl_permset_t, acl_perm_t] - - -def acl_get_perm(permset_d, perm): - ret = 
libacl.acl_get_perm(permset_d, perm) - if ret < 0: - err = ctypes.get_errno() - raise OSError(err, os.strerror(err)) - return bool(ret) - - -class Entry(object): - def __init__(self, tag, qualifier, mode): - self.tag = tag - self.qualifier = qualifier - self.mode = mode - - def __str__(self): - typ = "" - qual = "" - if self.tag == ACL_USER: - typ = "user" - qual = pwd.getpwuid(self.qualifier).pw_name - elif self.tag == ACL_GROUP: - typ = "group" - qual = grp.getgrgid(self.qualifier).gr_name - elif self.tag == ACL_USER_OBJ: - typ = "user" - elif self.tag == ACL_GROUP_OBJ: - typ = "group" - elif self.tag == ACL_MASK: - typ = "mask" - elif self.tag == ACL_OTHER: - typ = "other" - - r = "r" if self.mode & ACL_READ else "-" - w = "w" if self.mode & ACL_WRITE else "-" - x = "x" if self.mode & ACL_EXECUTE else "-" - - return f"{typ}:{qual}:{r}{w}{x}" - - -class ACL(object): - def __init__(self, acl): - self.acl = acl - - def __del__(self): - acl_free(self.acl) - - def entries(self): - entry_id = ACL_FIRST_ENTRY - while True: - entry = acl_get_entry(self.acl, entry_id) - if entry is None: - break - - permset = acl_get_permset(entry) - - mode = 0 - for m in (ACL_READ, ACL_WRITE, ACL_EXECUTE): - if acl_get_perm(permset, m): - mode |= m - - qualifier = None - tag = acl_get_tag_type(entry) - - if tag == ACL_USER or tag == ACL_GROUP: - qual = acl_get_qualifier(entry) - qualifier = ctypes.cast(qual, ctypes.POINTER(ctypes.c_int))[0] - - yield Entry(tag, qualifier, mode) - - entry_id = ACL_NEXT_ENTRY - - @classmethod - def from_path(cls, path, typ): - acl = acl_get_file(path, typ) - return cls(acl) - - -def main(): - import argparse - from pathlib import Path - - parser = argparse.ArgumentParser() - parser.add_argument("path", help="File Path", type=Path) - - args = parser.parse_args() - - acl = ACL.from_path(args.path, ACL_TYPE_ACCESS) - for entry in acl.entries(): - print(str(entry)) - - return 0 - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/bitbake/lib/bb/asyncrpc/__init__.py b/bitbake/lib/bb/asyncrpc/__init__.py deleted file mode 100644 index a4371643d7..0000000000 --- a/bitbake/lib/bb/asyncrpc/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# -# Copyright BitBake Contributors -# -# SPDX-License-Identifier: GPL-2.0-only -# - - -from .client import AsyncClient, Client -from .serv import AsyncServer, AsyncServerConnection -from .connection import DEFAULT_MAX_CHUNK -from .exceptions import ( - ClientError, - ServerError, - ConnectionClosedError, - InvokeError, -) diff --git a/bitbake/lib/bb/asyncrpc/client.py b/bitbake/lib/bb/asyncrpc/client.py deleted file mode 100644 index 17b72033b9..0000000000 --- a/bitbake/lib/bb/asyncrpc/client.py +++ /dev/null @@ -1,271 +0,0 @@ -# -# Copyright BitBake Contributors -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import abc -import asyncio -import json -import os -import socket -import sys -import re -import contextlib -from threading import Thread -from .connection import StreamConnection, WebsocketConnection, DEFAULT_MAX_CHUNK -from .exceptions import ConnectionClosedError, InvokeError - -UNIX_PREFIX = "unix://" -WS_PREFIX = "ws://" -WSS_PREFIX = "wss://" - -ADDR_TYPE_UNIX = 0 -ADDR_TYPE_TCP = 1 -ADDR_TYPE_WS = 2 - -WEBSOCKETS_MIN_VERSION = (9, 1) -# Need websockets 10 with python 3.10+ -if sys.version_info >= (3, 10, 0): - WEBSOCKETS_MIN_VERSION = (10, 0) - - -def parse_address(addr): - if addr.startswith(UNIX_PREFIX): - return (ADDR_TYPE_UNIX, (addr[len(UNIX_PREFIX) :],)) - elif addr.startswith(WS_PREFIX) or addr.startswith(WSS_PREFIX): - return 
(ADDR_TYPE_WS, (addr,)) - else: - m = re.match(r"\[(?P[^\]]*)\]:(?P\d+)$", addr) - if m is not None: - host = m.group("host") - port = m.group("port") - else: - host, port = addr.split(":") - - return (ADDR_TYPE_TCP, (host, int(port))) - - -class AsyncClient(object): - def __init__( - self, - proto_name, - proto_version, - logger, - timeout=30, - server_headers=False, - headers={}, - ): - self.socket = None - self.max_chunk = DEFAULT_MAX_CHUNK - self.proto_name = proto_name - self.proto_version = proto_version - self.logger = logger - self.timeout = timeout - self.needs_server_headers = server_headers - self.server_headers = {} - self.headers = headers - - async def connect_tcp(self, address, port): - async def connect_sock(): - reader, writer = await asyncio.open_connection(address, port) - return StreamConnection(reader, writer, self.timeout, self.max_chunk) - - self._connect_sock = connect_sock - - async def connect_unix(self, path): - async def connect_sock(): - # AF_UNIX has path length issues so chdir here to workaround - cwd = os.getcwd() - try: - os.chdir(os.path.dirname(path)) - # The socket must be opened synchronously so that CWD doesn't get - # changed out from underneath us so we pass as a sock into asyncio - sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0) - sock.connect(os.path.basename(path)) - finally: - os.chdir(cwd) - reader, writer = await asyncio.open_unix_connection(sock=sock) - return StreamConnection(reader, writer, self.timeout, self.max_chunk) - - self._connect_sock = connect_sock - - async def connect_websocket(self, uri): - import websockets - - try: - version = tuple( - int(v) - for v in websockets.__version__.split(".")[ - 0 : len(WEBSOCKETS_MIN_VERSION) - ] - ) - except ValueError: - raise ImportError( - f"Unable to parse websockets version '{websockets.__version__}'" - ) - - if version < WEBSOCKETS_MIN_VERSION: - min_ver_str = ".".join(str(v) for v in WEBSOCKETS_MIN_VERSION) - raise ImportError( - f"Websockets version {websockets.__version__} is less than minimum required version {min_ver_str}" - ) - - async def connect_sock(): - try: - websocket = await websockets.connect( - uri, - ping_interval=None, - open_timeout=self.timeout, - ) - except asyncio.exceptions.TimeoutError: - raise ConnectionError("Timeout while connecting to websocket") - except (OSError, websockets.InvalidHandshake, websockets.InvalidURI) as exc: - raise ConnectionError(f"Could not connect to websocket: {exc}") from exc - return WebsocketConnection(websocket, self.timeout) - - self._connect_sock = connect_sock - - async def setup_connection(self): - # Send headers - await self.socket.send("%s %s" % (self.proto_name, self.proto_version)) - await self.socket.send( - "needs-headers: %s" % ("true" if self.needs_server_headers else "false") - ) - for k, v in self.headers.items(): - await self.socket.send("%s: %s" % (k, v)) - - # End of headers - await self.socket.send("") - - self.server_headers = {} - if self.needs_server_headers: - while True: - line = await self.socket.recv() - if not line: - # End headers - break - tag, value = line.split(":", 1) - self.server_headers[tag.lower()] = value.strip() - - async def get_header(self, tag, default): - await self.connect() - return self.server_headers.get(tag, default) - - async def connect(self): - if self.socket is None: - self.socket = await self._connect_sock() - await self.setup_connection() - - async def disconnect(self): - if self.socket is not None: - await self.socket.close() - self.socket = None - - async def close(self): - 
await self.disconnect() - - async def _send_wrapper(self, proc): - count = 0 - while True: - try: - await self.connect() - return await proc() - except ( - OSError, - ConnectionError, - ConnectionClosedError, - json.JSONDecodeError, - UnicodeDecodeError, - ) as e: - self.logger.warning("Error talking to server: %s" % e) - if count >= 3: - if not isinstance(e, ConnectionError): - raise ConnectionError(str(e)) - raise e - await self.close() - count += 1 - - def check_invoke_error(self, msg): - if isinstance(msg, dict) and "invoke-error" in msg: - raise InvokeError(msg["invoke-error"]["message"]) - - async def invoke(self, msg): - async def proc(): - await self.socket.send_message(msg) - return await self.socket.recv_message() - - result = await self._send_wrapper(proc) - self.check_invoke_error(result) - return result - - async def ping(self): - return await self.invoke({"ping": {}}) - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc_value, traceback): - await self.close() - - -class Client(object): - def __init__(self): - self.client = self._get_async_client() - self.loop = asyncio.new_event_loop() - - # Override any pre-existing loop. - # Without this, the PR server export selftest triggers a hang - # when running with Python 3.7. The drawback is that there is - # potential for issues if the PR and hash equiv (or some new) - # clients need to both be instantiated in the same process. - # This should be revisited if/when Python 3.9 becomes the - # minimum required version for BitBake, as it seems not - # required (but harmless) with it. - asyncio.set_event_loop(self.loop) - - self._add_methods("connect_tcp", "ping") - - @abc.abstractmethod - def _get_async_client(self): - pass - - def _get_downcall_wrapper(self, downcall): - def wrapper(*args, **kwargs): - return self.loop.run_until_complete(downcall(*args, **kwargs)) - - return wrapper - - def _add_methods(self, *methods): - for m in methods: - downcall = getattr(self.client, m) - setattr(self, m, self._get_downcall_wrapper(downcall)) - - def connect_unix(self, path): - self.loop.run_until_complete(self.client.connect_unix(path)) - self.loop.run_until_complete(self.client.connect()) - - @property - def max_chunk(self): - return self.client.max_chunk - - @max_chunk.setter - def max_chunk(self, value): - self.client.max_chunk = value - - def disconnect(self): - self.loop.run_until_complete(self.client.close()) - - def close(self): - if self.loop: - self.loop.run_until_complete(self.client.close()) - self.loop.run_until_complete(self.loop.shutdown_asyncgens()) - self.loop.close() - self.loop = None - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.close() - return False diff --git a/bitbake/lib/bb/asyncrpc/connection.py b/bitbake/lib/bb/asyncrpc/connection.py deleted file mode 100644 index 7f0cf6ba96..0000000000 --- a/bitbake/lib/bb/asyncrpc/connection.py +++ /dev/null @@ -1,146 +0,0 @@ -# -# Copyright BitBake Contributors -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import asyncio -import itertools -import json -from datetime import datetime -from .exceptions import ClientError, ConnectionClosedError - - -# The Python async server defaults to a 64K receive buffer, so we hardcode our -# maximum chunk size. 
It would be better if the client and server reported to -# each other what the maximum chunk sizes were, but that will slow down the -# connection setup with a round trip delay so I'd rather not do that unless it -# is necessary -DEFAULT_MAX_CHUNK = 32 * 1024 - - -def chunkify(msg, max_chunk): - if len(msg) < max_chunk - 1: - yield "".join((msg, "\n")) - else: - yield "".join((json.dumps({"chunk-stream": None}), "\n")) - - args = [iter(msg)] * (max_chunk - 1) - for m in map("".join, itertools.zip_longest(*args, fillvalue="")): - yield "".join(itertools.chain(m, "\n")) - yield "\n" - - -def json_serialize(obj): - if isinstance(obj, datetime): - return obj.isoformat() - raise TypeError("Type %s not serializeable" % type(obj)) - - -class StreamConnection(object): - def __init__(self, reader, writer, timeout, max_chunk=DEFAULT_MAX_CHUNK): - self.reader = reader - self.writer = writer - self.timeout = timeout - self.max_chunk = max_chunk - - @property - def address(self): - return self.writer.get_extra_info("peername") - - async def send_message(self, msg): - for c in chunkify(json.dumps(msg, default=json_serialize), self.max_chunk): - self.writer.write(c.encode("utf-8")) - await self.writer.drain() - - async def recv_message(self): - l = await self.recv() - - m = json.loads(l) - if not m: - return m - - if "chunk-stream" in m: - lines = [] - while True: - l = await self.recv() - if not l: - break - lines.append(l) - - m = json.loads("".join(lines)) - - return m - - async def send(self, msg): - self.writer.write(("%s\n" % msg).encode("utf-8")) - await self.writer.drain() - - async def recv(self): - if self.timeout < 0: - line = await self.reader.readline() - else: - try: - line = await asyncio.wait_for(self.reader.readline(), self.timeout) - except asyncio.TimeoutError: - raise ConnectionError("Timed out waiting for data") - - if not line: - raise ConnectionClosedError("Connection closed") - - line = line.decode("utf-8") - - if not line.endswith("\n"): - raise ConnectionError("Bad message %r" % (line)) - - return line.rstrip() - - async def close(self): - self.reader = None - if self.writer is not None: - self.writer.close() - self.writer = None - - -class WebsocketConnection(object): - def __init__(self, socket, timeout): - self.socket = socket - self.timeout = timeout - - @property - def address(self): - return ":".join(str(s) for s in self.socket.remote_address) - - async def send_message(self, msg): - await self.send(json.dumps(msg, default=json_serialize)) - - async def recv_message(self): - m = await self.recv() - return json.loads(m) - - async def send(self, msg): - import websockets.exceptions - - try: - await self.socket.send(msg) - except websockets.exceptions.ConnectionClosed: - raise ConnectionClosedError("Connection closed") - - async def recv(self): - import websockets.exceptions - - try: - if self.timeout < 0: - return await self.socket.recv() - - try: - return await asyncio.wait_for(self.socket.recv(), self.timeout) - except asyncio.TimeoutError: - raise ConnectionError("Timed out waiting for data") - except websockets.exceptions.ConnectionClosed: - raise ConnectionClosedError("Connection closed") - - async def close(self): - if self.socket is not None: - await self.socket.close() - self.socket = None diff --git a/bitbake/lib/bb/asyncrpc/exceptions.py b/bitbake/lib/bb/asyncrpc/exceptions.py deleted file mode 100644 index ae1043a38b..0000000000 --- a/bitbake/lib/bb/asyncrpc/exceptions.py +++ /dev/null @@ -1,21 +0,0 @@ -# -# Copyright BitBake Contributors -# -# 
SPDX-License-Identifier: GPL-2.0-only -# - - -class ClientError(Exception): - pass - - -class InvokeError(Exception): - pass - - -class ServerError(Exception): - pass - - -class ConnectionClosedError(Exception): - pass diff --git a/bitbake/lib/bb/asyncrpc/serv.py b/bitbake/lib/bb/asyncrpc/serv.py deleted file mode 100644 index bd1aded8db..0000000000 --- a/bitbake/lib/bb/asyncrpc/serv.py +++ /dev/null @@ -1,413 +0,0 @@ -# -# Copyright BitBake Contributors -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import abc -import asyncio -import json -import os -import signal -import socket -import sys -from bb import multiprocessing -import logging -from .connection import StreamConnection, WebsocketConnection -from .exceptions import ClientError, ServerError, ConnectionClosedError, InvokeError - - -class ClientLoggerAdapter(logging.LoggerAdapter): - def process(self, msg, kwargs): - return f"[Client {self.extra['address']}] {msg}", kwargs - - -class AsyncServerConnection(object): - # If a handler returns this object (e.g. `return self.NO_RESPONSE`), no - # return message will be automatically be sent back to the client - NO_RESPONSE = object() - - def __init__(self, socket, proto_name, logger): - self.socket = socket - self.proto_name = proto_name - self.handlers = { - "ping": self.handle_ping, - } - self.logger = ClientLoggerAdapter( - logger, - { - "address": socket.address, - }, - ) - self.client_headers = {} - - async def close(self): - await self.socket.close() - - async def handle_headers(self, headers): - return {} - - async def process_requests(self): - try: - self.logger.info("Client %r connected" % (self.socket.address,)) - - # Read protocol and version - client_protocol = await self.socket.recv() - if not client_protocol: - return - - (client_proto_name, client_proto_version) = client_protocol.split() - if client_proto_name != self.proto_name: - self.logger.debug("Rejecting invalid protocol %s" % (self.proto_name)) - return - - self.proto_version = tuple(int(v) for v in client_proto_version.split(".")) - if not self.validate_proto_version(): - self.logger.debug( - "Rejecting invalid protocol version %s" % (client_proto_version) - ) - return - - # Read headers - self.client_headers = {} - while True: - header = await self.socket.recv() - if not header: - # Empty line. 
End of headers - break - tag, value = header.split(":", 1) - self.client_headers[tag.lower()] = value.strip() - - if self.client_headers.get("needs-headers", "false") == "true": - for k, v in (await self.handle_headers(self.client_headers)).items(): - await self.socket.send("%s: %s" % (k, v)) - await self.socket.send("") - - # Handle messages - while True: - d = await self.socket.recv_message() - if d is None: - break - try: - response = await self.dispatch_message(d) - except InvokeError as e: - await self.socket.send_message( - {"invoke-error": {"message": str(e)}} - ) - break - - if response is not self.NO_RESPONSE: - await self.socket.send_message(response) - - except ConnectionClosedError as e: - self.logger.info(str(e)) - except (ClientError, ConnectionError) as e: - self.logger.error(str(e)) - finally: - await self.close() - - async def dispatch_message(self, msg): - for k in self.handlers.keys(): - if k in msg: - self.logger.debug("Handling %s" % k) - return await self.handlers[k](msg[k]) - - raise ClientError("Unrecognized command %r" % msg) - - async def handle_ping(self, request): - return {"alive": True} - - -class StreamServer(object): - def __init__(self, handler, logger): - self.handler = handler - self.logger = logger - self.closed = False - - async def handle_stream_client(self, reader, writer): - # writer.transport.set_write_buffer_limits(0) - socket = StreamConnection(reader, writer, -1) - if self.closed: - await socket.close() - return - - await self.handler(socket) - - async def stop(self): - self.closed = True - - -class TCPStreamServer(StreamServer): - def __init__(self, host, port, handler, logger, *, reuseport=False): - super().__init__(handler, logger) - self.host = host - self.port = port - self.reuseport = reuseport - - def start(self, loop): - self.server = loop.run_until_complete( - asyncio.start_server( - self.handle_stream_client, - self.host, - self.port, - reuse_port=self.reuseport, - ) - ) - - for s in self.server.sockets: - self.logger.debug("Listening on %r" % (s.getsockname(),)) - # Newer python does this automatically. Do it manually here for - # maximum compatibility - s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) - s.setsockopt(socket.SOL_TCP, socket.TCP_QUICKACK, 1) - - # Enable keep alives. This prevents broken client connections - # from persisting on the server for long periods of time. 
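# (Illustrative arithmetic based on the constants below: with
# TCP_KEEPIDLE=30, TCP_KEEPINTVL=15 and TCP_KEEPCNT=4, a silently dead
# peer is detected after roughly 30 + 15 * 4 = 90 seconds of silence.)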
- s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) - s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 30) - s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 15) - s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 4) - - name = self.server.sockets[0].getsockname() - if self.server.sockets[0].family == socket.AF_INET6: - self.address = "[%s]:%d" % (name[0], name[1]) - else: - self.address = "%s:%d" % (name[0], name[1]) - - return [self.server.wait_closed()] - - async def stop(self): - await super().stop() - self.server.close() - - def cleanup(self): - pass - - -class UnixStreamServer(StreamServer): - def __init__(self, path, handler, logger): - super().__init__(handler, logger) - self.path = path - - def start(self, loop): - cwd = os.getcwd() - try: - # Work around path length limits in AF_UNIX - os.chdir(os.path.dirname(self.path)) - self.server = loop.run_until_complete( - asyncio.start_unix_server( - self.handle_stream_client, os.path.basename(self.path) - ) - ) - finally: - os.chdir(cwd) - - self.logger.debug("Listening on %r" % self.path) - self.address = "unix://%s" % os.path.abspath(self.path) - return [self.server.wait_closed()] - - async def stop(self): - await super().stop() - self.server.close() - - def cleanup(self): - try: - os.unlink(self.path) - except FileNotFoundError: - pass - - -class WebsocketsServer(object): - def __init__(self, host, port, handler, logger, *, reuseport=False): - self.host = host - self.port = port - self.handler = handler - self.logger = logger - self.reuseport = reuseport - - def start(self, loop): - import websockets.server - - self.server = loop.run_until_complete( - websockets.server.serve( - self.client_handler, - self.host, - self.port, - ping_interval=None, - reuse_port=self.reuseport, - ) - ) - - for s in self.server.sockets: - self.logger.debug("Listening on %r" % (s.getsockname(),)) - - # Enable keep alives. This prevents broken client connections - # from persisting on the server for long periods of time. 
- s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) - s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 30) - s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 15) - s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 4) - - name = self.server.sockets[0].getsockname() - if self.server.sockets[0].family == socket.AF_INET6: - self.address = "ws://[%s]:%d" % (name[0], name[1]) - else: - self.address = "ws://%s:%d" % (name[0], name[1]) - - return [self.server.wait_closed()] - - async def stop(self): - self.server.close() - - def cleanup(self): - pass - - async def client_handler(self, websocket): - socket = WebsocketConnection(websocket, -1) - await self.handler(socket) - - -class AsyncServer(object): - def __init__(self, logger): - self.logger = logger - self.loop = None - self.run_tasks = [] - - def start_tcp_server(self, host, port, *, reuseport=False): - self.server = TCPStreamServer( - host, - port, - self._client_handler, - self.logger, - reuseport=reuseport, - ) - - def start_unix_server(self, path): - self.server = UnixStreamServer(path, self._client_handler, self.logger) - - def start_websocket_server(self, host, port, reuseport=False): - self.server = WebsocketsServer( - host, - port, - self._client_handler, - self.logger, - reuseport=reuseport, - ) - - async def _client_handler(self, socket): - address = socket.address - try: - client = self.accept_client(socket) - await client.process_requests() - except Exception as e: - import traceback - - self.logger.error( - "Error from client %s: %s" % (address, str(e)), exc_info=True - ) - traceback.print_exc() - finally: - self.logger.debug("Client %s disconnected", address) - await socket.close() - - @abc.abstractmethod - def accept_client(self, socket): - pass - - async def stop(self): - self.logger.debug("Stopping server") - await self.server.stop() - - def start(self): - tasks = self.server.start(self.loop) - self.address = self.server.address - return tasks - - def signal_handler(self): - self.logger.debug("Got exit signal") - self.loop.create_task(self.stop()) - - def _serve_forever(self, tasks): - try: - self.loop.add_signal_handler(signal.SIGTERM, self.signal_handler) - self.loop.add_signal_handler(signal.SIGINT, self.signal_handler) - self.loop.add_signal_handler(signal.SIGQUIT, self.signal_handler) - signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGTERM]) - - self.loop.run_until_complete(asyncio.gather(*tasks)) - - self.logger.debug("Server shutting down") - finally: - self.server.cleanup() - - def serve_forever(self): - """ - Serve requests in the current process - """ - self._create_loop() - tasks = self.start() - self._serve_forever(tasks) - self.loop.close() - - def _create_loop(self): - # Create loop and override any loop that may have existed in - # a parent process. It is possible that the usecases of - # serve_forever might be constrained enough to allow using - # get_event_loop here, but better safe than sorry for now. - self.loop = asyncio.new_event_loop() - asyncio.set_event_loop(self.loop) - - def serve_as_process(self, *, prefunc=None, args=(), log_level=None): - """ - Serve requests in a child process - """ - - def run(queue): - # Create loop and override any loop that may have existed - # in a parent process. Without doing this and instead - # using get_event_loop, at the very minimum the hashserv - # unit tests will hang when running the second test. 
- # This happens since get_event_loop in the spawned server - # process for the second testcase ends up with the loop - # from the hashserv client created in the unit test process - # when running the first testcase. The problem is somewhat - # more general, though, as any potential use of asyncio in - # Cooker could create a loop that needs to replaced in this - # new process. - self._create_loop() - try: - self.address = None - tasks = self.start() - finally: - # Always put the server address to wake up the parent task - queue.put(self.address) - queue.close() - - if prefunc is not None: - prefunc(self, *args) - - if log_level is not None: - self.logger.setLevel(log_level) - - self._serve_forever(tasks) - - self.loop.run_until_complete(self.loop.shutdown_asyncgens()) - self.loop.close() - - queue = multiprocessing.Queue() - - # Temporarily block SIGTERM. The server process will inherit this - # block which will ensure it doesn't receive the SIGTERM until the - # handler is ready for it - mask = signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGTERM]) - try: - self.process = multiprocessing.Process(target=run, args=(queue,)) - self.process.start() - - self.address = queue.get() - queue.close() - queue.join_thread() - - return self.process - finally: - signal.pthread_sigmask(signal.SIG_SETMASK, mask) diff --git a/bitbake/lib/bb/build.py b/bitbake/lib/bb/build.py deleted file mode 100644 index 40839a81b5..0000000000 --- a/bitbake/lib/bb/build.py +++ /dev/null @@ -1,1036 +0,0 @@ -# -# BitBake 'Build' implementation -# -# Core code for function execution and task handling in the -# BitBake build tools. -# -# Copyright (C) 2003, 2004 Chris Larson -# -# Based on Gentoo's portage.py. -# -# SPDX-License-Identifier: GPL-2.0-only -# -# Based on functions from the base bb module, Copyright 2003 Holger Schurig - -import os -import sys -import logging -import glob -import itertools -import time -import re -import stat -import datetime -import bb -import bb.msg -import bb.process -import bb.progress -from io import StringIO -from bb import data, event, utils - -bblogger = logging.getLogger('BitBake') -logger = logging.getLogger('BitBake.Build') - -verboseShellLogging = False -verboseStdoutLogging = False - -__mtime_cache = {} - -def cached_mtime_noerror(f): - if f not in __mtime_cache: - try: - __mtime_cache[f] = os.stat(f)[stat.ST_MTIME] - except OSError: - return 0 - return __mtime_cache[f] - -def reset_cache(): - global __mtime_cache - __mtime_cache = {} - -# When we execute a Python function, we'd like certain things -# in all namespaces, hence we add them to __builtins__. -# If we do not do this and use the exec globals, they will -# not be available to subfunctions. 
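# Editor's note: a minimal, self-contained demonstration of the scoping issue
# described above (the names "helper" and "tool" are invented for
# illustration). A function compiled against one set of exec globals cannot
# see names placed in a different caller's globals, but it can always see
# builtins:
import builtins

scope_a = {}
exec("def helper():\n    return tool", scope_a)    # helper's globals are scope_a

scope_b = {"helper": scope_a["helper"]}
try:
    exec("helper()", scope_b)                      # NameError: helper looks up 'tool' in scope_a
except NameError:
    pass

builtins.tool = 42                                 # builtins are the shared fallback namespace
exec("helper()", scope_b)                          # now succeeds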
-if hasattr(__builtins__, '__setitem__'): - builtins = __builtins__ -else: - builtins = __builtins__.__dict__ - -builtins['bb'] = bb -builtins['os'] = os - -class TaskBase(event.Event): - """Base class for task events""" - - def __init__(self, t, fn, logfile, d): - self._task = t - self._fn = fn - self._package = d.getVar("PF") - self._mc = d.getVar("BB_CURRENT_MC") - self.taskfile = d.getVar("FILE") - self.taskname = self._task - self.logfile = logfile - self.time = time.time() - self.pn = d.getVar("PN") - self.pv = d.getVar("PV") - event.Event.__init__(self) - self._message = "recipe %s: task %s: %s" % (d.getVar("PF"), t, self.getDisplayName()) - - def getTask(self): - return self._task - - def setTask(self, task): - self._task = task - - def getDisplayName(self): - return bb.event.getName(self)[4:] - - task = property(getTask, setTask, None, "task property") - -class TaskStarted(TaskBase): - """Task execution started""" - def __init__(self, t, fn, logfile, taskflags, d): - super(TaskStarted, self).__init__(t, fn, logfile, d) - self.taskflags = taskflags - -class TaskSucceeded(TaskBase): - """Task execution completed""" - -class TaskFailed(TaskBase): - """Task execution failed""" - - def __init__(self, task, fn, logfile, metadata, errprinted = False): - self.errprinted = errprinted - super(TaskFailed, self).__init__(task, fn, logfile, metadata) - -class TaskFailedSilent(TaskBase): - """Task execution failed (silently)""" - def getDisplayName(self): - # Don't need to tell the user it was silent - return "Failed" - -class TaskInvalid(TaskBase): - - def __init__(self, task, fn, metadata): - super(TaskInvalid, self).__init__(task, fn, None, metadata) - self._message = "No such task '%s'" % task - -class TaskProgress(event.Event): - """ - Task made some progress that could be reported to the user, usually in - the form of a progress bar or similar. - NOTE: this class does not inherit from TaskBase since it doesn't need - to - it's fired within the task context itself, so we don't have any of - the context information that you do in the case of the other events. - The event PID can be used to determine which task it came from. - The progress value is normally 0-100, but can also be negative - indicating that progress has been made but we aren't able to determine - how much. - The rate is optional, this is simply an extra string to display to the - user if specified. - """ - def __init__(self, progress, rate=None): - self.progress = progress - self.rate = rate - event.Event.__init__(self) - - -class LogTee(object): - def __init__(self, logger, outfile): - self.outfile = outfile - self.logger = logger - self.name = self.outfile.name - - def write(self, string): - self.logger.plain(string) - self.outfile.write(string) - - def __enter__(self): - self.outfile.__enter__() - return self - - def __exit__(self, *excinfo): - self.outfile.__exit__(*excinfo) - - def __repr__(self): - return '<LogTee {0}>'.format(self.name) - - def flush(self): - self.outfile.flush() - - -class StdoutNoopContextManager: - """ - This class acts like sys.stdout, but adds noop __enter__ and __exit__ methods.
- """ - def __enter__(self): - return sys.stdout - - def __exit__(self, *exc_info): - pass - - def write(self, string): - return sys.stdout.write(string) - - def flush(self): - sys.stdout.flush() - - @property - def name(self): - if "name" in dir(sys.stdout): - return sys.stdout.name - return "" - - -def exec_func(func, d, dirs = None): - """Execute a BB 'function'""" - - try: - oldcwd = os.getcwd() - except: - oldcwd = None - - flags = d.getVarFlags(func) - cleandirs = flags.get('cleandirs') if flags else None - if cleandirs: - for cdir in d.expand(cleandirs).split(): - bb.utils.remove(cdir, True) - bb.utils.mkdirhier(cdir) - if cdir == oldcwd: - os.chdir(cdir) - - if flags and dirs is None: - dirs = flags.get('dirs') - if dirs: - dirs = d.expand(dirs).split() - - if dirs: - for adir in dirs: - bb.utils.mkdirhier(adir) - adir = dirs[-1] - else: - adir = None - - body = d.getVar(func, False) - if not body: - if body is None: - logger.warning("Function %s doesn't exist", func) - return - - ispython = flags.get('python') - - lockflag = flags.get('lockfiles') - if lockflag: - lockfiles = [f for f in d.expand(lockflag).split()] - else: - lockfiles = None - - tempdir = d.getVar('T') - - # or func allows items to be executed outside of the normal - # task set, such as buildhistory - task = d.getVar('BB_RUNTASK') or func - if task == func: - taskfunc = task - else: - taskfunc = "%s.%s" % (task, func) - - runfmt = d.getVar('BB_RUNFMT') or "run.{func}.{pid}" - runfn = runfmt.format(taskfunc=taskfunc, task=task, func=func, pid=os.getpid()) - runfile = os.path.join(tempdir, runfn) - bb.utils.mkdirhier(os.path.dirname(runfile)) - - # Setup the courtesy link to the runfn, only for tasks - # we create the link 'just' before the run script is created - # if we create it after, and if the run script fails, then the - # link won't be created as an exception would be fired. 
- if task == func: - runlink = os.path.join(tempdir, 'run.{0}'.format(task)) - if runlink: - bb.utils.remove(runlink) - - try: - os.symlink(runfn, runlink) - except OSError: - pass - - with bb.utils.fileslocked(lockfiles): - if ispython: - exec_func_python(func, d, runfile, cwd=adir) - else: - exec_func_shell(func, d, runfile, cwd=adir) - - try: - curcwd = os.getcwd() - except: - curcwd = None - - if oldcwd and curcwd != oldcwd: - try: - bb.warn("Task %s changed cwd to %s" % (func, curcwd)) - os.chdir(oldcwd) - except: - pass - -_functionfmt = """ -{function}(d) -""" -logformatter = bb.msg.BBLogFormatter("%(levelname)s: %(message)s") -def exec_func_python(func, d, runfile, cwd=None): - """Execute a python BB 'function'""" - - code = _functionfmt.format(function=func) - bb.utils.mkdirhier(os.path.dirname(runfile)) - with open(runfile, 'w') as script: - bb.data.emit_func_python(func, script, d) - - if cwd: - try: - olddir = os.getcwd() - except OSError as e: - bb.warn("%s: Cannot get cwd: %s" % (func, e)) - olddir = None - os.chdir(cwd) - - bb.debug(2, "Executing python function %s" % func) - - try: - text = "def %s(d):\n%s" % (func, d.getVar(func, False)) - fn = d.getVarFlag(func, "filename", False) - lineno = int(d.getVarFlag(func, "lineno", False)) - bb.methodpool.insert_method(func, text, fn, lineno - 1) - - if verboseStdoutLogging: - sys.stdout.flush() - sys.stderr.flush() - currout = sys.stdout - currerr = sys.stderr - sys.stderr = sys.stdout = execio = StringIO() - comp = utils.better_compile(code, func, "exec_func_python() autogenerated") - utils.better_exec(comp, {"d": d}, code, "exec_func_python() autogenerated") - finally: - if verboseStdoutLogging: - execio.flush() - logger.plain("%s" % execio.getvalue()) - sys.stdout = currout - sys.stderr = currerr - execio.close() - # We want any stdout/stderr to be printed before any other log messages to make debugging - # more accurate. In some cases we seem to lose stdout/stderr entirely in logging tests without this. - sys.stdout.flush() - sys.stderr.flush() - bb.debug(2, "Python function %s finished" % func) - - if cwd and olddir: - try: - os.chdir(olddir) - except OSError as e: - bb.warn("%s: Cannot restore cwd %s: %s" % (func, olddir, e)) - -def shell_trap_code(): - return '''#!/bin/sh\n -__BITBAKE_LAST_LINE=0 - -# Emit a useful diagnostic if something fails: -bb_sh_exit_handler() { - ret=$? - if [ "$ret" != 0 ]; then - echo "WARNING: exit code $ret from a shell command." - fi - exit $ret -} - -bb_bash_exit_handler() { - ret=$? - { set +x; } > /dev/null - trap "" DEBUG - if [ "$ret" != 0 ]; then - echo "WARNING: ${BASH_SOURCE[0]}:${__BITBAKE_LAST_LINE} exit $ret from '$1'" - - echo "WARNING: Backtrace (BB generated script): " - for i in $(seq 1 $((${#FUNCNAME[@]} - 1))); do - if [ "$i" -eq 1 ]; then - echo -e "\t#$((i)): ${FUNCNAME[$i]}, ${BASH_SOURCE[$((i-1))]}, line ${__BITBAKE_LAST_LINE}" - else - echo -e "\t#$((i)): ${FUNCNAME[$i]}, ${BASH_SOURCE[$((i-1))]}, line ${BASH_LINENO[$((i-1))]}" - fi - done - fi - exit $ret -} - -bb_bash_debug_handler() { - local line=${BASH_LINENO[0]} - # For some reason the DEBUG trap trips with lineno=1 when scripts exit; ignore it - if [ "$line" -eq 1 ]; then - return - fi - - # Track the line number of commands as they execute. This is so we can have access to the failing line number - # in the EXIT trap. 
See http://gnu-bash.2382.n7.nabble.com/trap-echo-quot-trap-exit-on-LINENO-quot-EXIT-gt-wrong-linenumber-td3666.html - if [ "${FUNCNAME[1]}" != "bb_bash_exit_handler" ]; then - __BITBAKE_LAST_LINE=$line - fi -} - -case $BASH_VERSION in -"") trap 'bb_sh_exit_handler' 0 - set -e - ;; -*) trap 'bb_bash_exit_handler "$BASH_COMMAND"' 0 - trap '{ bb_bash_debug_handler; } 2>/dev/null' DEBUG - set -e - shopt -s extdebug - ;; -esac -''' - -def create_progress_handler(func, progress, logfile, d): - if progress == 'percent': - # Use default regex - return bb.progress.BasicProgressHandler(d, outfile=logfile) - elif progress.startswith('percent:'): - # Use specified regex - return bb.progress.BasicProgressHandler(d, regex=progress.split(':', 1)[1], outfile=logfile) - elif progress.startswith('outof:'): - # Use specified regex - return bb.progress.OutOfProgressHandler(d, regex=progress.split(':', 1)[1], outfile=logfile) - elif progress.startswith("custom:"): - # Use a custom progress handler that was injected via other means - import functools - from types import ModuleType - - parts = progress.split(":", 2) - _, cls, otherargs = parts[0], parts[1], (parts[2] or None) if parts[2:] else None - if cls: - def resolve(x, y): - if not x: - return None - if isinstance(x, ModuleType): - return getattr(x, y, None) - return x.get(y) - cls_obj = functools.reduce(resolve, cls.split("."), bb.utils._context) - if not cls_obj: - # Fall-back on __builtins__ - cls_obj = functools.reduce(resolve, cls.split("."), __builtins__) - if cls_obj: - return cls_obj(d, outfile=logfile, otherargs=otherargs) - bb.warn('%s: unknown custom progress handler in task progress varflag value "%s", ignoring' % (func, cls)) - else: - bb.warn('%s: invalid task progress varflag value "%s", ignoring' % (func, progress)) - - return logfile - -def exec_func_shell(func, d, runfile, cwd=None): - """Execute a shell function from the metadata - - Note on directory behavior. The 'dirs' varflag should contain a list - of the directories you need created prior to execution. The last - item in the list is where we will chdir/cd to. - """ - - # Don't let the emitted shell script override PWD - d.delVarFlag('PWD', 'export') - - with open(runfile, 'w') as script: - script.write(shell_trap_code()) - - bb.data.emit_func(func, script, d) - - if verboseShellLogging or bb.utils.to_boolean(d.getVar("BB_VERBOSE_LOGS", False)): - script.write("set -x\n") - if cwd: - script.write("cd '%s'\n" % cwd) - script.write("%s\n" % func) - script.write(''' -# cleanup -ret=$? -trap '' 0 -exit $ret -''') - - os.chmod(runfile, 0o775) - - cmd = runfile - if d.getVarFlag(func, 'fakeroot', False): - fakerootcmd = d.getVar('FAKEROOT') - if fakerootcmd: - cmd = [fakerootcmd, runfile] - - # We only want to output to logger via LogTee if stdout is sys.__stdout__ (which will either - # be real stdout or subprocess PIPE or similar). In other cases we are being run "recursively", - # ie. inside another function, in which case stdout is already being captured so we don't - # want to Tee here as output would be printed twice, and out of order. 
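# Editor's note: the choice made below in miniature. Writes are duplicated to
# a logger only when we own the real stdout; a recursive invocation already
# has stdout captured, so tee-ing there would print everything twice and out
# of order. A sketch, not the classes used in this file:
import sys

class Tee:
    def __init__(self, emit, stream):
        self.emit = emit        # e.g. logger.plain in the code below
        self.stream = stream
    def write(self, s):
        self.emit(s)            # once to the UI/logger...
        self.stream.write(s)    # ...and once to the underlying stream

def pick_output(emit):
    if sys.stdout == sys.__stdout__:
        return Tee(emit, sys.stdout)
    return sys.stdout           # already captured; write once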
- if verboseStdoutLogging and sys.stdout == sys.__stdout__: - logfile = LogTee(logger, StdoutNoopContextManager()) - else: - logfile = StdoutNoopContextManager() - - progress = d.getVarFlag(func, 'progress') - if progress: - try: - logfile = create_progress_handler(func, progress, logfile, d) - except: - from traceback import format_exc - logger.error("Failed to create progress handler") - logger.error(format_exc()) - raise - - fifobuffer = bytearray() - def readfifo(data): - nonlocal fifobuffer - fifobuffer.extend(data) - while fifobuffer: - message, token, nextmsg = fifobuffer.partition(b"\00") - if token: - splitval = message.split(b' ', 1) - cmd = splitval[0].decode("utf-8") - if len(splitval) > 1: - value = splitval[1].decode("utf-8") - else: - value = '' - if cmd == 'bbplain': - bb.plain(value) - elif cmd == 'bbnote': - bb.note(value) - elif cmd == 'bbverbnote': - bb.verbnote(value) - elif cmd == 'bbwarn': - bb.warn(value) - elif cmd == 'bberror': - bb.error(value) - elif cmd == 'bbfatal': - # The caller will call exit themselves, so bb.error() is - # what we want here rather than bb.fatal() - bb.error(value) - elif cmd == 'bbfatal_log': - bb.error(value, forcelog=True) - elif cmd == 'bbdebug': - splitval = value.split(' ', 1) - level = int(splitval[0]) - value = splitval[1] - bb.debug(level, value) - else: - bb.warn("Unrecognised command '%s' on FIFO" % cmd) - fifobuffer = nextmsg - else: - break - - tempdir = d.getVar('T') - fifopath = os.path.join(tempdir, 'fifo.%s' % os.getpid()) - if os.path.exists(fifopath): - os.unlink(fifopath) - os.mkfifo(fifopath) - with open(fifopath, 'r+b', buffering=0) as fifo: - try: - bb.debug(2, "Executing shell function %s" % func) - with open(os.devnull, 'r+') as stdin, logfile: - bb.process.run(cmd, shell=False, stdin=stdin, log=logfile, extrafiles=[(fifo,readfifo)]) - except bb.process.ExecutionError as exe: - # Find the backtrace that the shell trap generated - backtrace_marker_regex = re.compile(r"WARNING: Backtrace \(BB generated script\)") - stdout_lines = (exe.stdout or "").split("\n") - backtrace_start_line = None - for i, line in enumerate(reversed(stdout_lines)): - if backtrace_marker_regex.search(line): - backtrace_start_line = len(stdout_lines) - i - break - - # Read the backtrace frames, starting at the location we just found - backtrace_entry_regex = re.compile(r"#(?P<frameno>\d+): (?P<funcname>[^\s]+), (?P<file>.+?), line (?P<lineno>\d+)") - backtrace_frames = [] - if backtrace_start_line: - for line in itertools.islice(stdout_lines, backtrace_start_line, None): - match = backtrace_entry_regex.search(line) - if match: - backtrace_frames.append(match.groupdict()) - - with open(runfile, "r") as script: - script_lines = [line.rstrip() for line in script.readlines()] - - # For each backtrace frame, search backwards in the script (from the line number called out by the frame), - # to find the comment that emit_vars injected when it wrote the script. This will give us the metadata - # filename (e.g. .bb or .bbclass) and line number where the shell function was originally defined. - script_metadata_comment_regex = re.compile(r"# line: (?P<lineno>\d+), file: (?P<file>.+)") - better_frames = [] - # Skip the very last frame since it's just the call to the shell task in the body of the script - for frame in backtrace_frames[:-1]: - # Check whether the frame corresponds to a function defined in the script vs external script.
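# Editor's note: a worked example of the line-number translation this loop
# performs, with invented line numbers and paths. Suppose the generated run
# script contains:
#
#   10: # line: 42, file: /work/recipe.bb   (comment injected when the script was emitted)
#   11: do_compile() {
#   12:     true
#   13:     false                           (the backtrace frame reports lineno 13)
#
# Searching backwards from index 12 finds the injected comment at index
# i = 9, so relative_line_in_function = 13 - 9 - 2 = 2, and the frame is
# rewritten to point at line 2 + 42 = 44 of /work/recipe.bb, i.e. the second
# line of the function body as written in the metadata.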
- if os.path.samefile(frame["file"], runfile): - # Search backwards from the frame lineno to locate the comment that BB injected - i = int(frame["lineno"]) - 1 - while i >= 0: - match = script_metadata_comment_regex.match(script_lines[i]) - if match: - # Calculate the relative line in the function itself - relative_line_in_function = int(frame["lineno"]) - i - 2 - # Calculate line in the function as declared in the metadata - metadata_function_line = relative_line_in_function + int(match["lineno"]) - better_frames.append("#{frameno}: {funcname}, {file}, line {lineno}".format( - frameno=frame["frameno"], - funcname=frame["funcname"], - file=match["file"], - lineno=metadata_function_line - )) - break - i -= 1 - else: - better_frames.append("#{frameno}: {funcname}, {file}, line {lineno}".format(**frame)) - - if better_frames: - better_frames = ("\t{0}".format(frame) for frame in better_frames) - exe.extra_message = "\nBacktrace (metadata-relative locations):\n{0}".format("\n".join(better_frames)) - raise - finally: - os.unlink(fifopath) - - bb.debug(2, "Shell function %s finished" % func) - -def _task_data(fn, task, d): - localdata = bb.data.createCopy(d) - localdata.setVar('BB_FILENAME', fn) - localdata.setVar('OVERRIDES', 'task-%s:%s' % - (task[3:].replace('_', '-'), d.getVar('OVERRIDES', False))) - bb.data.expandKeys(localdata) - return localdata - -def _exec_task(fn, task, d, quieterr): - """Execute a BB 'task' - - Execution of a task involves a bit more setup than executing a function, - running it with its own local metadata, and with some useful variables set. - """ - if not d.getVarFlag(task, 'task', False): - event.fire(TaskInvalid(task, fn, d), d) - logger.error("No such task: %s" % task) - return 1 - - logger.debug("Executing task %s", task) - - localdata = _task_data(fn, task, d) - tempdir = localdata.getVar('T') - if not tempdir: - bb.fatal("T variable not set, unable to build") - - # Change nice level if we're asked to - nice = localdata.getVar("BB_TASK_NICE_LEVEL") - if nice: - curnice = os.nice(0) - nice = int(nice) - curnice - newnice = os.nice(nice) - logger.debug("Renice to %s " % newnice) - ionice = localdata.getVar("BB_TASK_IONICE_LEVEL") - if ionice: - try: - cls, prio = ionice.split(".", 1) - bb.utils.ioprio_set(os.getpid(), int(cls), int(prio)) - except: - bb.warn("Invalid ionice level %s" % ionice) - - bb.utils.mkdirhier(tempdir) - - # Determine the logfile to generate - logfmt = localdata.getVar('BB_LOGFMT') or 'log.{task}.{pid}' - logbase = logfmt.format(task=task, pid=os.getpid()) - - # Document the order of the tasks... 
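# Editor's note: the log.task_order entries written below look like this
# (timestamp and pid invented for illustration):
#
#   20240101-093015.123456 do_compile (12345): log.do_compile.12345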
- logorder = os.path.join(tempdir, 'log.task_order') - try: - with open(logorder, 'a') as logorderfile: - timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S.%f") - logorderfile.write('{0} {1} ({2}): {3}\n'.format(timestamp, task, os.getpid(), logbase)) - except OSError: - logger.exception("Opening log file '%s'", logorder) - pass - - # Setup the courtesy link to the logfn - loglink = os.path.join(tempdir, 'log.{0}'.format(task)) - logfn = os.path.join(tempdir, logbase) - if loglink: - bb.utils.remove(loglink) - - try: - os.symlink(logbase, loglink) - except OSError: - pass - - prefuncs = localdata.getVarFlag(task, 'prefuncs', expand=True) - postfuncs = localdata.getVarFlag(task, 'postfuncs', expand=True) - - class ErrorCheckHandler(logging.Handler): - def __init__(self): - self.triggered = False - logging.Handler.__init__(self, logging.ERROR) - def emit(self, record): - if getattr(record, 'forcelog', False): - self.triggered = False - else: - self.triggered = True - - # Handle logfiles - try: - bb.utils.mkdirhier(os.path.dirname(logfn)) - logfile = open(logfn, 'w') - except OSError: - logger.exception("Opening log file '%s'", logfn) - pass - - # Dup the existing fds so we dont lose them - osi = [os.dup(sys.stdin.fileno()), sys.stdin.fileno()] - oso = [os.dup(sys.stdout.fileno()), sys.stdout.fileno()] - ose = [os.dup(sys.stderr.fileno()), sys.stderr.fileno()] - - # Replace those fds with our own - with open('/dev/null', 'r') as si: - os.dup2(si.fileno(), osi[1]) - os.dup2(logfile.fileno(), oso[1]) - os.dup2(logfile.fileno(), ose[1]) - - # Ensure Python logging goes to the logfile - handler = logging.StreamHandler(logfile) - handler.setFormatter(logformatter) - # Always enable full debug output into task logfiles - handler.setLevel(logging.DEBUG - 2) - bblogger.addHandler(handler) - - errchk = ErrorCheckHandler() - bblogger.addHandler(errchk) - - localdata.setVar('BB_LOGFILE', logfn) - localdata.setVar('BB_RUNTASK', task) - localdata.setVar('BB_TASK_LOGGER', bblogger) - - flags = localdata.getVarFlags(task) - - try: - try: - event.fire(TaskStarted(task, fn, logfn, flags, localdata), localdata) - - for func in (prefuncs or '').split(): - exec_func(func, localdata) - exec_func(task, localdata) - for func in (postfuncs or '').split(): - exec_func(func, localdata) - finally: - # Need to flush and close the logs before sending events where the - # UI may try to look at the logs. 
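# Editor's note: the file-descriptor juggling above, reduced to a minimal
# standalone sketch (the log path is invented). Save the real stdout, point
# fd 1 at a logfile, then restore it:
import os, sys

saved_stdout = os.dup(sys.stdout.fileno())
with open("/tmp/task.log", "w") as log:
    os.dup2(log.fileno(), sys.stdout.fileno())   # fd 1 now writes to the log
    print("captured")                            # lands in /tmp/task.log
    sys.stdout.flush()                           # flush before switching back
    os.dup2(saved_stdout, sys.stdout.fileno())   # restore the original stdout
os.close(saved_stdout)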
- sys.stdout.flush() - sys.stderr.flush() - - bblogger.removeHandler(handler) - - # Restore the backup fds - os.dup2(osi[0], osi[1]) - os.dup2(oso[0], oso[1]) - os.dup2(ose[0], ose[1]) - - # Close the backup fds - os.close(osi[0]) - os.close(oso[0]) - os.close(ose[0]) - - logfile.close() - if os.path.exists(logfn) and os.path.getsize(logfn) == 0: - logger.debug2("Zero size logfn %s, removing", logfn) - bb.utils.remove(logfn) - bb.utils.remove(loglink) - except (Exception, SystemExit) as exc: - handled = False - if isinstance(exc, bb.BBHandledException): - handled = True - - if quieterr: - if not handled: - logger.warning(str(exc)) - event.fire(TaskFailedSilent(task, fn, logfn, localdata), localdata) - else: - errprinted = errchk.triggered - # If the output is already on stdout, we've printed the information in the - # logs once already so don't duplicate - if verboseStdoutLogging or handled: - errprinted = True - if not handled: - logger.error(str(exc)) - event.fire(TaskFailed(task, fn, logfn, localdata, errprinted), localdata) - return 1 - - event.fire(TaskSucceeded(task, fn, logfn, localdata), localdata) - - if not localdata.getVarFlag(task, 'nostamp', False) and not localdata.getVarFlag(task, 'selfstamp', False): - make_stamp(task, localdata) - - return 0 - -def exec_task(fn, task, d, profile = False): - try: - quieterr = False - if d.getVarFlag(task, "quieterrors", False) is not None: - quieterr = True - - if profile: - profname = "profile-%s.log" % (d.getVar("PN") + "-" + task) - try: - import cProfile as profile - except: - import profile - prof = profile.Profile() - ret = profile.Profile.runcall(prof, _exec_task, fn, task, d, quieterr) - prof.dump_stats(profname) - bb.utils.process_profilelog(profname) - - return ret - else: - return _exec_task(fn, task, d, quieterr) - - except Exception: - from traceback import format_exc - if not quieterr: - logger.error("Build of %s failed" % (task)) - logger.error(format_exc()) - failedevent = TaskFailed(task, None, d, True) - event.fire(failedevent, d) - return 1 - -def _get_cleanmask(taskname, mcfn): - """ - Internal stamp helper function to generate stamp cleaning mask - Returns the stamp path+filename - - In the bitbake core, d can be a CacheData and file_name will be set. 
- When called in task context, d will be a data store, file_name will not be set - """ - cleanmask = bb.parse.siggen.stampcleanmask_mcfn(taskname, mcfn) - taskflagname = taskname.replace("_setscene", "") - if cleanmask: - return [cleanmask, cleanmask.replace(taskflagname, taskflagname + "_setscene")] - return [] - -def clean_stamp_mcfn(task, mcfn): - cleanmask = _get_cleanmask(task, mcfn) - for mask in cleanmask: - for name in glob.glob(mask): - # Preserve sigdata files in the stamps directory - if "sigdata" in name or "sigbasedata" in name: - continue - # Preserve taint files in the stamps directory - if name.endswith('.taint'): - continue - os.unlink(name) - -def clean_stamp(task, d): - mcfn = d.getVar('BB_FILENAME') - clean_stamp_mcfn(task, mcfn) - -def make_stamp_mcfn(task, mcfn): - - basestamp = bb.parse.siggen.stampfile_mcfn(task, mcfn) - - stampdir = os.path.dirname(basestamp) - if cached_mtime_noerror(stampdir) == 0: - bb.utils.mkdirhier(stampdir) - - clean_stamp_mcfn(task, mcfn) - - # Remove the file and recreate to force timestamp - # change on broken NFS filesystems - if basestamp: - bb.utils.remove(basestamp) - open(basestamp, "w").close() - -def make_stamp(task, d): - """ - Creates/updates a stamp for a given task - """ - mcfn = d.getVar('BB_FILENAME') - - make_stamp_mcfn(task, mcfn) - - # If we're in task context, write out a signature file for each task - # as it completes - if not task.endswith("_setscene"): - stampbase = bb.parse.siggen.stampfile_base(mcfn) - bb.parse.siggen.dump_sigtask(mcfn, task, stampbase, True) - - -def find_stale_stamps(task, mcfn): - current = bb.parse.siggen.stampfile_mcfn(task, mcfn) - current2 = bb.parse.siggen.stampfile_mcfn(task + "_setscene", mcfn) - cleanmask = _get_cleanmask(task, mcfn) - found = [] - for mask in cleanmask: - for name in glob.glob(mask): - if "sigdata" in name or "sigbasedata" in name: - continue - if name.endswith('.taint'): - continue - if name == current or name == current2: - continue - logger.debug2("Stampfile %s does not match %s or %s" % (name, current, current2)) - found.append(name) - return found - -def write_taint(task, d): - """ - Creates a "taint" file which will force the specified task and its - dependents to be re-run the next time by influencing the value of its - taskhash. - """ - mcfn = d.getVar('BB_FILENAME') - bb.parse.siggen.invalidate_task(task, mcfn) - -def add_tasks(tasklist, d): - task_deps = d.getVar('_task_deps', False) - if not task_deps: - task_deps = {} - if not 'tasks' in task_deps: - task_deps['tasks'] = [] - if not 'parents' in task_deps: - task_deps['parents'] = {} - - for task in tasklist: - task = d.expand(task) - - d.setVarFlag(task, 'task', 1) - - if not task in task_deps['tasks']: - task_deps['tasks'].append(task) - - flags = d.getVarFlags(task) - def getTask(name): - if not name in task_deps: - task_deps[name] = {} - if name in flags: - deptask = d.expand(flags[name]) - if name in ['noexec', 'fakeroot', 'nostamp']: - if deptask != '1': - bb.warn("In a future version of BitBake, setting the '{}' flag to something other than '1' " - "will result in the flag not being set. 
See YP bug #13808.".format(name)) - - task_deps[name][task] = deptask - getTask('mcdepends') - getTask('depends') - getTask('rdepends') - getTask('deptask') - getTask('rdeptask') - getTask('recrdeptask') - getTask('recideptask') - getTask('nostamp') - getTask('fakeroot') - getTask('noexec') - getTask('umask') - task_deps['parents'][task] = [] - if 'deps' in flags: - for dep in flags['deps']: - # Check and warn for "addtask task after foo" while foo does not exist - #if not dep in tasklist: - # bb.warn('%s: dependent task %s for %s does not exist' % (d.getVar('PN'), dep, task)) - dep = d.expand(dep) - task_deps['parents'][task].append(dep) - - # don't assume holding a reference - d.setVar('_task_deps', task_deps) - -def ensure_task_prefix(name): - if name[:3] != "do_": - name = "do_" + name - return name - -def addtask(task, before, after, d): - task = ensure_task_prefix(task) - - d.setVarFlag(task, "task", 1) - bbtasks = d.getVar('__BBTASKS', False) or [] - if task not in bbtasks: - bbtasks.append(task) - d.setVar('__BBTASKS', bbtasks) - - existing = d.getVarFlag(task, "deps", False) or [] - if after is not None: - # set up deps for function - for entry in after.split(): - entry = ensure_task_prefix(entry) - if entry not in existing: - existing.append(entry) - d.setVarFlag(task, "deps", existing) - if before is not None: - # set up things that depend on this func - for entry in before.split(): - entry = ensure_task_prefix(entry) - existing = d.getVarFlag(entry, "deps", False) or [] - if task not in existing: - d.setVarFlag(entry, "deps", [task] + existing) - -def deltask(task, d): - task = ensure_task_prefix(task) - - bbtasks = d.getVar('__BBTASKS', False) or [] - if task in bbtasks: - bbtasks.remove(task) - d.delVarFlag(task, 'task') - d.setVar('__BBTASKS', bbtasks) - - d.delVarFlag(task, 'deps') - for bbtask in d.getVar('__BBTASKS', False) or []: - deps = d.getVarFlag(bbtask, 'deps', False) or [] - if task in deps: - deps.remove(task) - d.setVarFlag(bbtask, 'deps', deps) - -def preceedtask(task, with_recrdeptasks, d): - """ - Returns a set of tasks in the current recipe which were specified as - precondition by the task itself ("after") or which listed themselves - as precondition ("before"). Preceeding tasks specified via the - "recrdeptask" are included in the result only if requested. Beware - that this may lead to the task itself being listed. - """ - preceed = set() - - # Ignore tasks which don't exist - tasks = d.getVar('__BBTASKS', False) - if task not in tasks: - return preceed - - preceed.update(d.getVarFlag(task, 'deps') or []) - if with_recrdeptasks: - recrdeptask = d.getVarFlag(task, 'recrdeptask') - if recrdeptask: - preceed.update(recrdeptask.split()) - return preceed - -def tasksbetween(task_start, task_end, d): - """ - Return the list of tasks between two tasks in the current recipe, - where task_start is to start at and task_end is the task to end at - (and task_end has a dependency chain back to task_start). 
- """ - outtasks = [] - tasks = list(filter(lambda k: d.getVarFlag(k, "task"), d.keys())) - def follow_chain(task, endtask, chain=None): - if not chain: - chain = [] - if task in chain: - bb.fatal("Circular task dependencies as %s depends on itself via the chain %s" % (task, " -> ".join(chain))) - chain.append(task) - for othertask in tasks: - if othertask == task: - continue - if task == endtask: - for ctask in chain: - if ctask not in outtasks: - outtasks.append(ctask) - else: - deps = d.getVarFlag(othertask, 'deps', False) - if task in deps: - follow_chain(othertask, endtask, chain) - chain.pop() - follow_chain(task_start, task_end) - return outtasks - -def listtasks(d): - """ - Return the list of tasks in the current recipe. - """ - return tuple(d.getVar('__BBTASKS', False) or ()) diff --git a/bitbake/lib/bb/cache.py b/bitbake/lib/bb/cache.py deleted file mode 100644 index 2361c5684d..0000000000 --- a/bitbake/lib/bb/cache.py +++ /dev/null @@ -1,988 +0,0 @@ -# -# BitBake Cache implementation -# -# Caching of bitbake variables before task execution - -# Copyright (C) 2006 Richard Purdie -# Copyright (C) 2012 Intel Corporation - -# but small sections based on code from bin/bitbake: -# Copyright (C) 2003, 2004 Chris Larson -# Copyright (C) 2003, 2004 Phil Blundell -# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer -# Copyright (C) 2005 Holger Hans Peter Freyther -# Copyright (C) 2005 ROAD GmbH -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import os -import logging -import pickle -from collections import defaultdict -from collections.abc import Mapping -import bb.utils -from bb import PrefixLoggerAdapter -import re -import shutil - -logger = logging.getLogger("BitBake.Cache") - -__cache_version__ = "156" - -def getCacheFile(path, filename, mc, data_hash): - mcspec = '' - if mc: - mcspec = ".%s" % mc - return os.path.join(path, filename + mcspec + "." + data_hash) - -# RecipeInfoCommon defines common data retrieving methods -# from meta data for caches. 
CoreRecipeInfo as well as other -# Extra RecipeInfo needs to inherit this class -class RecipeInfoCommon(object): - - @classmethod - def listvar(cls, var, metadata): - return cls.getvar(var, metadata).split() - - @classmethod - def intvar(cls, var, metadata): - return int(cls.getvar(var, metadata) or 0) - - @classmethod - def depvar(cls, var, metadata): - return bb.utils.explode_deps(cls.getvar(var, metadata)) - - @classmethod - def pkgvar(cls, var, packages, metadata): - return dict((pkg, cls.depvar("%s:%s" % (var, pkg), metadata)) - for pkg in packages) - - @classmethod - def taskvar(cls, var, tasks, metadata): - return dict((task, cls.getvar("%s:task-%s" % (var, task), metadata)) - for task in tasks) - - @classmethod - def flaglist(cls, flag, varlist, metadata, squash=False): - out_dict = dict((var, metadata.getVarFlag(var, flag)) - for var in varlist) - if squash: - return dict((k,v) for (k,v) in out_dict.items() if v) - else: - return out_dict - - @classmethod - def getvar(cls, var, metadata, expand = True): - return metadata.getVar(var, expand) or '' - - -class CoreRecipeInfo(RecipeInfoCommon): - __slots__ = () - - cachefile = "bb_cache.dat" - - def __init__(self, filename, metadata): - self.file_depends = metadata.getVar('__depends', False) - self.timestamp = bb.parse.cached_mtime(filename) - self.variants = self.listvar('__VARIANTS', metadata) + [''] - self.appends = self.listvar('__BBAPPEND', metadata) - self.nocache = self.getvar('BB_DONT_CACHE', metadata) - - self.provides = self.depvar('PROVIDES', metadata) - self.rprovides = self.depvar('RPROVIDES', metadata) - self.pn = self.getvar('PN', metadata) or bb.parse.vars_from_file(filename,metadata)[0] - self.packages = self.listvar('PACKAGES', metadata) - if not self.packages: - self.packages.append(self.pn) - self.packages_dynamic = self.listvar('PACKAGES_DYNAMIC', metadata) - self.rprovides_pkg = self.pkgvar('RPROVIDES', self.packages, metadata) - - self.skipreason = self.getvar('__SKIPPED', metadata) - if self.skipreason: - self.skipped = True - return - - self.tasks = metadata.getVar('__BBTASKS', False) - - self.basetaskhashes = metadata.getVar('__siggen_basehashes', False) or {} - self.hashfilename = self.getvar('BB_HASHFILENAME', metadata) - - self.task_deps = metadata.getVar('_task_deps', False) or {'tasks': [], 'parents': {}} - - self.skipped = False - self.pe = self.getvar('PE', metadata) - self.pv = self.getvar('PV', metadata) - self.pr = self.getvar('PR', metadata) - self.defaultpref = self.intvar('DEFAULT_PREFERENCE', metadata) - self.not_world = self.getvar('EXCLUDE_FROM_WORLD', metadata) - self.stamp = self.getvar('STAMP', metadata) - self.stampclean = self.getvar('STAMPCLEAN', metadata) - self.stamp_extrainfo = self.flaglist('stamp-extra-info', self.tasks, metadata) - self.file_checksums = self.flaglist('file-checksums', self.tasks, metadata, True) - self.depends = self.depvar('DEPENDS', metadata) - self.rdepends = self.depvar('RDEPENDS', metadata) - self.rrecommends = self.depvar('RRECOMMENDS', metadata) - self.rdepends_pkg = self.pkgvar('RDEPENDS', self.packages, metadata) - self.rrecommends_pkg = self.pkgvar('RRECOMMENDS', self.packages, metadata) - self.inherits = self.getvar('__inherit_cache', metadata, expand=False) - self.fakerootenv = self.getvar('FAKEROOTENV', metadata) - self.fakerootdirs = self.getvar('FAKEROOTDIRS', metadata) - self.fakerootlogs = self.getvar('FAKEROOTLOGS', metadata) - self.fakerootnoenv = self.getvar('FAKEROOTNOENV', metadata) - self.extradepsfunc = self.getvar('calculate_extra_depends', 
metadata) - - @classmethod - def init_cacheData(cls, cachedata): - # CacheData in Core RecipeInfo Class - cachedata.task_deps = {} - cachedata.pkg_fn = {} - cachedata.pkg_pn = defaultdict(list) - cachedata.pkg_pepvpr = {} - cachedata.pkg_dp = {} - - cachedata.stamp = {} - cachedata.stampclean = {} - cachedata.stamp_extrainfo = {} - cachedata.file_checksums = {} - cachedata.fn_provides = {} - cachedata.pn_provides = defaultdict(list) - cachedata.all_depends = [] - - cachedata.deps = defaultdict(list) - cachedata.packages = defaultdict(list) - cachedata.providers = defaultdict(list) - cachedata.rproviders = defaultdict(list) - cachedata.packages_dynamic = defaultdict(list) - - cachedata.rundeps = defaultdict(lambda: defaultdict(list)) - cachedata.runrecs = defaultdict(lambda: defaultdict(list)) - cachedata.possible_world = [] - cachedata.universe_target = [] - cachedata.hashfn = {} - - cachedata.basetaskhash = {} - cachedata.inherits = {} - cachedata.fakerootenv = {} - cachedata.fakerootnoenv = {} - cachedata.fakerootdirs = {} - cachedata.fakerootlogs = {} - cachedata.extradepsfunc = {} - - def add_cacheData(self, cachedata, fn): - cachedata.task_deps[fn] = self.task_deps - cachedata.pkg_fn[fn] = self.pn - cachedata.pkg_pn[self.pn].append(fn) - cachedata.pkg_pepvpr[fn] = (self.pe, self.pv, self.pr) - cachedata.pkg_dp[fn] = self.defaultpref - cachedata.stamp[fn] = self.stamp - cachedata.stampclean[fn] = self.stampclean - cachedata.stamp_extrainfo[fn] = self.stamp_extrainfo - cachedata.file_checksums[fn] = self.file_checksums - - provides = [self.pn] - for provide in self.provides: - if provide not in provides: - provides.append(provide) - cachedata.fn_provides[fn] = provides - - for provide in provides: - cachedata.providers[provide].append(fn) - if provide not in cachedata.pn_provides[self.pn]: - cachedata.pn_provides[self.pn].append(provide) - - for dep in self.depends: - if dep not in cachedata.deps[fn]: - cachedata.deps[fn].append(dep) - if dep not in cachedata.all_depends: - cachedata.all_depends.append(dep) - - rprovides = self.rprovides - for package in self.packages: - cachedata.packages[package].append(fn) - rprovides += self.rprovides_pkg[package] - - for rprovide in rprovides: - if fn not in cachedata.rproviders[rprovide]: - cachedata.rproviders[rprovide].append(fn) - - for package in self.packages_dynamic: - cachedata.packages_dynamic[package].append(fn) - - # Build hash of runtime depends and recommends - for package in self.packages: - cachedata.rundeps[fn][package] = list(self.rdepends) + self.rdepends_pkg[package] - cachedata.runrecs[fn][package] = list(self.rrecommends) + self.rrecommends_pkg[package] - - # Collect files we may need for possible world-dep - # calculations - if not bb.utils.to_boolean(self.not_world): - cachedata.possible_world.append(fn) - #else: - # logger.debug2("EXCLUDE FROM WORLD: %s", fn) - - # create a collection of all targets for sanity checking - # tasks, such as upstream versions, license, and tools for - # task and image creation. 
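# Editor's note: a worked example of the indexes this method fills in. For a
# recipe file fn with PN = "foo" and PROVIDES = "virtual/libfoo" (names
# invented), the code above produces:
#
#   cachedata.fn_provides[fn]              -> ["foo", "virtual/libfoo"]
#   cachedata.providers["foo"]             -> [fn]
#   cachedata.providers["virtual/libfoo"]  -> [fn]
#   cachedata.pn_provides["foo"]           -> ["foo", "virtual/libfoo"]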
- cachedata.universe_target.append(self.pn) - - cachedata.hashfn[fn] = self.hashfilename - for task, taskhash in self.basetaskhashes.items(): - identifier = '%s:%s' % (fn, task) - cachedata.basetaskhash[identifier] = taskhash - - cachedata.inherits[fn] = self.inherits - cachedata.fakerootenv[fn] = self.fakerootenv - cachedata.fakerootnoenv[fn] = self.fakerootnoenv - cachedata.fakerootdirs[fn] = self.fakerootdirs - cachedata.fakerootlogs[fn] = self.fakerootlogs - cachedata.extradepsfunc[fn] = self.extradepsfunc - - -class SiggenRecipeInfo(RecipeInfoCommon): - __slots__ = () - - classname = "SiggenRecipeInfo" - cachefile = "bb_cache_" + classname +".dat" - # we don't want to show this information in graph files so don't set cachefields - #cachefields = [] - - def __init__(self, filename, metadata): - self.siggen_gendeps = metadata.getVar("__siggen_gendeps", False) - self.siggen_varvals = metadata.getVar("__siggen_varvals", False) - self.siggen_taskdeps = metadata.getVar("__siggen_taskdeps", False) - - @classmethod - def init_cacheData(cls, cachedata): - cachedata.siggen_taskdeps = {} - cachedata.siggen_gendeps = {} - cachedata.siggen_varvals = {} - - def add_cacheData(self, cachedata, fn): - cachedata.siggen_gendeps[fn] = self.siggen_gendeps - cachedata.siggen_varvals[fn] = self.siggen_varvals - cachedata.siggen_taskdeps[fn] = self.siggen_taskdeps - - # The siggen variable data is large and impacts: - # - bitbake's overall memory usage - # - the amount of data sent over IPC between parsing processes and the server - # - the size of the cache files on disk - # - the size of "sigdata" hash information files on disk - # The data consists of strings (some large) or frozenset lists of variables - # As such, we a) deduplicate the data here and b) pass references to the object at second - # access (e.g. over IPC or saving into pickle). - - store = {} - save_map = {} - save_count = 1 - restore_map = {} - restore_count = {} - - @classmethod - def reset(cls): - # Needs to be called before starting new streamed data in a given process - # (e.g.
writing out the cache again) - cls.save_map = {} - cls.save_count = 1 - cls.restore_map = {} - - @classmethod - def _save(cls, deps): - ret = [] - if not deps: - return deps - for dep in deps: - fs = deps[dep] - if fs is None: - ret.append((dep, None, None)) - elif fs in cls.save_map: - ret.append((dep, None, cls.save_map[fs])) - else: - cls.save_map[fs] = cls.save_count - ret.append((dep, fs, cls.save_count)) - cls.save_count = cls.save_count + 1 - return ret - - @classmethod - def _restore(cls, deps, pid): - ret = {} - if not deps: - return deps - if pid not in cls.restore_map: - cls.restore_map[pid] = {} - map = cls.restore_map[pid] - for dep, fs, mapnum in deps: - if fs is None and mapnum is None: - ret[dep] = None - elif fs is None: - ret[dep] = map[mapnum] - else: - try: - fs = cls.store[fs] - except KeyError: - cls.store[fs] = fs - map[mapnum] = fs - ret[dep] = fs - return ret - - def __getstate__(self): - ret = {} - for key in ["siggen_gendeps", "siggen_taskdeps", "siggen_varvals"]: - ret[key] = self._save(self.__dict__[key]) - ret['pid'] = os.getpid() - return ret - - def __setstate__(self, state): - pid = state['pid'] - for key in ["siggen_gendeps", "siggen_taskdeps", "siggen_varvals"]: - setattr(self, key, self._restore(state[key], pid)) - - -def virtualfn2realfn(virtualfn): - """ - Convert a virtual file name to a real one + the associated subclass keyword - """ - mc = "" - if virtualfn.startswith('mc:') and virtualfn.count(':') >= 2: - (_, mc, virtualfn) = virtualfn.split(':', 2) - - fn = virtualfn - cls = "" - if virtualfn.startswith('virtual:'): - elems = virtualfn.split(':') - cls = ":".join(elems[1:-1]) - fn = elems[-1] - - return (fn, cls, mc) - -def realfn2virtual(realfn, cls, mc): - """ - Convert a real filename + the associated subclass keyword to a virtual filename - """ - if cls: - realfn = "virtual:" + cls + ":" + realfn - if mc: - realfn = "mc:" + mc + ":" + realfn - return realfn - -def variant2virtual(realfn, variant): - """ - Convert a real filename + a variant to a virtual filename - """ - if variant == "": - return realfn - if variant.startswith("mc:") and variant.count(':') >= 2: - elems = variant.split(":") - if elems[2]: - return "mc:" + elems[1] + ":virtual:" + ":".join(elems[2:]) + ":" + realfn - return "mc:" + elems[1] + ":" + realfn - return "virtual:" + variant + ":" + realfn - -# -# Cooker calls cacheValid on its recipe list, then either calls loadCached -# from it's main thread or parse from separate processes to generate an up to -# date cache -# -class Cache(object): - """ - BitBake Cache implementation - """ - def __init__(self, databuilder, mc, data_hash, caches_array): - self.databuilder = databuilder - self.data = databuilder.data - - # Pass caches_array information into Cache Constructor - # It will be used later for deciding whether we - # need extra cache file dump/load support - self.mc = mc - self.logger = PrefixLoggerAdapter("Cache: %s: " % (mc if mc else ''), logger) - self.caches_array = caches_array - self.cachedir = self.data.getVar("CACHE") - self.clean = set() - self.checked = set() - self.depends_cache = {} - self.data_fn = None - self.cacheclean = True - self.data_hash = data_hash - self.filelist_regex = re.compile(r'(?:(?<=:True)|(?<=:False))\s+') - - if self.cachedir in [None, '']: - bb.fatal("Please ensure CACHE is set to the cache directory for BitBake to use") - - def getCacheFile(self, cachefile): - return getCacheFile(self.cachedir, cachefile, self.mc, self.data_hash) - - def prepare_cache(self, progress): - loaded = 0 - - 
self.cachefile = self.getCacheFile("bb_cache.dat") - - self.logger.debug("Cache dir: %s", self.cachedir) - bb.utils.mkdirhier(self.cachedir) - - cache_ok = True - if self.caches_array: - for cache_class in self.caches_array: - cachefile = self.getCacheFile(cache_class.cachefile) - cache_exists = os.path.exists(cachefile) - self.logger.debug2("Checking if %s exists: %r", cachefile, cache_exists) - cache_ok = cache_ok and cache_exists - cache_class.init_cacheData(self) - if cache_ok: - loaded = self.load_cachefile(progress) - elif os.path.isfile(self.cachefile): - self.logger.info("Out of date cache found, rebuilding...") - else: - self.logger.debug("Cache file %s not found, building..." % self.cachefile) - - # We don't use the symlink, it's just for debugging convenience - if self.mc: - symlink = os.path.join(self.cachedir, "bb_cache.dat.%s" % self.mc) - else: - symlink = os.path.join(self.cachedir, "bb_cache.dat") - - if os.path.exists(symlink) or os.path.islink(symlink): - bb.utils.remove(symlink) - try: - os.symlink(os.path.basename(self.cachefile), symlink) - except OSError: - pass - - return loaded - - def cachesize(self): - cachesize = 0 - for cache_class in self.caches_array: - cachefile = self.getCacheFile(cache_class.cachefile) - try: - with open(cachefile, "rb") as cachefile: - cachesize += os.fstat(cachefile.fileno()).st_size - except FileNotFoundError: - pass - - return cachesize - - def load_cachefile(self, progress): - previous_progress = 0 - - for cache_class in self.caches_array: - cachefile = self.getCacheFile(cache_class.cachefile) - self.logger.debug('Loading cache file: %s' % cachefile) - with open(cachefile, "rb") as cachefile: - pickled = pickle.Unpickler(cachefile) - # Check cache version information - try: - cache_ver = pickled.load() - bitbake_ver = pickled.load() - except Exception: - self.logger.info('Invalid cache, rebuilding...') - return 0 - - if cache_ver != __cache_version__: - self.logger.info('Cache version mismatch, rebuilding...') - return 0 - elif bitbake_ver != bb.__version__: - self.logger.info('Bitbake version mismatch, rebuilding...') - return 0 - - # Load the rest of the cache file - current_progress = 0 - while cachefile: - try: - key = pickled.load() - value = pickled.load() - except Exception: - break - if not isinstance(key, str): - bb.warn("%s from extras cache is not a string?" % key) - break - if not isinstance(value, RecipeInfoCommon): - bb.warn("%s from extras cache is not a RecipeInfoCommon class?"
% value) - break - - if key in self.depends_cache: - self.depends_cache[key].append(value) - else: - self.depends_cache[key] = [value] - # only fire events on even percentage boundaries - current_progress = cachefile.tell() + previous_progress - progress(cachefile.tell() + previous_progress) - - previous_progress += current_progress - - return len(self.depends_cache) - - def parse(self, filename, appends, layername): - """Parse the specified filename, returning the recipe information""" - self.logger.debug("Parsing %s", filename) - infos = [] - datastores = self.databuilder.parseRecipeVariants(filename, appends, mc=self.mc, layername=layername) - depends = [] - variants = [] - # Process the "real" fn last so we can store variants list - for variant, data in sorted(datastores.items(), - key=lambda i: i[0], - reverse=True): - virtualfn = variant2virtual(filename, variant) - variants.append(variant) - depends = depends + (data.getVar("__depends", False) or []) - if depends and not variant: - data.setVar("__depends", depends) - if virtualfn == filename: - data.setVar("__VARIANTS", " ".join(variants)) - info_array = [] - for cache_class in self.caches_array: - info = cache_class(filename, data) - info_array.append(info) - infos.append((virtualfn, info_array)) - - return infos - - def loadCached(self, filename, appends): - """Obtain the recipe information for the specified filename, - using cached values. - """ - - infos = [] - # info_array item is a list of [CoreRecipeInfo, XXXRecipeInfo] - info_array = self.depends_cache[filename] - for variant in info_array[0].variants: - virtualfn = variant2virtual(filename, variant) - infos.append((virtualfn, self.depends_cache[virtualfn])) - - return infos - - def cacheValid(self, fn, appends): - """ - Is the cache valid for fn? - Fast version, no timestamps checked. - """ - if fn not in self.checked: - self.cacheValidUpdate(fn, appends) - if fn in self.clean: - return True - return False - - def cacheValidUpdate(self, fn, appends): - """ - Is the cache valid for fn? - Make thorough (slower) checks including timestamps. 
- """ - self.checked.add(fn) - - # File isn't in depends_cache - if not fn in self.depends_cache: - self.logger.debug2("%s is not cached", fn) - return False - - mtime = bb.parse.cached_mtime_noerror(fn) - - # Check file still exists - if mtime == 0: - self.logger.debug2("%s no longer exists", fn) - self.remove(fn) - return False - - info_array = self.depends_cache[fn] - # Check the file's timestamp - if mtime != info_array[0].timestamp: - self.logger.debug2("%s changed", fn) - self.remove(fn) - return False - - # Check dependencies are still valid - depends = info_array[0].file_depends - if depends: - for f, old_mtime in depends: - fmtime = bb.parse.cached_mtime_noerror(f) - # Check if file still exists - if old_mtime != 0 and fmtime == 0: - self.logger.debug2("%s's dependency %s was removed", - fn, f) - self.remove(fn) - return False - - if (fmtime != old_mtime): - self.logger.debug2("%s's dependency %s changed", - fn, f) - self.remove(fn) - return False - - if hasattr(info_array[0], 'file_checksums'): - for _, fl in info_array[0].file_checksums.items(): - fl = fl.strip() - if not fl: - continue - # Have to be careful about spaces and colons in filenames - flist = self.filelist_regex.split(fl) - for f in flist: - if not f: - continue - f, exist = f.rsplit(":", 1) - if (exist == "True" and not os.path.exists(f)) or (exist == "False" and os.path.exists(f)): - self.logger.debug2("%s's file checksum list file %s changed", - fn, f) - self.remove(fn) - return False - - if tuple(appends) != tuple(info_array[0].appends): - self.logger.debug2("appends for %s changed", fn) - self.logger.debug2("%s to %s" % (str(appends), str(info_array[0].appends))) - self.remove(fn) - return False - - invalid = False - for cls in info_array[0].variants: - virtualfn = variant2virtual(fn, cls) - self.clean.add(virtualfn) - if virtualfn not in self.depends_cache: - self.logger.debug2("%s is not cached", virtualfn) - invalid = True - elif len(self.depends_cache[virtualfn]) != len(self.caches_array): - self.logger.debug2("Extra caches missing for %s?" 
% virtualfn) - invalid = True - - # If any one of the variants is not present, mark as invalid for all - if invalid: - for cls in info_array[0].variants: - virtualfn = variant2virtual(fn, cls) - if virtualfn in self.clean: - self.logger.debug2("Removing %s from cache", virtualfn) - self.clean.remove(virtualfn) - if fn in self.clean: - self.logger.debug2("Marking %s as not clean", fn) - self.clean.remove(fn) - return False - - self.clean.add(fn) - return True - - def remove(self, fn): - """ - Remove a fn from the cache - Called from the parser in error cases - """ - if fn in self.depends_cache: - self.logger.debug("Removing %s from cache", fn) - del self.depends_cache[fn] - if fn in self.clean: - self.logger.debug("Marking %s as unclean", fn) - self.clean.remove(fn) - - def sync(self): - """ - Save the cache - Called from the parser when complete (or exiting) - """ - if self.cacheclean: - self.logger.debug2("Cache is clean, not saving.") - return - - for cache_class in self.caches_array: - cache_class_name = cache_class.__name__ - cachefile = self.getCacheFile(cache_class.cachefile) - self.logger.debug2("Writing %s", cachefile) - with open(cachefile, "wb") as f: - p = pickle.Pickler(f, pickle.HIGHEST_PROTOCOL) - p.dump(__cache_version__) - p.dump(bb.__version__) - - for key, info_array in self.depends_cache.items(): - for info in info_array: - if isinstance(info, RecipeInfoCommon) and info.__class__.__name__ == cache_class_name: - p.dump(key) - p.dump(info) - - del self.depends_cache - SiggenRecipeInfo.reset() - - @staticmethod - def mtime(cachefile): - return bb.parse.cached_mtime_noerror(cachefile) - - def add_info(self, filename, info_array, cacheData, parsed=None, watcher=None): - if self.mc is not None: - (fn, cls, mc) = virtualfn2realfn(filename) - if mc: - self.logger.error("Unexpected multiconfig %s", filename) - return - - vfn = realfn2virtual(fn, cls, self.mc) - else: - vfn = filename - - if isinstance(info_array[0], CoreRecipeInfo) and (not info_array[0].skipped): - cacheData.add_from_recipeinfo(vfn, info_array) - - if watcher: - watcher(info_array[0].file_depends) - - if (info_array[0].skipped or 'SRCREVINACTION' not in info_array[0].pv) and not info_array[0].nocache: - if parsed: - self.cacheclean = False - self.depends_cache[filename] = info_array - -class MulticonfigCache(Mapping): - def __init__(self, databuilder, data_hash, caches_array): - def progress(p): - nonlocal current_progress - nonlocal previous_progress - nonlocal previous_percent - nonlocal cachesize - - current_progress = previous_progress + p - - if current_progress > cachesize: - # we might have calculated incorrect total size because a file - # might've been written out just after we checked its size - cachesize = current_progress - current_percent = 100 * current_progress / cachesize - if current_percent > previous_percent: - previous_percent = current_percent - bb.event.fire(bb.event.CacheLoadProgress(current_progress, cachesize), - databuilder.data) - - - cachesize = 0 - current_progress = 0 - previous_progress = 0 - previous_percent = 0 - self.__caches = {} - - for mc, mcdata in databuilder.mcdata.items(): - self.__caches[mc] = Cache(databuilder, mc, data_hash, caches_array) - - cachesize += self.__caches[mc].cachesize() - - bb.event.fire(bb.event.CacheLoadStarted(cachesize), databuilder.data) - loaded = 0 - - for c in self.__caches.values(): - SiggenRecipeInfo.reset() - loaded += c.prepare_cache(progress) - previous_progress = current_progress - - # Note: depends cache number is corresponding to the 
parsing file numbers. - # The same file has several caches, still regarded as one item in the cache - bb.event.fire(bb.event.CacheLoadCompleted(cachesize, loaded), databuilder.data) - - def __len__(self): - return len(self.__caches) - - def __getitem__(self, key): - return self.__caches[key] - - def __contains__(self, key): - return key in self.__caches - - def __iter__(self): - for k in self.__caches: - yield k - - -class CacheData(object): - """ - The data structures we compile from the cached data - """ - - def __init__(self, caches_array): - self.caches_array = caches_array - for cache_class in self.caches_array: - if not issubclass(cache_class, RecipeInfoCommon): - bb.error("Extra cache data class %s should subclass RecipeInfoCommon class" % cache_class) - cache_class.init_cacheData(self) - - # Direct cache variables - self.task_queues = {} - self.preferred = {} - self.tasks = {} - # Indirect Cache variables (set elsewhere) - self.ignored_dependencies = [] - self.world_target = set() - self.bbfile_priority = {} - - def add_from_recipeinfo(self, fn, info_array): - for info in info_array: - info.add_cacheData(self, fn) - -class MultiProcessCache(object): - """ - BitBake multi-process cache implementation - - Used by the codeparser & file checksum caches - """ - - def __init__(self): - self.cachefile = None - self.cachedata = self.create_cachedata() - self.cachedata_extras = self.create_cachedata() - - def init_cache(self, cachedir, cache_file_name=None): - if not cachedir: - return - - bb.utils.mkdirhier(cachedir) - self.cachefile = os.path.join(cachedir, - cache_file_name or self.__class__.cache_file_name) - logger.debug("Using cache in '%s'", self.cachefile) - - glf = bb.utils.lockfile(self.cachefile + ".lock") - - try: - with open(self.cachefile, "rb") as f: - p = pickle.Unpickler(f) - data, version = p.load() - except: - bb.utils.unlockfile(glf) - return - - bb.utils.unlockfile(glf) - - if version != self.__class__.CACHE_VERSION: - return - - self.cachedata = data - - def create_cachedata(self): - data = [{}] - return data - - def clear_cache(self): - if not self.cachefile: - bb.fatal("Can't clear invalid cachefile") - - self.cachedata = self.create_cachedata() - self.cachedata_extras = self.create_cachedata() - with bb.utils.fileslocked([self.cachefile + ".lock"]): - bb.utils.remove(self.cachefile) - bb.utils.remove(self.cachefile + "-*") - - def save_extras(self): - if not self.cachefile: - return - - have_data = any(self.cachedata_extras) - if not have_data: - return - - glf = bb.utils.lockfile(self.cachefile + ".lock", shared=True) - - i = os.getpid() - lf = None - while not lf: - lf = bb.utils.lockfile(self.cachefile + ".lock." 
+ str(i), retry=False) - if not lf or os.path.exists(self.cachefile + "-" + str(i)): - if lf: - bb.utils.unlockfile(lf) - lf = None - i = i + 1 - continue - - with open(self.cachefile + "-" + str(i), "wb") as f: - p = pickle.Pickler(f, -1) - p.dump([self.cachedata_extras, self.__class__.CACHE_VERSION]) - - bb.utils.unlockfile(lf) - bb.utils.unlockfile(glf) - - def merge_data(self, source, dest): - for j in range(0,len(dest)): - for h in source[j]: - if h not in dest[j]: - dest[j][h] = source[j][h] - - def save_merge(self): - if not self.cachefile: - return - - glf = bb.utils.lockfile(self.cachefile + ".lock") - - data = self.cachedata - - have_data = False - - for f in [y for y in os.listdir(os.path.dirname(self.cachefile)) if y.startswith(os.path.basename(self.cachefile) + '-')]: - f = os.path.join(os.path.dirname(self.cachefile), f) - try: - with open(f, "rb") as fd: - p = pickle.Unpickler(fd) - extradata, version = p.load() - except (IOError, EOFError): - os.unlink(f) - continue - - if version != self.__class__.CACHE_VERSION: - os.unlink(f) - continue - - have_data = True - self.merge_data(extradata, data) - os.unlink(f) - - if have_data: - with open(self.cachefile, "wb") as f: - p = pickle.Pickler(f, -1) - p.dump([data, self.__class__.CACHE_VERSION]) - - bb.utils.unlockfile(glf) - - -class SimpleCache(object): - """ - BitBake multi-process cache implementation - - Used by the codeparser & file checksum caches - """ - - def __init__(self, version): - self.cachefile = None - self.cachedata = None - self.cacheversion = version - - def init_cache(self, d, cache_file_name=None, defaultdata=None): - cachedir = (d.getVar("PERSISTENT_DIR") or - d.getVar("CACHE")) - if not cachedir: - return defaultdata - - bb.utils.mkdirhier(cachedir) - self.cachefile = os.path.join(cachedir, - cache_file_name or self.__class__.cache_file_name) - logger.debug("Using cache in '%s'", self.cachefile) - - glf = bb.utils.lockfile(self.cachefile + ".lock") - - try: - with open(self.cachefile, "rb") as f: - p = pickle.Unpickler(f) - data, version = p.load() - except: - bb.utils.unlockfile(glf) - return defaultdata - - bb.utils.unlockfile(glf) - - if version != self.cacheversion: - return defaultdata - - return data - - def save(self, data): - if not self.cachefile: - return - - glf = bb.utils.lockfile(self.cachefile + ".lock") - - with open(self.cachefile, "wb") as f: - p = pickle.Pickler(f, -1) - p.dump([data, self.cacheversion]) - - bb.utils.unlockfile(glf) - - def copyfile(self, target): - if not self.cachefile: - return - - glf = bb.utils.lockfile(self.cachefile + ".lock") - shutil.copy(self.cachefile, target) - bb.utils.unlockfile(glf) diff --git a/bitbake/lib/bb/cache_extra.py b/bitbake/lib/bb/cache_extra.py deleted file mode 100644 index bf4226d168..0000000000 --- a/bitbake/lib/bb/cache_extra.py +++ /dev/null @@ -1,63 +0,0 @@ -# -# Extra RecipeInfo will be all defined in this file. Currently, -# Only Hob (Image Creator) Requests some extra fields. So -# HobRecipeInfo is defined. It's named HobRecipeInfo because it -# is introduced by 'hob'. Users could also introduce other -# RecipeInfo or simply use those already defined RecipeInfo. -# In the following patch, this newly defined new extra RecipeInfo -# will be dynamically loaded and used for loading/saving the extra -# cache fields - -# Copyright (C) 2011, Intel Corporation. All rights reserved. 
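# How such an extra class plugs in, as a hedged sketch (the driver lines here
# are hypothetical; CoreRecipeInfo and CacheData come from bb.cache, the
# cachefields from this file): extra classes ride alongside CoreRecipeInfo in
# the caches_array handed to the cache machinery, and CacheData then exposes
# one lookup dict per declared cachefield.
#
#     import bb.cache
#
#     caches_array = [bb.cache.CoreRecipeInfo, HobRecipeInfo]
#     cachedata = bb.cache.CacheData(caches_array)
#     # once populated via add_from_recipeinfo(), cachedata.summary[fn],
#     # cachedata.license[fn], ... hold the extra fields for each recipe fn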
- -# SPDX-License-Identifier: GPL-2.0-only -# - -from bb.cache import RecipeInfoCommon - -class HobRecipeInfo(RecipeInfoCommon): - __slots__ = () - - classname = "HobRecipeInfo" - # please override this member with the correct data cache file - # such as (bb_cache.dat, bb_extracache_hob.dat) - cachefile = "bb_extracache_" + classname +".dat" - - # override this member with the list of extra cache fields - # that this class will provide - cachefields = ['summary', 'license', 'section', - 'description', 'homepage', 'bugtracker', - 'prevision', 'files_info'] - - def __init__(self, filename, metadata): - - self.summary = self.getvar('SUMMARY', metadata) - self.license = self.getvar('LICENSE', metadata) - self.section = self.getvar('SECTION', metadata) - self.description = self.getvar('DESCRIPTION', metadata) - self.homepage = self.getvar('HOMEPAGE', metadata) - self.bugtracker = self.getvar('BUGTRACKER', metadata) - self.prevision = self.getvar('PR', metadata) - self.files_info = self.getvar('FILES_INFO', metadata) - - @classmethod - def init_cacheData(cls, cachedata): - # CacheData in Hob RecipeInfo Class - cachedata.summary = {} - cachedata.license = {} - cachedata.section = {} - cachedata.description = {} - cachedata.homepage = {} - cachedata.bugtracker = {} - cachedata.prevision = {} - cachedata.files_info = {} - - def add_cacheData(self, cachedata, fn): - cachedata.summary[fn] = self.summary - cachedata.license[fn] = self.license - cachedata.section[fn] = self.section - cachedata.description[fn] = self.description - cachedata.homepage[fn] = self.homepage - cachedata.bugtracker[fn] = self.bugtracker - cachedata.prevision[fn] = self.prevision - cachedata.files_info[fn] = self.files_info diff --git a/bitbake/lib/bb/checksum.py b/bitbake/lib/bb/checksum.py deleted file mode 100644 index 3fb39a303e..0000000000 --- a/bitbake/lib/bb/checksum.py +++ /dev/null @@ -1,169 +0,0 @@ -# Local file checksum cache implementation -# -# Copyright (C) 2012 Intel Corporation -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import glob -import operator -import os -import stat -import bb.utils -import logging -import re -from bb.cache import MultiProcessCache - -logger = logging.getLogger("BitBake.Cache") - -filelist_regex = re.compile(r'(?:(?<=:True)|(?<=:False))\s+') - -# mtime cache (non-persistent) -# based upon the assumption that files do not change during bitbake run -class FileMtimeCache(object): - cache = {} - - def cached_mtime(self, f): - if f not in self.cache: - self.cache[f] = os.stat(f)[stat.ST_MTIME] - return self.cache[f] - - def cached_mtime_noerror(self, f): - if f not in self.cache: - try: - self.cache[f] = os.stat(f)[stat.ST_MTIME] - except OSError: - return 0 - return self.cache[f] - - def update_mtime(self, f): - self.cache[f] = os.stat(f)[stat.ST_MTIME] - return self.cache[f] - - def clear(self): - self.cache.clear() - -# Checksum + mtime cache (persistent) -class FileChecksumCache(MultiProcessCache): - cache_file_name = "local_file_checksum_cache.dat" - CACHE_VERSION = 1 - - def __init__(self): - self.mtime_cache = FileMtimeCache() - MultiProcessCache.__init__(self) - - def get_checksum(self, f): - f = os.path.normpath(f) - entry = self.cachedata[0].get(f) - cmtime = self.mtime_cache.cached_mtime(f) - if entry: - (mtime, hashval) = entry - if cmtime == mtime: - return hashval - else: - bb.debug(2, "file %s changed mtime, recompute checksum" % f) - - hashval = bb.utils.md5_file(f) - self.cachedata_extras[0][f] = (cmtime, hashval) - return hashval - - def merge_data(self, source, dest): 
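        # Fold one worker's per-file (mtime, checksum) entries into the merged
        # cache, as driven by MultiProcessCache.save_merge() above.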
- for h in source[0]: - if h in dest: - (smtime, _) = source[0][h] - (dmtime, _) = dest[0][h] - if smtime > dmtime: - dest[0][h] = source[0][h] - else: - dest[0][h] = source[0][h] - - def get_checksums(self, filelist, pn, localdirsexclude): - """Get checksums for a list of files""" - - def checksum_file(f): - try: - checksum = self.get_checksum(f) - except OSError as e: - bb.warn("Unable to get checksum for %s SRC_URI entry %s: %s" % (pn, os.path.basename(f), e)) - return None - return checksum - - # - # Changing the format of file-checksums is problematic as both OE and Bitbake have - # knowledge of them. We need to encode a new piece of data, the portion of the path - # we care about from a checksum perspective. This means that files that change subdirectory - # are tracked by the task hashes. To do this, we do something horrible and put a "/./" into - # the path. The filesystem handles it but it gives us a marker to know which subsection - # of the path to cache. - # - def checksum_dir(pth): - # Handle directories recursively - if pth == "/": - bb.fatal("Refusing to checksum /") - pth = pth.rstrip("/") - dirchecksums = [] - for root, dirs, files in os.walk(pth, topdown=True): - [dirs.remove(d) for d in list(dirs) if d in localdirsexclude] - for name in files: - fullpth = os.path.join(root, name).replace(pth, os.path.join(pth, ".")) - checksum = checksum_file(fullpth) - if checksum: - dirchecksums.append((fullpth, checksum)) - return dirchecksums - - checksums = [] - for pth in filelist_regex.split(filelist): - if not pth: - continue - pth = pth.strip() - if not pth: - continue - exist = pth.split(":")[1] - if exist == "False": - continue - pth = pth.split(":")[0] - if '*' in pth: - # Handle globs - for f in glob.glob(pth): - if os.path.isdir(f): - if not os.path.islink(f): - checksums.extend(checksum_dir(f)) - else: - checksum = checksum_file(f) - if checksum: - checksums.append((f, checksum)) - elif os.path.isdir(pth): - if not os.path.islink(pth): - checksums.extend(checksum_dir(pth)) - else: - checksum = checksum_file(pth) - if checksum: - checksums.append((pth, checksum)) - - checksums.sort(key=operator.itemgetter(1)) - return checksums - -class RevisionsCache(MultiProcessCache): - cache_file_name = "local_srcrevisions.dat" - CACHE_VERSION = 1 - - def __init__(self): - MultiProcessCache.__init__(self) - - def get_revs(self): - return self.cachedata[0] - - def get_rev(self, k): - if k in self.cachedata_extras[0]: - return self.cachedata_extras[0][k] - if k in self.cachedata[0]: - return self.cachedata[0][k] - return None - - def set_rev(self, k, v): - self.cachedata[0][k] = v - self.cachedata_extras[0][k] = v - - def merge_data(self, source, dest): - for h in source[0]: - dest[0][h] = source[0][h] diff --git a/bitbake/lib/bb/codeparser.py b/bitbake/lib/bb/codeparser.py deleted file mode 100644 index 4f70cf7fe7..0000000000 --- a/bitbake/lib/bb/codeparser.py +++ /dev/null @@ -1,566 +0,0 @@ -# -# Copyright BitBake Contributors -# -# SPDX-License-Identifier: GPL-2.0-only -# - -""" -BitBake code parser - -Parses actual code (i.e. python and shell) for functions and in-line -expressions. Used mainly to determine dependencies on other functions -and variables within the BitBake metadata. Also provides a cache for -this information in order to speed up processing. - -(Not to be confused with the code that parses the metadata itself, -see lib/bb/parse/ for that). 
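A rough usage sketch (illustrative only, under the definitions below):

    import logging
    from bb.codeparser import PythonParser, ShellParser

    log = logging.getLogger("BitBake")

    pp = PythonParser("example", log)
    pp.parse_python("d.getVar('FOO')")
    # pp.references now contains 'FOO'

    sp = ShellParser("example", log)
    execs = sp.parse_shell("tar xf archive.tar")
    # execs now contains 'tar'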
- -NOTE: if you change how the parsers gather information you will almost -certainly need to increment CodeParserCache.CACHE_VERSION below so that -any existing codeparser cache gets invalidated. Additionally you'll need -to increment __cache_version__ in cache.py in order to ensure that old -recipe caches don't trigger "Taskhash mismatch" errors. - -""" - -import ast -import sys -import codegen -import logging -import inspect -import bb.pysh as pysh -import bb.utils, bb.data -import hashlib -from itertools import chain -from bb.pysh import pyshyacc, pyshlex -from bb.cache import MultiProcessCache - -logger = logging.getLogger('BitBake.CodeParser') - -def bbhash(s): - return hashlib.sha256(s.encode("utf-8")).hexdigest() - -def check_indent(codestr): - """If the code is indented, add a top level piece of code to 'remove' the indentation""" - - i = 0 - while codestr[i] in ["\n", "\t", " "]: - i = i + 1 - - if i == 0: - return codestr - - if codestr[i-1] == "\t" or codestr[i-1] == " ": - if codestr[0] == "\n": - # Since we're adding a line, we need to remove one line of any empty padding - # to ensure line numbers are correct - codestr = codestr[1:] - return "if 1:\n" + codestr - - return codestr - -modulecode_deps = {} - -def add_module_functions(fn, functions, namespace): - import os - fstat = os.stat(fn) - fixedhash = fn + ":" + str(fstat.st_size) + ":" + str(fstat.st_mtime) - for f in functions: - name = "%s.%s" % (namespace, f) - parser = PythonParser(name, logger) - try: - parser.parse_python(None, filename=fn, lineno=1, fixedhash=fixedhash+f, func=functions[f]) - #bb.warn("Cached %s" % f) - except KeyError: - try: - targetfn = inspect.getsourcefile(functions[f]) - except TypeError: - # Builtin - continue - if fn != targetfn: - # Skip references to other modules outside this file - #bb.warn("Skipping %s" % name) - continue - try: - lines, lineno = inspect.getsourcelines(functions[f]) - except TypeError: - # Builtin - continue - src = "".join(lines) - parser.parse_python(src, filename=fn, lineno=lineno, fixedhash=fixedhash+f, func=functions[f]) - #bb.warn("Not cached %s" % f) - execs = parser.execs.copy() - # Expand internal module exec references - for e in parser.execs: - if e in functions: - execs.remove(e) - execs.add(namespace + "." + e) - visitorcode = None - if hasattr(functions[f], 'visitorcode'): - visitorcode = getattr(functions[f], "visitorcode") - modulecode_deps[name] = [parser.references.copy(), execs, parser.var_execs.copy(), parser.contains.copy(), parser.extra, visitorcode] - #bb.warn("%s: %s\nRefs:%s Execs: %s %s %s" % (name, fn, parser.references, parser.execs, parser.var_execs, parser.contains)) - -def update_module_dependencies(d): - for mod in modulecode_deps: - excludes = set((d.getVarFlag(mod, "vardepsexclude") or "").split()) - if excludes: - modulecode_deps[mod] = [modulecode_deps[mod][0] - excludes, modulecode_deps[mod][1] - excludes, modulecode_deps[mod][2] - excludes, modulecode_deps[mod][3], modulecode_deps[mod][4], modulecode_deps[mod][5]] - -# A custom getstate/setstate using tuples is actually worth 15% cachesize by -# avoiding duplication of the attribute names! 
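# Illustration of that saving (generic Python mirroring the cache line classes
# below; the class name is invented): the default pickle of __dict__ stores the
# attribute-name strings in every instance, while a tuple keeps only the values.
#
#     class Compact:
#         def __init__(self, refs, execs):
#             self.refs = refs
#             self.execs = execs
#
#         def __getstate__(self):
#             return (self.refs, self.execs)   # values only, no attribute names
#
#         def __setstate__(self, state):
#             self.__init__(*state)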
-class SetCache(object): - def __init__(self): - self.setcache = {} - - def internSet(self, items): - - new = [] - for i in items: - new.append(sys.intern(i)) - s = frozenset(new) - h = hash(s) - if h in self.setcache: - return self.setcache[h] - self.setcache[h] = s - return s - -codecache = SetCache() - -class pythonCacheLine(object): - def __init__(self, refs, execs, contains, extra): - self.refs = codecache.internSet(refs) - self.execs = codecache.internSet(execs) - self.contains = {} - for c in contains: - self.contains[c] = codecache.internSet(contains[c]) - self.extra = extra - - def __getstate__(self): - return (self.refs, self.execs, self.contains, self.extra) - - def __setstate__(self, state): - (refs, execs, contains, extra) = state - self.__init__(refs, execs, contains, extra) - def __hash__(self): - l = (hash(self.refs), hash(self.execs), hash(self.extra)) - for c in sorted(self.contains.keys()): - l = l + (c, hash(self.contains[c])) - return hash(l) - def __repr__(self): - return " ".join([str(self.refs), str(self.execs), str(self.contains)]) - - -class shellCacheLine(object): - def __init__(self, execs): - self.execs = codecache.internSet(execs) - - def __getstate__(self): - return (self.execs) - - def __setstate__(self, state): - (execs) = state - self.__init__(execs) - def __hash__(self): - return hash(self.execs) - def __repr__(self): - return str(self.execs) - -class CodeParserCache(MultiProcessCache): - cache_file_name = "bb_codeparser.dat" - # NOTE: you must increment this if you change how the parsers gather information, - # so that an existing cache gets invalidated. Additionally you'll need - # to increment __cache_version__ in cache.py in order to ensure that old - # recipe caches don't trigger "Taskhash mismatch" errors. - CACHE_VERSION = 14 - - def __init__(self): - MultiProcessCache.__init__(self) - self.pythoncache = self.cachedata[0] - self.shellcache = self.cachedata[1] - self.pythoncacheextras = self.cachedata_extras[0] - self.shellcacheextras = self.cachedata_extras[1] - - # To avoid duplication in the codeparser cache, keep - # a lookup of hashes of objects we already have - self.pythoncachelines = {} - self.shellcachelines = {} - - def newPythonCacheLine(self, refs, execs, contains, extra): - cacheline = pythonCacheLine(refs, execs, contains, extra) - h = hash(cacheline) - if h in self.pythoncachelines: - return self.pythoncachelines[h] - self.pythoncachelines[h] = cacheline - return cacheline - - def newShellCacheLine(self, execs): - cacheline = shellCacheLine(execs) - h = hash(cacheline) - if h in self.shellcachelines: - return self.shellcachelines[h] - self.shellcachelines[h] = cacheline - return cacheline - - def init_cache(self, cachedir): - # Check if we already have the caches - if self.pythoncache: - return - - MultiProcessCache.init_cache(self, cachedir) - - # cachedata gets re-assigned in the parent - self.pythoncache = self.cachedata[0] - self.shellcache = self.cachedata[1] - - def create_cachedata(self): - data = [{}, {}] - return data - -codeparsercache = CodeParserCache() - -def parser_cache_init(cachedir): - codeparsercache.init_cache(cachedir) - -def parser_cache_save(): - codeparsercache.save_extras() - -def parser_cache_savemerge(): - codeparsercache.save_merge() - -Logger = logging.getLoggerClass() -class BufferedLogger(Logger): - def __init__(self, name, level=0, target=None): - Logger.__init__(self, name) - self.setLevel(level) - self.buffer = [] - self.target = target - - def handle(self, record): - self.buffer.append(record) - - def 
flush(self): - for record in self.buffer: - if self.target.isEnabledFor(record.levelno): - self.target.handle(record) - self.buffer = [] - -class DummyLogger(): - def flush(self): - return - -class PythonParser(): - getvars = (".getVar", ".appendVar", ".prependVar", "oe.utils.conditional") - getvarflags = (".getVarFlag", ".appendVarFlag", ".prependVarFlag") - containsfuncs = ("bb.utils.contains", "base_contains") - containsanyfuncs = ("bb.utils.contains_any", "bb.utils.filter") - execfuncs = ("bb.build.exec_func", "bb.build.exec_task") - - def warn(self, func, arg): - """Warn about calls of bitbake APIs which pass a non-literal - argument for the variable name, as we're not able to track such - a reference. - """ - - try: - funcstr = codegen.to_source(func) - argstr = codegen.to_source(arg) - except TypeError: - self.log.debug2('Failed to convert function and argument to source form') - else: - self.log.debug(self.unhandled_message % (funcstr, argstr)) - - def visit_Call(self, node): - name = self.called_node_name(node.func) - if name and name in modulecode_deps and modulecode_deps[name][5]: - visitorcode = modulecode_deps[name][5] - contains, execs, warn = visitorcode(name, node.args) - for i in contains: - self.contains[i] = contains[i] - self.execs |= execs - if warn: - self.warn(node.func, warn) - elif name and (name.endswith(self.getvars) or name.endswith(self.getvarflags) or name in self.containsfuncs or name in self.containsanyfuncs): - if isinstance(node.args[0], ast.Constant) and isinstance(node.args[0].value, str): - varname = node.args[0].value - if name in self.containsfuncs and isinstance(node.args[1], ast.Constant): - if varname not in self.contains: - self.contains[varname] = set() - self.contains[varname].add(node.args[1].value) - elif name in self.containsanyfuncs and isinstance(node.args[1], ast.Constant): - if varname not in self.contains: - self.contains[varname] = set() - self.contains[varname].update(node.args[1].value.split()) - elif name.endswith(self.getvarflags): - if isinstance(node.args[1], ast.Constant): - self.references.add('%s[%s]' % (varname, node.args[1].value)) - else: - self.warn(node.func, node.args[1]) - else: - self.references.add(varname) - else: - self.warn(node.func, node.args[0]) - elif name and name.endswith(".expand"): - if isinstance(node.args[0], ast.Constant): - value = node.args[0].value - d = bb.data.init() - parser = d.expandWithRefs(value, self.name) - self.references |= parser.references - self.execs |= parser.execs - for varname in parser.contains: - if varname not in self.contains: - self.contains[varname] = set() - self.contains[varname] |= parser.contains[varname] - elif name in self.execfuncs: - if isinstance(node.args[0], ast.Constant): - self.var_execs.add(node.args[0].value) - else: - self.warn(node.func, node.args[0]) - elif name and isinstance(node.func, (ast.Name, ast.Attribute)): - self.execs.add(name) - - def called_node_name(self, node): - """Given a called node, return its original string form""" - components = [] - while node: - if isinstance(node, ast.Attribute): - components.append(node.attr) - node = node.value - elif isinstance(node, ast.Name): - components.append(node.id) - return '.'.join(reversed(components)) - else: - break - - def __init__(self, name, log): - self.name = name - self.var_execs = set() - self.contains = {} - self.execs = set() - self.references = set() - self._log = log - # Defer init as expensive - self.log = DummyLogger() - - self.unhandled_message = "in call of %s, argument '%s' is not a string 
literal" - self.unhandled_message = "while parsing %s, %s" % (name, self.unhandled_message) - - # For the python module code it is expensive to have the function text so it is - # uses a different fixedhash to cache against. We can take the hit on obtaining the - # text if it isn't in the cache. - def parse_python(self, node, lineno=0, filename="", fixedhash=None, func=None): - if not fixedhash and (not node or not node.strip()): - return - - if fixedhash: - h = fixedhash - else: - h = bbhash(str(node)) - - if h in codeparsercache.pythoncache: - self.references = set(codeparsercache.pythoncache[h].refs) - self.execs = set(codeparsercache.pythoncache[h].execs) - self.contains = {} - for i in codeparsercache.pythoncache[h].contains: - self.contains[i] = set(codeparsercache.pythoncache[h].contains[i]) - self.extra = codeparsercache.pythoncache[h].extra - return - - if h in codeparsercache.pythoncacheextras: - self.references = set(codeparsercache.pythoncacheextras[h].refs) - self.execs = set(codeparsercache.pythoncacheextras[h].execs) - self.contains = {} - for i in codeparsercache.pythoncacheextras[h].contains: - self.contains[i] = set(codeparsercache.pythoncacheextras[h].contains[i]) - self.extra = codeparsercache.pythoncacheextras[h].extra - return - - if fixedhash and not node: - raise KeyError - - # Need to parse so take the hit on the real log buffer - self.log = BufferedLogger('BitBake.Data.PythonParser', logging.DEBUG, self._log) - - # We can't add to the linenumbers for compile, we can pad to the correct number of blank lines though - node = "\n" * int(lineno) + node - code = compile(check_indent(str(node)), filename, "exec", - ast.PyCF_ONLY_AST) - - for n in ast.walk(code): - if n.__class__.__name__ == "Call": - self.visit_Call(n) - - if func is not None: - self.references |= getattr(func, "bb_vardeps", set()) - self.references -= getattr(func, "bb_vardepsexclude", set()) - - self.execs.update(self.var_execs) - self.extra = None - if fixedhash: - self.extra = bbhash(str(node)) - - codeparsercache.pythoncacheextras[h] = codeparsercache.newPythonCacheLine(self.references, self.execs, self.contains, self.extra) - -class ShellParser(): - def __init__(self, name, log): - self.funcdefs = set() - self.allexecs = set() - self.execs = set() - self._name = name - self._log = log - # Defer init as expensive - self.log = DummyLogger() - - self.unhandled_template = "unable to handle non-literal command '%s'" - self.unhandled_template = "while parsing %s, %s" % (name, self.unhandled_template) - - def parse_shell(self, value): - """Parse the supplied shell code in a string, returning the external - commands it executes. 
- """ - - h = bbhash(str(value)) - - if h in codeparsercache.shellcache: - self.execs = set(codeparsercache.shellcache[h].execs) - return self.execs - - if h in codeparsercache.shellcacheextras: - self.execs = set(codeparsercache.shellcacheextras[h].execs) - return self.execs - - # Need to parse so take the hit on the real log buffer - self.log = BufferedLogger('BitBake.Data.%s' % self._name, logging.DEBUG, self._log) - - self._parse_shell(value) - self.execs = set(cmd for cmd in self.allexecs if cmd not in self.funcdefs) - - codeparsercache.shellcacheextras[h] = codeparsercache.newShellCacheLine(self.execs) - - return self.execs - - def _parse_shell(self, value): - try: - tokens, _ = pyshyacc.parse(value, eof=True, debug=False) - except Exception: - bb.error('Error during parse shell code, the last 5 lines are:\n%s' % '\n'.join(value.split('\n')[-5:])) - raise - - self.process_tokens(tokens) - - def process_tokens(self, tokens): - """Process a supplied portion of the syntax tree as returned by - pyshyacc.parse. - """ - - def function_definition(value): - self.funcdefs.add(value.name) - return [value.body], None - - def case_clause(value): - # Element 0 of each item in the case is the list of patterns, and - # Element 1 of each item in the case is the list of commands to be - # executed when that pattern matches. - words = chain(*[item[0] for item in value.items]) - cmds = chain(*[item[1] for item in value.items]) - return cmds, words - - def if_clause(value): - main = chain(value.cond, value.if_cmds) - rest = value.else_cmds - if isinstance(rest, tuple) and rest[0] == "elif": - return chain(main, if_clause(rest[1])) - else: - return chain(main, rest) - - def simple_command(value): - return None, chain(value.words, (assign[1] for assign in value.assigns)) - - token_handlers = { - "and_or": lambda x: ((x.left, x.right), None), - "async": lambda x: ([x], None), - "brace_group": lambda x: (x.cmds, None), - "for_clause": lambda x: (x.cmds, x.items), - "function_definition": function_definition, - "if_clause": lambda x: (if_clause(x), None), - "pipeline": lambda x: (x.commands, None), - "redirect_list": lambda x: ([x.cmd], None), - "subshell": lambda x: (x.cmds, None), - "while_clause": lambda x: (chain(x.condition, x.cmds), None), - "until_clause": lambda x: (chain(x.condition, x.cmds), None), - "simple_command": simple_command, - "case_clause": case_clause, - } - - def process_token_list(tokens): - for token in tokens: - if isinstance(token, list): - process_token_list(token) - continue - name, value = token - try: - more_tokens, words = token_handlers[name](value) - except KeyError: - raise NotImplementedError("Unsupported token type " + name) - - if more_tokens: - self.process_tokens(more_tokens) - - if words: - self.process_words(words) - - process_token_list(tokens) - - def process_words(self, words): - """Process a set of 'words' in pyshyacc parlance, which includes - extraction of executed commands from $() blocks, as well as grabbing - the command name argument. - """ - - words = list(words) - for word in words: - wtree = pyshlex.make_wordtree(word[1]) - for part in wtree: - if not isinstance(part, list): - continue - - candidates = [part] - - # If command is of type: - # - # var="... $(cmd [...]) ..." - # - # Then iterate on what's between the quotes and if we find a - # list, make that what we check for below. 
- if len(part) >= 3 and part[0] == '"': - for p in part[1:-1]: - if isinstance(p, list): - candidates.append(p) - - for candidate in candidates: - if len(candidate) >= 2: - if candidate[0] in ('`', '$('): - command = pyshlex.wordtree_as_string(candidate[1:-1]) - self._parse_shell(command) - - if word[0] in ("cmd_name", "cmd_word"): - if word in words: - words.remove(word) - - usetoken = False - for word in words: - if word[0] in ("cmd_name", "cmd_word") or \ - (usetoken and word[0] == "TOKEN"): - if "=" in word[1]: - usetoken = True - continue - - cmd = word[1] - if cmd.startswith("$"): - self.log.debug(self.unhandled_template % cmd) - elif cmd == "eval": - command = " ".join(word for _, word in words[1:]) - self._parse_shell(command) - else: - self.allexecs.add(cmd) - break diff --git a/bitbake/lib/bb/command.py b/bitbake/lib/bb/command.py deleted file mode 100644 index 59a979ee90..0000000000 --- a/bitbake/lib/bb/command.py +++ /dev/null @@ -1,813 +0,0 @@ -""" -BitBake 'Command' module - -Provide an interface to interact with the bitbake server through 'commands' -""" - -# Copyright (C) 2006-2007 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -""" -The bitbake server takes 'commands' from its UI/commandline. -Commands are either synchronous or asynchronous. -Async commands return data to the client in the form of events. -Sync commands must only return data through the function return value -and must not trigger events, directly or indirectly. -Commands are queued in a CommandQueue -""" - -from collections import OrderedDict, defaultdict - -import io -import bb.event -import bb.cooker -import bb.remotedata -import bb.parse - -class DataStoreConnectionHandle(object): - def __init__(self, dsindex=0): - self.dsindex = dsindex - -class CommandCompleted(bb.event.Event): - pass - -class CommandExit(bb.event.Event): - def __init__(self, exitcode): - bb.event.Event.__init__(self) - self.exitcode = int(exitcode) - -class CommandFailed(CommandExit): - def __init__(self, message): - self.error = message - CommandExit.__init__(self, 1) - def __str__(self): - return "Command execution failed: %s" % self.error - -class CommandError(Exception): - pass - -class Command: - """ - A queue of asynchronous commands for bitbake - """ - def __init__(self, cooker, process_server): - self.cooker = cooker - self.cmds_sync = CommandsSync() - self.cmds_async = CommandsAsync() - self.remotedatastores = None - - self.process_server = process_server - # Access with locking using process_server.{get/set/clear}_async_cmd() - self.currentAsyncCommand = None - - def runCommand(self, commandline, process_server, ro_only=False): - command = commandline.pop(0) - - # Ensure cooker is ready for commands - if command not in ["updateConfig", "setFeatures", "ping"]: - try: - self.cooker.init_configdata() - if not self.remotedatastores: - self.remotedatastores = bb.remotedata.RemoteDatastores(self.cooker) - except (Exception, SystemExit) as exc: - import traceback - if isinstance(exc, bb.BBHandledException): - # We need to start returning real exceptions here. 
Until we do, we can't - # tell if an exception is an instance of bb.BBHandledException - return None, "bb.BBHandledException()\n" + traceback.format_exc() - return None, traceback.format_exc() - - if hasattr(CommandsSync, command): - # Can run synchronous commands straight away - command_method = getattr(self.cmds_sync, command) - if ro_only: - if not hasattr(command_method, 'readonly') or not getattr(command_method, 'readonly'): - return None, "Not able to execute not readonly commands in readonly mode" - try: - if getattr(command_method, 'needconfig', True): - self.cooker.updateCacheSync() - result = command_method(self, commandline) - except CommandError as exc: - return None, exc.args[0] - except (Exception, SystemExit) as exc: - import traceback - if isinstance(exc, bb.BBHandledException): - # We need to start returning real exceptions here. Until we do, we can't - # tell if an exception is an instance of bb.BBHandledException - return None, "bb.BBHandledException()\n" + traceback.format_exc() - return None, traceback.format_exc() - else: - return result, None - if command not in CommandsAsync.__dict__: - return None, "No such command" - if not process_server.set_async_cmd((command, commandline)): - return None, "Busy (%s in progress)" % self.process_server.get_async_cmd()[0] - self.cooker.idleCallBackRegister(self.runAsyncCommand, process_server) - return True, None - - def runAsyncCommand(self, _, process_server, halt): - try: - if self.cooker.state in (bb.cooker.State.ERROR, bb.cooker.State.SHUTDOWN, bb.cooker.State.FORCE_SHUTDOWN): - # updateCache will trigger a shutdown of the parser - # and then raise BBHandledException triggering an exit - self.cooker.updateCache() - return bb.server.process.idleFinish("Cooker in error state") - cmd = process_server.get_async_cmd() - if cmd is not None: - (command, options) = cmd - commandmethod = getattr(CommandsAsync, command) - needcache = getattr( commandmethod, "needcache" ) - if needcache and self.cooker.state != bb.cooker.State.RUNNING: - self.cooker.updateCache() - return True - else: - commandmethod(self.cmds_async, self, options) - return False - else: - return bb.server.process.idleFinish("Nothing to do, no async command?") - except KeyboardInterrupt as exc: - return bb.server.process.idleFinish("Interrupted") - except SystemExit as exc: - arg = exc.args[0] - if isinstance(arg, str): - return bb.server.process.idleFinish(arg) - else: - return bb.server.process.idleFinish("Exited with %s" % arg) - except Exception as exc: - import traceback - if isinstance(exc, bb.BBHandledException): - return bb.server.process.idleFinish("") - else: - return bb.server.process.idleFinish(traceback.format_exc()) - - def finishAsyncCommand(self, msg=None, code=None): - self.cooker.finishcommand() - self.process_server.clear_async_cmd() - if msg or msg == "": - bb.event.fire(CommandFailed(msg), self.cooker.data) - elif code: - bb.event.fire(CommandExit(code), self.cooker.data) - else: - bb.event.fire(CommandCompleted(), self.cooker.data) - - def reset(self): - if self.remotedatastores: - self.remotedatastores = bb.remotedata.RemoteDatastores(self.cooker) - -class CommandsSync: - """ - A class of synchronous commands - These should run quickly so as not to hurt interactive performance. - These must not influence any running synchronous command. - """ - - def ping(self, command, params): - """ - Allow a UI to check the server is still alive - """ - return "Still alive!" 
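    # Function attributes mark command metadata: 'readonly' commands may be run
    # on a read-only server connection, and 'needconfig' (True unless set)
    # makes runCommand() call updateCacheSync() before dispatching the command.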
- ping.needconfig = False - ping.readonly = True - - def stateShutdown(self, command, params): - """ - Trigger cooker 'shutdown' mode - """ - command.cooker.shutdown(False) - - def stateForceShutdown(self, command, params): - """ - Stop the cooker - """ - command.cooker.shutdown(True) - - def getAllKeysWithFlags(self, command, params): - """ - Returns a dump of the global state. Call with - variable flags to be retrieved as params. - """ - flaglist = params[0] - return command.cooker.getAllKeysWithFlags(flaglist) - getAllKeysWithFlags.readonly = True - - def getVariable(self, command, params): - """ - Read the value of a variable from data - """ - varname = params[0] - expand = True - if len(params) > 1: - expand = (params[1] == "True") - - return command.cooker.data.getVar(varname, expand) - getVariable.readonly = True - - def setVariable(self, command, params): - """ - Set the value of variable in data - """ - varname = params[0] - value = str(params[1]) - command.cooker.extraconfigdata[varname] = value - command.cooker.data.setVar(varname, value) - - def getSetVariable(self, command, params): - """ - Read the value of a variable from data and set it into the datastore - which effectively expands and locks the value. - """ - varname = params[0] - result = self.getVariable(command, params) - command.cooker.data.setVar(varname, result) - return result - - def setConfig(self, command, params): - """ - Set the value of variable in configuration - """ - varname = params[0] - value = str(params[1]) - setattr(command.cooker.configuration, varname, value) - - def enableDataTracking(self, command, params): - """ - Enable history tracking for variables - """ - command.cooker.enableDataTracking() - - def disableDataTracking(self, command, params): - """ - Disable history tracking for variables - """ - command.cooker.disableDataTracking() - - def setPrePostConfFiles(self, command, params): - prefiles = params[0].split() - postfiles = params[1].split() - command.cooker.configuration.prefile = prefiles - command.cooker.configuration.postfile = postfiles - setPrePostConfFiles.needconfig = False - - def matchFile(self, command, params): - fMatch = params[0] - try: - mc = params[0] - except IndexError: - mc = '' - return command.cooker.matchFile(fMatch, mc) - matchFile.needconfig = False - - def getUIHandlerNum(self, command, params): - return bb.event.get_uihandler() - getUIHandlerNum.needconfig = False - getUIHandlerNum.readonly = True - - def setEventMask(self, command, params): - handlerNum = params[0] - llevel = params[1] - debug_domains = params[2] - mask = params[3] - return bb.event.set_UIHmask(handlerNum, llevel, debug_domains, mask) - setEventMask.needconfig = False - setEventMask.readonly = True - - def setFeatures(self, command, params): - """ - Set the cooker features to include the passed list of features - """ - features = params[0] - command.cooker.setFeatures(features) - setFeatures.needconfig = False - # although we change the internal state of the cooker, this is transparent since - # we always take and leave the cooker in state.initial - setFeatures.readonly = True - - def updateConfig(self, command, params): - options = params[0] - environment = params[1] - cmdline = params[2] - command.cooker.updateConfigOpts(options, environment, cmdline) - updateConfig.needconfig = False - - def parseConfiguration(self, command, params): - """Instruct bitbake to parse its configuration - NOTE: it is only necessary to call this if you aren't calling any normal action - (otherwise parsing is taken 
care of automatically) - """ - command.cooker.parseConfiguration() - parseConfiguration.needconfig = False - - def getLayerPriorities(self, command, params): - command.cooker.parseConfiguration() - ret = [] - # regex objects cannot be marshalled by xmlrpc - for collection, pattern, regex, pri in command.cooker.bbfile_config_priorities: - ret.append((collection, pattern, regex.pattern, pri)) - return ret - getLayerPriorities.readonly = True - - def revalidateCaches(self, command, params): - """Called by UI clients when metadata may have changed""" - command.cooker.revalidateCaches() - revalidateCaches.needconfig = False - - def getRecipes(self, command, params): - try: - mc = params[0] - except IndexError: - mc = '' - return list(command.cooker.recipecaches[mc].pkg_pn.items()) - getRecipes.readonly = True - - def getRecipeDepends(self, command, params): - try: - mc = params[0] - except IndexError: - mc = '' - return list(command.cooker.recipecaches[mc].deps.items()) - getRecipeDepends.readonly = True - - def getRecipeVersions(self, command, params): - try: - mc = params[0] - except IndexError: - mc = '' - return command.cooker.recipecaches[mc].pkg_pepvpr - getRecipeVersions.readonly = True - - def getRecipeProvides(self, command, params): - try: - mc = params[0] - except IndexError: - mc = '' - return command.cooker.recipecaches[mc].fn_provides - getRecipeProvides.readonly = True - - def getRecipePackages(self, command, params): - try: - mc = params[0] - except IndexError: - mc = '' - return command.cooker.recipecaches[mc].packages - getRecipePackages.readonly = True - - def getRecipePackagesDynamic(self, command, params): - try: - mc = params[0] - except IndexError: - mc = '' - return command.cooker.recipecaches[mc].packages_dynamic - getRecipePackagesDynamic.readonly = True - - def getRProviders(self, command, params): - try: - mc = params[0] - except IndexError: - mc = '' - return command.cooker.recipecaches[mc].rproviders - getRProviders.readonly = True - - def getRuntimeDepends(self, command, params): - ret = [] - try: - mc = params[0] - except IndexError: - mc = '' - rundeps = command.cooker.recipecaches[mc].rundeps - for key, value in rundeps.items(): - if isinstance(value, defaultdict): - value = dict(value) - ret.append((key, value)) - return ret - getRuntimeDepends.readonly = True - - def getRuntimeRecommends(self, command, params): - ret = [] - try: - mc = params[0] - except IndexError: - mc = '' - runrecs = command.cooker.recipecaches[mc].runrecs - for key, value in runrecs.items(): - if isinstance(value, defaultdict): - value = dict(value) - ret.append((key, value)) - return ret - getRuntimeRecommends.readonly = True - - def getRecipeInherits(self, command, params): - try: - mc = params[0] - except IndexError: - mc = '' - return command.cooker.recipecaches[mc].inherits - getRecipeInherits.readonly = True - - def getBbFilePriority(self, command, params): - try: - mc = params[0] - except IndexError: - mc = '' - return command.cooker.recipecaches[mc].bbfile_priority - getBbFilePriority.readonly = True - - def getDefaultPreference(self, command, params): - try: - mc = params[0] - except IndexError: - mc = '' - return command.cooker.recipecaches[mc].pkg_dp - getDefaultPreference.readonly = True - - - def getSkippedRecipes(self, command, params): - """ - Get the map of skipped recipes for the specified multiconfig/mc name (`params[0]`). - - Invoked by `bb.tinfoil.Tinfoil.get_skipped_recipes` - - :param command: Internally used parameter. - :param params: Parameter array. 
params[0] is multiconfig/mc name. If not given, then default mc '' is assumed. - :return: Dict whose keys are virtualfns and values are `bb.cooker.SkippedPackage` - """ - try: - mc = params[0] - except IndexError: - mc = '' - - # Return list sorted by reverse priority order - import bb.cache - def sortkey(x): - vfn, _ = x - realfn, _, item_mc = bb.cache.virtualfn2realfn(vfn) - return -command.cooker.collections[item_mc].calc_bbfile_priority(realfn)[0], vfn - - skipdict = OrderedDict(sorted(command.cooker.skiplist_by_mc[mc].items(), key=sortkey)) - return list(skipdict.items()) - getSkippedRecipes.readonly = True - - def getOverlayedRecipes(self, command, params): - try: - mc = params[0] - except IndexError: - mc = '' - return list(command.cooker.collections[mc].overlayed.items()) - getOverlayedRecipes.readonly = True - - def getFileAppends(self, command, params): - fn = params[0] - try: - mc = params[1] - except IndexError: - mc = '' - return command.cooker.collections[mc].get_file_appends(fn) - getFileAppends.readonly = True - - def getAllAppends(self, command, params): - try: - mc = params[0] - except IndexError: - mc = '' - return command.cooker.collections[mc].bbappends - getAllAppends.readonly = True - - def findProviders(self, command, params): - try: - mc = params[0] - except IndexError: - mc = '' - return command.cooker.findProviders(mc) - findProviders.readonly = True - - def findBestProvider(self, command, params): - (mc, pn) = bb.runqueue.split_mc(params[0]) - return command.cooker.findBestProvider(pn, mc) - findBestProvider.readonly = True - - def allProviders(self, command, params): - try: - mc = params[0] - except IndexError: - mc = '' - return list(bb.providers.allProviders(command.cooker.recipecaches[mc]).items()) - allProviders.readonly = True - - def getRuntimeProviders(self, command, params): - rprovide = params[0] - try: - mc = params[1] - except IndexError: - mc = '' - all_p = bb.providers.getRuntimeProviders(command.cooker.recipecaches[mc], rprovide) - if all_p: - best = bb.providers.filterProvidersRunTime(all_p, rprovide, - command.cooker.data, - command.cooker.recipecaches[mc])[0][0] - else: - best = None - return all_p, best - getRuntimeProviders.readonly = True - - def dataStoreConnectorCmd(self, command, params): - dsindex = params[0] - method = params[1] - args = params[2] - kwargs = params[3] - - d = command.remotedatastores[dsindex] - ret = getattr(d, method)(*args, **kwargs) - - if isinstance(ret, bb.data_smart.DataSmart): - idx = command.remotedatastores.store(ret) - return DataStoreConnectionHandle(idx) - - return ret - - def dataStoreConnectorVarHistCmd(self, command, params): - dsindex = params[0] - method = params[1] - args = params[2] - kwargs = params[3] - - d = command.remotedatastores[dsindex].varhistory - return getattr(d, method)(*args, **kwargs) - - def dataStoreConnectorVarHistCmdEmit(self, command, params): - dsindex = params[0] - var = params[1] - oval = params[2] - val = params[3] - d = command.remotedatastores[params[4]] - - o = io.StringIO() - command.remotedatastores[dsindex].varhistory.emit(var, oval, val, o, d) - return o.getvalue() - - def dataStoreConnectorIncHistCmd(self, command, params): - dsindex = params[0] - method = params[1] - args = params[2] - kwargs = params[3] - - d = command.remotedatastores[dsindex].inchistory - return getattr(d, method)(*args, **kwargs) - - def dataStoreConnectorRelease(self, command, params): - dsindex = params[0] - if dsindex <= 0: - raise CommandError('dataStoreConnectorRelease: invalid index %d' % 
dsindex) - command.remotedatastores.release(dsindex) - - def parseRecipeFile(self, command, params): - """ - Parse the specified recipe file (with or without bbappends) - and return a datastore object representing the environment - for the recipe. - """ - virtualfn = params[0] - (fn, cls, mc) = bb.cache.virtualfn2realfn(virtualfn) - appends = params[1] - appendlist = params[2] - if len(params) > 3: - config_data = command.remotedatastores[params[3]] - else: - config_data = None - - if appends: - if appendlist is not None: - appendfiles = appendlist - else: - appendfiles = command.cooker.collections[mc].get_file_appends(fn) - else: - appendfiles = [] - layername = command.cooker.collections[mc].calc_bbfile_priority(fn)[2] - # We are calling bb.cache locally here rather than on the server, - # but that's OK because it doesn't actually need anything from - # the server barring the global datastore (which we have a remote - # version of) - if config_data: - # We have to use a different function here if we're passing in a datastore - # NOTE: we took a copy above, so we don't do it here again - envdata = command.cooker.databuilder._parse_recipe(config_data, fn, appendfiles, mc, layername)[cls] - else: - # Use the standard path - envdata = command.cooker.databuilder.parseRecipe(virtualfn, appendfiles, layername) - idx = command.remotedatastores.store(envdata) - return DataStoreConnectionHandle(idx) - parseRecipeFile.readonly = True - - def finalizeData(self, command, params): - newdata = command.cooker.data.createCopy() - bb.data.expandKeys(newdata) - bb.parse.ast.runAnonFuncs(newdata) - idx = command.remotedatastores.store(newdata) - return DataStoreConnectionHandle(idx) - -class CommandsAsync: - """ - A class of asynchronous commands - These functions communicate via generated events. - Any function that requires metadata parsing should be here. - """ - - def buildFile(self, command, params): - """ - Build a single specified .bb file - """ - bfile = params[0] - task = params[1] - if len(params) > 2: - internal = params[2] - else: - internal = False - - if internal: - command.cooker.buildFileInternal(bfile, task, fireevents=False, quietlog=True) - else: - command.cooker.buildFile(bfile, task) - buildFile.needcache = False - - def buildTargets(self, command, params): - """ - Build a set of targets - """ - pkgs_to_build = params[0] - task = params[1] - - command.cooker.buildTargets(pkgs_to_build, task) - buildTargets.needcache = True - - def generateDepTreeEvent(self, command, params): - """ - Generate an event containing the dependency information - """ - pkgs_to_build = params[0] - task = params[1] - - command.cooker.generateDepTreeEvent(pkgs_to_build, task) - command.finishAsyncCommand() - generateDepTreeEvent.needcache = True - - def generateDotGraph(self, command, params): - """ - Dump dependency information to disk as .dot files - """ - pkgs_to_build = params[0] - task = params[1] - - command.cooker.generateDotGraphFiles(pkgs_to_build, task) - command.finishAsyncCommand() - generateDotGraph.needcache = True - - def generateTargetsTree(self, command, params): - """ - Generate a tree of buildable targets. - If klass is provided ensure all recipes that inherit the class are - included in the package list. - If pkg_list provided use that list (plus any extras brought in by - klass) rather than generating a tree for all packages. 
- """ - klass = params[0] - pkg_list = params[1] - - command.cooker.generateTargetsTree(klass, pkg_list) - command.finishAsyncCommand() - generateTargetsTree.needcache = True - - def findConfigFiles(self, command, params): - """ - Find config files which provide appropriate values - for the passed configuration variable. i.e. MACHINE - """ - varname = params[0] - - command.cooker.findConfigFiles(varname) - command.finishAsyncCommand() - findConfigFiles.needcache = False - - def findFilesMatchingInDir(self, command, params): - """ - Find implementation files matching the specified pattern - in the requested subdirectory of a BBPATH - """ - pattern = params[0] - directory = params[1] - - command.cooker.findFilesMatchingInDir(pattern, directory) - command.finishAsyncCommand() - findFilesMatchingInDir.needcache = False - - def testCookerCommandEvent(self, command, params): - """ - Dummy command used by OEQA selftest to test tinfoil without IO - """ - pattern = params[0] - - command.cooker.testCookerCommandEvent(pattern) - command.finishAsyncCommand() - testCookerCommandEvent.needcache = False - - def findConfigFilePath(self, command, params): - """ - Find the path of the requested configuration file - """ - configfile = params[0] - - command.cooker.findConfigFilePath(configfile) - command.finishAsyncCommand() - findConfigFilePath.needcache = False - - def showVersions(self, command, params): - """ - Show the currently selected versions - """ - command.cooker.showVersions() - command.finishAsyncCommand() - showVersions.needcache = True - - def showEnvironmentTarget(self, command, params): - """ - Print the environment of a target recipe - (needs the cache to work out which recipe to use) - """ - pkg = params[0] - - command.cooker.showEnvironment(None, pkg) - command.finishAsyncCommand() - showEnvironmentTarget.needcache = True - - def showEnvironment(self, command, params): - """ - Print the standard environment - or if specified the environment for a specified recipe - """ - bfile = params[0] - - command.cooker.showEnvironment(bfile) - command.finishAsyncCommand() - showEnvironment.needcache = False - - def parseFiles(self, command, params): - """ - Parse the .bb files - """ - command.cooker.updateCache() - command.finishAsyncCommand() - parseFiles.needcache = True - - def compareRevisions(self, command, params): - """ - Parse the .bb files - """ - if bb.fetch.fetcher_compare_revisions(command.cooker.data): - command.finishAsyncCommand(code=1) - else: - command.finishAsyncCommand() - compareRevisions.needcache = True - - def triggerEvent(self, command, params): - """ - Trigger a certain event - """ - event = params[0] - bb.event.fire(eval(event), command.cooker.data) - process_server.clear_async_cmd() - triggerEvent.needcache = False - - def resetCooker(self, command, params): - """ - Reset the cooker to its initial state, thus forcing a reparse for - any async command that has the needcache property set to True - """ - command.cooker.reset() - command.finishAsyncCommand() - resetCooker.needcache = False - - def clientComplete(self, command, params): - """ - Do the right thing when the controlling client exits - """ - command.cooker.clientComplete() - command.finishAsyncCommand() - clientComplete.needcache = False - - def findSigInfo(self, command, params): - """ - Find signature info files via the signature generator - """ - (mc, pn) = bb.runqueue.split_mc(params[0]) - taskname = params[1] - sigs = params[2] - bb.siggen.check_siggen_version(bb.siggen) - res = bb.siggen.find_siginfo(pn, 
taskname, sigs, command.cooker.databuilder.mcdata[mc]) - bb.event.fire(bb.event.FindSigInfoResult(res), command.cooker.databuilder.mcdata[mc]) - command.finishAsyncCommand() - findSigInfo.needcache = False - - def getTaskSignatures(self, command, params): - res = command.cooker.getTaskSignatures(params[0], params[1]) - bb.event.fire(bb.event.GetTaskSignatureResult(res), command.cooker.data) - command.finishAsyncCommand() - getTaskSignatures.needcache = True diff --git a/bitbake/lib/bb/compress/_pipecompress.py b/bitbake/lib/bb/compress/_pipecompress.py deleted file mode 100644 index 4a403d62cf..0000000000 --- a/bitbake/lib/bb/compress/_pipecompress.py +++ /dev/null @@ -1,196 +0,0 @@ -# -# Copyright BitBake Contributors -# -# SPDX-License-Identifier: GPL-2.0-only -# -# Helper library to implement streaming compression and decompression using an -# external process -# -# This library should be used directly by end users; a wrapper library for the -# specific compression tool should be created - -import builtins -import io -import os -import subprocess - - -def open_wrap( - cls, filename, mode="rb", *, encoding=None, errors=None, newline=None, **kwargs -): - """ - Open a compressed file in binary or text mode. - - Users should not call this directly. A specific compression library can use - this helper to provide it's own "open" command - - The filename argument can be an actual filename (a str or bytes object), or - an existing file object to read from or write to. - - The mode argument can be "r", "rb", "w", "wb", "x", "xb", "a" or "ab" for - binary mode, or "rt", "wt", "xt" or "at" for text mode. The default mode is - "rb". - - For binary mode, this function is equivalent to the cls constructor: - cls(filename, mode). In this case, the encoding, errors and newline - arguments must not be provided. - - For text mode, a cls object is created, and wrapped in an - io.TextIOWrapper instance with the specified encoding, error handling - behavior, and line ending(s). - """ - if "t" in mode: - if "b" in mode: - raise ValueError("Invalid mode: %r" % (mode,)) - else: - if encoding is not None: - raise ValueError("Argument 'encoding' not supported in binary mode") - if errors is not None: - raise ValueError("Argument 'errors' not supported in binary mode") - if newline is not None: - raise ValueError("Argument 'newline' not supported in binary mode") - - file_mode = mode.replace("t", "") - if isinstance(filename, (str, bytes, os.PathLike, int)): - binary_file = cls(filename, file_mode, **kwargs) - elif hasattr(filename, "read") or hasattr(filename, "write"): - binary_file = cls(None, file_mode, fileobj=filename, **kwargs) - else: - raise TypeError("filename must be a str or bytes object, or a file") - - if "t" in mode: - return io.TextIOWrapper( - binary_file, encoding, errors, newline, write_through=True - ) - else: - return binary_file - - -class CompressionError(OSError): - pass - - -class PipeFile(io.RawIOBase): - """ - Class that implements generically piping to/from a compression program - - Derived classes should add the function get_compress() and get_decompress() - that return the required commands. 
Input will be piped into stdin and the - (de)compressed output should be written to stdout, e.g.: - - class FooFile(PipeCompressionFile): - def get_decompress(self): - return ["fooc", "--decompress", "--stdout"] - - def get_compress(self): - return ["fooc", "--compress", "--stdout"] - - """ - - READ = 0 - WRITE = 1 - - def __init__(self, filename=None, mode="rb", *, stderr=None, fileobj=None): - if "t" in mode or "U" in mode: - raise ValueError("Invalid mode: {!r}".format(mode)) - - if not "b" in mode: - mode += "b" - - if mode.startswith("r"): - self.mode = self.READ - elif mode.startswith("w"): - self.mode = self.WRITE - else: - raise ValueError("Invalid mode %r" % mode) - - if fileobj is not None: - self.fileobj = fileobj - else: - self.fileobj = builtins.open(filename, mode or "rb") - - if self.mode == self.READ: - self.p = subprocess.Popen( - self.get_decompress(), - stdin=self.fileobj, - stdout=subprocess.PIPE, - stderr=stderr, - close_fds=True, - ) - self.pipe = self.p.stdout - else: - self.p = subprocess.Popen( - self.get_compress(), - stdin=subprocess.PIPE, - stdout=self.fileobj, - stderr=stderr, - close_fds=True, - ) - self.pipe = self.p.stdin - - self.__closed = False - - def _check_process(self): - if self.p is None: - return - - returncode = self.p.wait() - if returncode: - raise CompressionError("Process died with %d" % returncode) - self.p = None - - def close(self): - if self.closed: - return - - self.pipe.close() - if self.p is not None: - self._check_process() - self.fileobj.close() - - self.__closed = True - - @property - def closed(self): - return self.__closed - - def fileno(self): - return self.pipe.fileno() - - def flush(self): - self.pipe.flush() - - def isatty(self): - return self.pipe.isatty() - - def readable(self): - return self.mode == self.READ - - def writable(self): - return self.mode == self.WRITE - - def readinto(self, b): - if self.mode != self.READ: - import errno - - raise OSError( - errno.EBADF, "read() on write-only %s object" % self.__class__.__name__ - ) - size = self.pipe.readinto(b) - if size == 0: - self._check_process() - return size - - def write(self, data): - if self.mode != self.WRITE: - import errno - - raise OSError( - errno.EBADF, "write() on read-only %s object" % self.__class__.__name__ - ) - data = self.pipe.write(data) - - if not data: - self._check_process() - - return data diff --git a/bitbake/lib/bb/compress/lz4.py b/bitbake/lib/bb/compress/lz4.py deleted file mode 100644 index 2a64681c86..0000000000 --- a/bitbake/lib/bb/compress/lz4.py +++ /dev/null @@ -1,19 +0,0 @@ -# -# Copyright BitBake Contributors -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import bb.compress._pipecompress - - -def open(*args, **kwargs): - return bb.compress._pipecompress.open_wrap(LZ4File, *args, **kwargs) - - -class LZ4File(bb.compress._pipecompress.PipeFile): - def get_compress(self): - return ["lz4", "-z", "-c"] - - def get_decompress(self): - return ["lz4", "-d", "-c"] diff --git a/bitbake/lib/bb/compress/zstd.py b/bitbake/lib/bb/compress/zstd.py deleted file mode 100644 index cdbbe9d60f..0000000000 --- a/bitbake/lib/bb/compress/zstd.py +++ /dev/null @@ -1,30 +0,0 @@ -# -# Copyright BitBake Contributors -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import bb.compress._pipecompress -import shutil - - -def open(*args, **kwargs): - return bb.compress._pipecompress.open_wrap(ZstdFile, *args, **kwargs) - - -class ZstdFile(bb.compress._pipecompress.PipeFile): - def __init__(self, *args, num_threads=1, compresslevel=3, **kwargs): - self.num_threads = 
num_threads - self.compresslevel = compresslevel - super().__init__(*args, **kwargs) - - def _get_zstd(self): - if self.num_threads == 1 or not shutil.which("pzstd"): - return ["zstd"] - return ["pzstd", "-p", "%d" % self.num_threads] - - def get_compress(self): - return self._get_zstd() + ["-c", "-%d" % self.compresslevel] - - def get_decompress(self): - return self._get_zstd() + ["-d", "-c"] diff --git a/bitbake/lib/bb/cooker.py b/bitbake/lib/bb/cooker.py deleted file mode 100644 index 03f262ac16..0000000000 --- a/bitbake/lib/bb/cooker.py +++ /dev/null @@ -1,2384 +0,0 @@ -# -# Copyright (C) 2003, 2004 Chris Larson -# Copyright (C) 2003, 2004 Phil Blundell -# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer -# Copyright (C) 2005 Holger Hans Peter Freyther -# Copyright (C) 2005 ROAD GmbH -# Copyright (C) 2006 - 2007 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# -import enum -import sys, os, glob, os.path, re, time -import itertools -import logging -from bb import multiprocessing -import threading -from io import StringIO, UnsupportedOperation -from contextlib import closing -from collections import defaultdict, namedtuple -import bb, bb.command -from bb import utils, data, parse, event, cache, providers, taskdata, runqueue, build -import queue -import signal -import prserv.serv -import json -import pickle -import codecs -import hashserv -import ctypes - -logger = logging.getLogger("BitBake") -collectlog = logging.getLogger("BitBake.Collection") -buildlog = logging.getLogger("BitBake.Build") -parselog = logging.getLogger("BitBake.Parsing") -providerlog = logging.getLogger("BitBake.Provider") - -class NoSpecificMatch(bb.BBHandledException): - """ - Exception raised when no or multiple file matches are found - """ - -class NothingToBuild(Exception): - """ - Exception raised when there is nothing to build - """ - -class CollectionError(bb.BBHandledException): - """ - Exception raised when layer configuration is incorrect - """ - - -class State(enum.Enum): - INITIAL = 0, - PARSING = 1, - RUNNING = 2, - SHUTDOWN = 3, - FORCE_SHUTDOWN = 4, - STOPPED = 5, - ERROR = 6 - - -class SkippedPackage: - def __init__(self, info = None, reason = None): - self.pn = None - self.skipreason = None - self.provides = None - self.rprovides = None - - if info: - self.pn = info.pn - self.skipreason = info.skipreason - self.provides = info.provides - self.rprovides = info.packages + info.rprovides - for package in info.packages: - self.rprovides += info.rprovides_pkg[package] - elif reason: - self.skipreason = reason - - -class CookerFeatures(object): - _feature_list = [HOB_EXTRA_CACHES, BASEDATASTORE_TRACKING, SEND_SANITYEVENTS, RECIPE_SIGGEN_INFO] = list(range(4)) - - def __init__(self): - self._features=set() - - def setFeature(self, f): - # validate we got a request for a feature we support - if f not in CookerFeatures._feature_list: - return - self._features.add(f) - - def __contains__(self, f): - return f in self._features - - def __iter__(self): - return self._features.__iter__() - - def __next__(self): - return next(self._features) - - -class EventWriter: - def __init__(self, cooker, eventfile): - self.cooker = cooker - self.eventfile = eventfile - self.event_queue = [] - - def write_variables(self): - with open(self.eventfile, "a") as f: - f.write("%s\n" % json.dumps({ "allvariables" : self.cooker.getAllKeysWithFlags(["doc", "func"])})) - - def send(self, event): - with open(self.eventfile, "a") as f: - try: - str_event = codecs.encode(pickle.dumps(event), 'base64').decode('utf-8') - 
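# Each line the writer emits below is a self-contained JSON object whose
# "vars" field holds the base64-encoded pickle of the event. A minimal
# sketch of a standalone reader (hypothetical, not part of this module):
#
#   import codecs, json, pickle
#   with open(eventfile) as f:
#       for line in f:
#           entry = json.loads(line)
#           if "vars" in entry:
#               event = pickle.loads(codecs.decode(entry["vars"].encode("utf-8"), "base64"))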
f.write("%s\n" % json.dumps({"class": event.__module__ + "." + event.__class__.__name__, - "vars": str_event})) - except Exception as err: - import traceback - print(err, traceback.format_exc()) - - -#============================================================================# -# BBCooker -#============================================================================# -class BBCooker: - """ - Manages one bitbake build run - """ - - def __init__(self, featureSet=None, server=None): - self.recipecaches = None - self.baseconfig_valid = False - self.parsecache_valid = False - self.eventlog = None - # The skiplists, one per multiconfig - self.skiplist_by_mc = defaultdict(dict) - self.featureset = CookerFeatures() - if featureSet: - for f in featureSet: - self.featureset.setFeature(f) - - self.orig_syspath = sys.path.copy() - self.orig_sysmodules = [*sys.modules] - - self.configuration = bb.cookerdata.CookerConfiguration() - - self.process_server = server - self.idleCallBackRegister = None - self.waitIdle = None - if server: - self.idleCallBackRegister = server.register_idle_function - self.waitIdle = server.wait_for_idle - - bb.debug(1, "BBCooker starting %s" % time.time()) - - self.configwatched = {} - self.parsewatched = {} - - # If being called by something like tinfoil, we need to clean cached data - # which may now be invalid - bb.parse.clear_cache() - bb.parse.BBHandler.cached_statements = {} - - self.ui_cmdline = None - self.hashserv = None - self.hashservaddr = None - - # TOSTOP must not be set or our children will hang when they output - try: - fd = sys.stdout.fileno() - if os.isatty(fd): - import termios - tcattr = termios.tcgetattr(fd) - if tcattr[3] & termios.TOSTOP: - buildlog.info("The terminal had the TOSTOP bit set, clearing...") - tcattr[3] = tcattr[3] & ~termios.TOSTOP - termios.tcsetattr(fd, termios.TCSANOW, tcattr) - except UnsupportedOperation: - pass - - self.command = bb.command.Command(self, self.process_server) - self.state = State.INITIAL - - self.parser = None - - signal.signal(signal.SIGTERM, self.sigterm_exception) - # Let SIGHUP exit as SIGTERM - signal.signal(signal.SIGHUP, self.sigterm_exception) - - bb.debug(1, "BBCooker startup complete %s" % time.time()) - - def init_configdata(self): - if not hasattr(self, "data"): - self.initConfigurationData() - bb.debug(1, "BBCooker parsed base configuration %s" % time.time()) - self.handlePRServ() - - def _baseconfig_set(self, value): - if value and not self.baseconfig_valid: - bb.server.process.serverlog("Base config valid") - elif not value and self.baseconfig_valid: - bb.server.process.serverlog("Base config invalidated") - self.baseconfig_valid = value - - def _parsecache_set(self, value): - if value and not self.parsecache_valid: - bb.server.process.serverlog("Parse cache valid") - elif not value and self.parsecache_valid: - bb.server.process.serverlog("Parse cache invalidated") - self.parsecache_valid = value - - def add_filewatch(self, deps, configwatcher=False): - if configwatcher: - watcher = self.configwatched - else: - watcher = self.parsewatched - - for i in deps: - f = i[0] - mtime = i[1] - watcher[f] = mtime - - def sigterm_exception(self, signum, stackframe): - if signum == signal.SIGTERM: - bb.warn("Cooker received SIGTERM, shutting down...") - elif signum == signal.SIGHUP: - bb.warn("Cooker received SIGHUP, shutting down...") - self.state = State.FORCE_SHUTDOWN - bb.event._should_exit.set() - - def setFeatures(self, features): - # we only accept a new feature set if we're in state initial, so we can reset 
without problems - if not self.state in [State.INITIAL, State.SHUTDOWN, State.FORCE_SHUTDOWN, State.STOPPED, State.ERROR]: - raise Exception("Illegal state for feature set change") - original_featureset = list(self.featureset) - for feature in features: - self.featureset.setFeature(feature) - bb.debug(1, "Features set %s (was %s)" % (original_featureset, list(self.featureset))) - if (original_featureset != list(self.featureset)) and self.state != State.ERROR and hasattr(self, "data"): - self.reset() - - def initConfigurationData(self): - self.state = State.INITIAL - self.caches_array = [] - - sys.path = self.orig_syspath.copy() - for mod in [*sys.modules]: - if mod not in self.orig_sysmodules: - del sys.modules[mod] - - self.configwatched = {} - - # Need to preserve BB_CONSOLELOG over resets - consolelog = None - if hasattr(self, "data"): - consolelog = self.data.getVar("BB_CONSOLELOG") - - if CookerFeatures.BASEDATASTORE_TRACKING in self.featureset: - self.enableDataTracking() - - caches_name_array = ['bb.cache:CoreRecipeInfo'] - # We hardcode all known cache types in a single place, here. - if CookerFeatures.HOB_EXTRA_CACHES in self.featureset: - caches_name_array.append("bb.cache_extra:HobRecipeInfo") - if CookerFeatures.RECIPE_SIGGEN_INFO in self.featureset: - caches_name_array.append("bb.cache:SiggenRecipeInfo") - - # At least CoreRecipeInfo will be loaded, so caches_array will never be empty! - # This is the entry point, no further check needed! - for var in caches_name_array: - try: - module_name, cache_name = var.split(':') - module = __import__(module_name, fromlist=(cache_name,)) - self.caches_array.append(getattr(module, cache_name)) - except ImportError as exc: - logger.critical("Unable to import extra RecipeInfo '%s' from '%s': %s" % (cache_name, module_name, exc)) - raise bb.BBHandledException() - - self.databuilder = bb.cookerdata.CookerDataBuilder(self.configuration, False) - self.databuilder.parseBaseConfiguration() - self.data = self.databuilder.data - self.extraconfigdata = {} - - eventlog = self.data.getVar("BB_DEFAULT_EVENTLOG") - if not self.configuration.writeeventlog and eventlog: - self.setupEventLog(eventlog) - - if consolelog: - self.data.setVar("BB_CONSOLELOG", consolelog) - - self.data.setVar('BB_CMDLINE', self.ui_cmdline) - - if CookerFeatures.BASEDATASTORE_TRACKING in self.featureset: - self.disableDataTracking() - - for mc in self.databuilder.mcdata.values(): - self.add_filewatch(mc.getVar("__base_depends", False), configwatcher=True) - - self._baseconfig_set(True) - self._parsecache_set(False) - - def handlePRServ(self): - # Setup a PR Server based on the new configuration - try: - self.prhost = prserv.serv.auto_start(self.data) - except prserv.serv.PRServiceConfigError as e: - bb.fatal("Unable to start PR Server, exiting, check the bitbake-cookerdaemon.log") - - if self.data.getVar("BB_HASHSERVE") == "auto": - # Create a new hash server bound to a unix domain socket - if not self.hashserv: - dbfile = (self.data.getVar("PERSISTENT_DIR") or self.data.getVar("CACHE")) + "/hashserv.db" - upstream = self.data.getVar("BB_HASHSERVE_UPSTREAM") or None - if upstream: - try: - with hashserv.create_client(upstream) as client: - client.ping() - except ImportError as e: - bb.fatal("""Unable to use hash equivalence server at '%s' due to missing or incorrect python module: -%s -Please install the needed module on the build host, or use an environment containing it: - - if you are using bitbake-setup, run 'bitbake-setup install-buildtools' - - openembedded-core layer 
contains 'scripts/install-buildtools' that can also be used - - or set up pip venv -You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in significantly longer build times as bitbake will be unable to reuse prebuilt sstate artefacts.""" - % (upstream, repr(e))) - except ConnectionError as e: - bb.warn("Unable to connect to hash equivalence server at '%s', please correct or remove BB_HASHSERVE_UPSTREAM:\n%s" - % (upstream, repr(e))) - upstream = None - - self.hashservaddr = "unix://%s/hashserve.sock" % self.data.getVar("TOPDIR") - self.hashserv = hashserv.create_server( - self.hashservaddr, - dbfile, - sync=False, - upstream=upstream, - ) - self.hashserv.serve_as_process(log_level=logging.WARNING) - for mc in self.databuilder.mcdata: - self.databuilder.mcorigdata[mc].setVar("BB_HASHSERVE", self.hashservaddr) - self.databuilder.mcdata[mc].setVar("BB_HASHSERVE", self.hashservaddr) - - bb.parse.init_parser(self.data) - - def enableDataTracking(self): - self.configuration.tracking = True - if hasattr(self, "data"): - self.data.enableTracking() - - def disableDataTracking(self): - self.configuration.tracking = False - if hasattr(self, "data"): - self.data.disableTracking() - - def revalidateCaches(self): - bb.parse.clear_cache() - - clean = True - for f in self.configwatched: - if not bb.parse.check_mtime(f, self.configwatched[f]): - bb.server.process.serverlog("Found %s changed, invalid cache" % f) - self._baseconfig_set(False) - self._parsecache_set(False) - clean = False - break - - if clean: - for f in self.parsewatched: - if not bb.parse.check_mtime(f, self.parsewatched[f]): - bb.server.process.serverlog("Found %s changed, invalid cache" % f) - self._parsecache_set(False) - clean = False - break - - if not clean: - bb.parse.BBHandler.cached_statements = {} - - # If writes were made to any of the data stores, we need to recalculate the data - # store cache - if hasattr(self, "databuilder"): - self.databuilder.calc_datastore_hashes() - - def parseConfiguration(self): - self.updateCacheSync() - - # Change nice level if we're asked to - nice = self.data.getVar("BB_NICE_LEVEL") - if nice: - curnice = os.nice(0) - nice = int(nice) - curnice - buildlog.verbose("Renice to %s " % os.nice(nice)) - - if self.recipecaches: - del self.recipecaches - self.multiconfigs = self.databuilder.mcdata.keys() - self.recipecaches = {} - for mc in self.multiconfigs: - self.recipecaches[mc] = bb.cache.CacheData(self.caches_array) - - self.handleCollections(self.data.getVar("BBFILE_COLLECTIONS")) - self.collections = {} - for mc in self.multiconfigs: - self.collections[mc] = CookerCollectFiles(self.bbfile_config_priorities, mc) - - self._parsecache_set(False) - - def setupEventLog(self, eventlog): - if self.eventlog and self.eventlog[0] != eventlog: - bb.event.unregister_UIHhandler(self.eventlog[1]) - self.eventlog = None - if not self.eventlog or self.eventlog[0] != eventlog: - # we log all events to a file if so directed - # register the log file writer as UI Handler - if not os.path.exists(os.path.dirname(eventlog)): - bb.utils.mkdirhier(os.path.dirname(eventlog)) - writer = EventWriter(self, eventlog) - EventLogWriteHandler = namedtuple('EventLogWriteHandler', ['event']) - self.eventlog = (eventlog, bb.event.register_UIHhandler(EventLogWriteHandler(writer)), writer) - - def updateConfigOpts(self, options, environment, cmdline): - self.ui_cmdline = cmdline - clean = True - for o in options: - if o in ['prefile', 'postfile']: - # Only these options may require a reparse - try: - if 
getattr(self.configuration, o) == options[o]: - # Value is the same, no need to mark dirty - continue - except AttributeError: - pass - logger.debug("Marking as dirty due to '%s' option change to '%s'" % (o, options[o])) - print("Marking as dirty due to '%s' option change to '%s'" % (o, options[o])) - clean = False - if hasattr(self.configuration, o): - setattr(self.configuration, o, options[o]) - - if self.configuration.writeeventlog: - self.setupEventLog(self.configuration.writeeventlog) - - bb.msg.loggerDefaultLogLevel = self.configuration.default_loglevel - bb.msg.loggerDefaultDomains = self.configuration.debug_domains - - if hasattr(self, "data"): - origenv = bb.data.init() - for k in environment: - origenv.setVar(k, environment[k]) - self.data.setVar("BB_ORIGENV", origenv) - - for k in bb.utils.approved_variables(): - if k in environment and k not in self.configuration.env: - logger.debug("Updating new environment variable %s to %s" % (k, environment[k])) - self.configuration.env[k] = environment[k] - clean = False - if k in self.configuration.env and k not in environment: - logger.debug("Updating environment variable %s (deleted)" % (k)) - del self.configuration.env[k] - clean = False - if k not in self.configuration.env and k not in environment: - continue - if environment[k] != self.configuration.env[k]: - logger.debug("Updating environment variable %s from %s to %s" % (k, self.configuration.env[k], environment[k])) - self.configuration.env[k] = environment[k] - clean = False - - # Now update all the variables not in the datastore to match - self.configuration.env = environment - - self.revalidateCaches() - if not clean: - logger.debug("Base environment change, triggering reparse") - self.reset() - - def showVersions(self): - - (latest_versions, preferred_versions, required) = self.findProviders() - - logger.plain("%-35s %25s %25s %25s", "Recipe Name", "Latest Version", "Preferred Version", "Required Version") - logger.plain("%-35s %25s %25s %25s\n", "===========", "==============", "=================", "================") - - for p in sorted(self.recipecaches[''].pkg_pn): - preferred = preferred_versions[p] - latest = latest_versions[p] - requiredstr = "" - preferredstr = "" - if required[p]: - if preferred[0] is not None: - requiredstr = preferred[0][0] + ":" + preferred[0][1] + '-' + preferred[0][2] - else: - bb.fatal("REQUIRED_VERSION of package %s not available" % p) - else: - preferredstr = preferred[0][0] + ":" + preferred[0][1] + '-' + preferred[0][2] - - lateststr = latest[0][0] + ":" + latest[0][1] + "-" + latest[0][2] - - if preferred == latest: - preferredstr = "" - - logger.plain("%-35s %25s %25s %25s", p, lateststr, preferredstr, requiredstr) - - def showEnvironment(self, buildfile=None, pkgs_to_build=None): - """ - Show the outer or per-recipe environment - """ - fn = None - envdata = None - mc = '' - if not pkgs_to_build: - pkgs_to_build = [] - - orig_tracking = self.configuration.tracking - if not orig_tracking: - self.enableDataTracking() - self.reset() - # reset() resets to the UI requested value so we have to redo this - self.enableDataTracking() - - def mc_base(p): - if p.startswith('mc:'): - s = p.split(':') - if len(s) == 2: - return s[1] - return None - - if buildfile: - # Parse the configuration here. 
We need to do it explicitly here since - # this showEnvironment() code path doesn't use the cache - self.parseConfiguration() - - fn, cls, mc = bb.cache.virtualfn2realfn(buildfile) - fn = self.matchFile(fn, mc) - fn = bb.cache.realfn2virtual(fn, cls, mc) - elif len(pkgs_to_build) == 1: - mc = mc_base(pkgs_to_build[0]) - if not mc: - ignore = self.data.getVar("ASSUME_PROVIDED") or "" - if pkgs_to_build[0] in set(ignore.split()): - bb.fatal("%s is in ASSUME_PROVIDED" % pkgs_to_build[0]) - - taskdata, runlist = self.buildTaskData(pkgs_to_build, None, self.configuration.halt, allowincomplete=True) - - mc = runlist[0][0] - fn = runlist[0][3] - - if fn: - try: - layername = self.collections[mc].calc_bbfile_priority(fn)[2] - envdata = self.databuilder.parseRecipe(fn, self.collections[mc].get_file_appends(fn), layername) - except Exception as e: - parselog.exception("Unable to read %s", fn) - raise - else: - if not mc in self.databuilder.mcdata: - bb.fatal('No multiconfig named "%s" found' % mc) - envdata = self.databuilder.mcdata[mc] - data.expandKeys(envdata) - parse.ast.runAnonFuncs(envdata) - - # Display history - with closing(StringIO()) as env: - self.data.inchistory.emit(env) - logger.plain(env.getvalue()) - - # emit variables and shell functions - with closing(StringIO()) as env: - data.emit_env(env, envdata, True) - logger.plain(env.getvalue()) - - # emit the metadata which isn't valid shell - for e in sorted(envdata.keys()): - if envdata.getVarFlag(e, 'func', False) and envdata.getVarFlag(e, 'python', False): - logger.plain("\npython %s () {\n%s}\n", e, envdata.getVar(e, False)) - - if not orig_tracking: - self.disableDataTracking() - self.reset() - - def buildTaskData(self, pkgs_to_build, task, halt, allowincomplete=False): - """ - Prepare a runqueue and taskdata object for iteration over pkgs_to_build - """ - bb.event.fire(bb.event.TreeDataPreparationStarted(), self.data) - - # A task of None means use the default task - if task is None: - task = self.configuration.cmd - if not task.startswith("do_"): - task = "do_%s" % task - - targetlist = self.checkPackages(pkgs_to_build, task) - fulltargetlist = [] - defaulttask_implicit = '' - defaulttask_explicit = False - wildcard = False - - # Wild card expansion: - # Replace string such as "mc:*:bash" - # into "mc:A:bash mc:B:bash bash" - for k in targetlist: - if k.startswith("mc:") and k.count(':') >= 2: - if wildcard: - bb.fatal('multiconfig conflict') - if k.split(":")[1] == "*": - wildcard = True - for mc in self.multiconfigs: - if mc: - fulltargetlist.append(k.replace('*', mc)) - # implicit default task - else: - defaulttask_implicit = k.split(":")[2] - else: - fulltargetlist.append(k) - else: - defaulttask_explicit = True - fulltargetlist.append(k) - - if not defaulttask_explicit and defaulttask_implicit != '': - fulltargetlist.append(defaulttask_implicit) - - bb.debug(1,"Target list: %s" % (str(fulltargetlist))) - taskdata = {} - localdata = {} - - for mc in self.multiconfigs: - taskdata[mc] = bb.taskdata.TaskData(halt, skiplist=self.skiplist_by_mc[mc], allowincomplete=allowincomplete) - localdata[mc] = bb.data.createCopy(self.databuilder.mcdata[mc]) - bb.data.expandKeys(localdata[mc]) - - current = 0 - runlist = [] - for k in fulltargetlist: - origk = k - mc = "" - if k.startswith("mc:") and k.count(':') >= 2: - mc = k.split(":")[1] - k = ":".join(k.split(":")[2:]) - ktask = task - if ":do_" in k: - k2 = k.split(":do_") - k = k2[0] - ktask = k2[1] - - if mc not in self.multiconfigs: - bb.fatal("Multiconfig dependency %s depends on 
nonexistent multiconfig configuration named %s" % (origk, mc)) - - taskdata[mc].add_provider(localdata[mc], self.recipecaches[mc], k) - current += 1 - if not ktask.startswith("do_"): - ktask = "do_%s" % ktask - if k not in taskdata[mc].build_targets or not taskdata[mc].build_targets[k]: - # e.g. in ASSUME_PROVIDED - continue - fn = taskdata[mc].build_targets[k][0] - runlist.append([mc, k, ktask, fn]) - bb.event.fire(bb.event.TreeDataPreparationProgress(current, len(fulltargetlist)), self.data) - - havemc = False - for mc in self.multiconfigs: - if taskdata[mc].get_mcdepends(): - havemc = True - - # No need to check providers if there are no mcdeps or not an mc build - if havemc or len(self.multiconfigs) > 1: - seen = set() - new = True - # Make sure we can provide the multiconfig dependency - while new: - mcdeps = set() - # Add unresolved first, so we can get multiconfig indirect dependencies on time - for mc in self.multiconfigs: - taskdata[mc].add_unresolved(localdata[mc], self.recipecaches[mc]) - mcdeps |= set(taskdata[mc].get_mcdepends()) - new = False - for k in mcdeps: - if k in seen: - continue - l = k.split(':') - depmc = l[2] - if depmc not in self.multiconfigs: - bb.fatal("Multiconfig dependency %s depends on nonexistent multiconfig configuration named %s" % (k,depmc)) - else: - logger.debug("Adding providers for multiconfig dependency %s" % l[3]) - taskdata[depmc].add_provider(localdata[depmc], self.recipecaches[depmc], l[3]) - seen.add(k) - new = True - - for mc in self.multiconfigs: - taskdata[mc].add_unresolved(localdata[mc], self.recipecaches[mc]) - - bb.event.fire(bb.event.TreeDataPreparationCompleted(len(fulltargetlist)), self.data) - return taskdata, runlist - - def prepareTreeData(self, pkgs_to_build, task, halt=False): - """ - Prepare a runqueue and taskdata object for iteration over pkgs_to_build - """ - - # We set halt to False here to prevent unbuildable targets raising - # an exception when we're just generating data - taskdata, runlist = self.buildTaskData(pkgs_to_build, task, halt, allowincomplete=True) - - return runlist, taskdata - - ######## WARNING : this function requires cache_extra to be enabled ######## - - def generateTaskDepTreeData(self, pkgs_to_build, task): - """ - Create a dependency graph of pkgs_to_build including reverse dependency - information. 
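    As an illustrative sketch (key names taken from buildDependTree below,
    recipe names invented for the example), a call such as
    generateTaskDepTreeData(['bash'], 'build') returns a dict with
    'depends', 'tdepends', 'pn', 'rdepends-pn', 'packages' and
    'providermap' entries, e.g. depgraph['tdepends']['bash.do_compile']
    is a list of 'recipename.taskname' strings the task depends on, and
    depgraph['pn']['bash']['filename'] is the path to the recipe file.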
- """ - if not task.startswith("do_"): - task = "do_%s" % task - - runlist, taskdata = self.prepareTreeData(pkgs_to_build, task, halt=True) - rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist) - rq.rqdata.prepare() - return self.buildDependTree(rq, taskdata) - - @staticmethod - def add_mc_prefix(mc, pn): - if mc: - return "mc:%s:%s" % (mc, pn) - return pn - - def buildDependTree(self, rq, taskdata): - seen_fns = [] - depend_tree = {} - depend_tree["depends"] = {} - depend_tree["tdepends"] = {} - depend_tree["pn"] = {} - depend_tree["rdepends-pn"] = {} - depend_tree["packages"] = {} - depend_tree["rdepends-pkg"] = {} - depend_tree["rrecs-pkg"] = {} - depend_tree['providermap'] = {} - depend_tree["layer-priorities"] = self.bbfile_config_priorities - - for mc in taskdata: - for name, fn in list(taskdata[mc].get_providermap().items()): - pn = self.recipecaches[mc].pkg_fn[fn] - pn = self.add_mc_prefix(mc, pn) - if name != pn: - version = "%s:%s-%s" % self.recipecaches[mc].pkg_pepvpr[fn] - depend_tree['providermap'][name] = (pn, version) - - for tid in rq.rqdata.runtaskentries: - (mc, fn, taskname, taskfn) = bb.runqueue.split_tid_mcfn(tid) - pn = self.recipecaches[mc].pkg_fn[taskfn] - pn = self.add_mc_prefix(mc, pn) - version = "%s:%s-%s" % self.recipecaches[mc].pkg_pepvpr[taskfn] - if pn not in depend_tree["pn"]: - depend_tree["pn"][pn] = {} - depend_tree["pn"][pn]["filename"] = taskfn - depend_tree["pn"][pn]["version"] = version - depend_tree["pn"][pn]["inherits"] = self.recipecaches[mc].inherits.get(taskfn, None) - - # if we have extra caches, list all attributes they bring in - extra_info = [] - for cache_class in self.caches_array: - if type(cache_class) is type and issubclass(cache_class, bb.cache.RecipeInfoCommon) and hasattr(cache_class, 'cachefields'): - cachefields = getattr(cache_class, 'cachefields', []) - extra_info = extra_info + cachefields - - # for all attributes stored, add them to the dependency tree - for ei in extra_info: - depend_tree["pn"][pn][ei] = vars(self.recipecaches[mc])[ei][taskfn] - - - dotname = "%s.%s" % (pn, bb.runqueue.taskname_from_tid(tid)) - if not dotname in depend_tree["tdepends"]: - depend_tree["tdepends"][dotname] = [] - for dep in rq.rqdata.runtaskentries[tid].depends: - (depmc, depfn, _, deptaskfn) = bb.runqueue.split_tid_mcfn(dep) - deppn = self.recipecaches[depmc].pkg_fn[deptaskfn] - if depmc: - depmc = "mc:" + depmc + ":" - depend_tree["tdepends"][dotname].append("%s%s.%s" % (depmc, deppn, bb.runqueue.taskname_from_tid(dep))) - if taskfn not in seen_fns: - seen_fns.append(taskfn) - packages = [] - - depend_tree["depends"][pn] = [] - for dep in taskdata[mc].depids[taskfn]: - depend_tree["depends"][pn].append(dep) - - depend_tree["rdepends-pn"][pn] = [] - for rdep in taskdata[mc].rdepids[taskfn]: - depend_tree["rdepends-pn"][pn].append(rdep) - - rdepends = self.recipecaches[mc].rundeps[taskfn] - for package in rdepends: - depend_tree["rdepends-pkg"][package] = [] - for rdepend in rdepends[package]: - depend_tree["rdepends-pkg"][package].append(rdepend) - packages.append(package) - - rrecs = self.recipecaches[mc].runrecs[taskfn] - for package in rrecs: - depend_tree["rrecs-pkg"][package] = [] - for rdepend in rrecs[package]: - depend_tree["rrecs-pkg"][package].append(rdepend) - if not package in packages: - packages.append(package) - - for package in packages: - if package not in depend_tree["packages"]: - depend_tree["packages"][package] = {} - depend_tree["packages"][package]["pn"] = pn - 
depend_tree["packages"][package]["filename"] = taskfn - depend_tree["packages"][package]["version"] = version - - return depend_tree - - ######## WARNING : this function requires cache_extra to be enabled ######## - def generatePkgDepTreeData(self, pkgs_to_build, task): - """ - Create a dependency tree of pkgs_to_build, returning the data. - """ - if not task.startswith("do_"): - task = "do_%s" % task - - _, taskdata = self.prepareTreeData(pkgs_to_build, task) - - seen_fns = [] - depend_tree = {} - depend_tree["depends"] = {} - depend_tree["pn"] = {} - depend_tree["rdepends-pn"] = {} - depend_tree["rdepends-pkg"] = {} - depend_tree["rrecs-pkg"] = {} - - # if we have extra caches, list all attributes they bring in - extra_info = [] - for cache_class in self.caches_array: - if type(cache_class) is type and issubclass(cache_class, bb.cache.RecipeInfoCommon) and hasattr(cache_class, 'cachefields'): - cachefields = getattr(cache_class, 'cachefields', []) - extra_info = extra_info + cachefields - - tids = [] - for mc in taskdata: - for tid in taskdata[mc].taskentries: - tids.append(tid) - - for tid in tids: - (mc, fn, taskname, taskfn) = bb.runqueue.split_tid_mcfn(tid) - - pn = self.recipecaches[mc].pkg_fn[taskfn] - pn = self.add_mc_prefix(mc, pn) - - if pn not in depend_tree["pn"]: - depend_tree["pn"][pn] = {} - depend_tree["pn"][pn]["filename"] = taskfn - version = "%s:%s-%s" % self.recipecaches[mc].pkg_pepvpr[taskfn] - depend_tree["pn"][pn]["version"] = version - rdepends = self.recipecaches[mc].rundeps[taskfn] - rrecs = self.recipecaches[mc].runrecs[taskfn] - depend_tree["pn"][pn]["inherits"] = self.recipecaches[mc].inherits.get(taskfn, None) - - # for all extra attributes stored, add them to the dependency tree - for ei in extra_info: - depend_tree["pn"][pn][ei] = vars(self.recipecaches[mc])[ei][taskfn] - - if taskfn not in seen_fns: - seen_fns.append(taskfn) - - depend_tree["depends"][pn] = [] - for dep in taskdata[mc].depids[taskfn]: - pn_provider = "" - if dep in taskdata[mc].build_targets and taskdata[mc].build_targets[dep]: - fn_provider = taskdata[mc].build_targets[dep][0] - pn_provider = self.recipecaches[mc].pkg_fn[fn_provider] - else: - pn_provider = dep - pn_provider = self.add_mc_prefix(mc, pn_provider) - depend_tree["depends"][pn].append(pn_provider) - - depend_tree["rdepends-pn"][pn] = [] - for rdep in taskdata[mc].rdepids[taskfn]: - pn_rprovider = "" - if rdep in taskdata[mc].run_targets and taskdata[mc].run_targets[rdep]: - fn_rprovider = taskdata[mc].run_targets[rdep][0] - pn_rprovider = self.recipecaches[mc].pkg_fn[fn_rprovider] - else: - pn_rprovider = rdep - pn_rprovider = self.add_mc_prefix(mc, pn_rprovider) - depend_tree["rdepends-pn"][pn].append(pn_rprovider) - - depend_tree["rdepends-pkg"].update(rdepends) - depend_tree["rrecs-pkg"].update(rrecs) - - return depend_tree - - def generateDepTreeEvent(self, pkgs_to_build, task): - """ - Create a task dependency graph of pkgs_to_build. - Generate an event with the result - """ - depgraph = self.generateTaskDepTreeData(pkgs_to_build, task) - bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.data) - - def generateDotGraphFiles(self, pkgs_to_build, task): - """ - Create a task dependency graph of pkgs_to_build. - Save the result to a set of .dot files. 
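    The outputs are 'pn-buildlist' (one recipe name per line) and
    'task-depends.dot'; the .dot file can be rendered with Graphviz, e.g.:

        dot -Tsvg task-depends.dot -o task-depends.svg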
- """ - - depgraph = self.generateTaskDepTreeData(pkgs_to_build, task) - - pns = depgraph["pn"].keys() - if pns: - with open('pn-buildlist', 'w') as f: - f.write("%s\n" % "\n".join(sorted(pns))) - logger.info("PN build list saved to 'pn-buildlist'") - - # Remove old format output files to ensure no confusion with stale data - try: - os.unlink('pn-depends.dot') - except FileNotFoundError: - pass - try: - os.unlink('package-depends.dot') - except FileNotFoundError: - pass - try: - os.unlink('recipe-depends.dot') - except FileNotFoundError: - pass - - with open('task-depends.dot', 'w') as f: - f.write("digraph depends {\n") - for task in sorted(depgraph["tdepends"]): - (pn, taskname) = task.rsplit(".", 1) - fn = depgraph["pn"][pn]["filename"] - version = depgraph["pn"][pn]["version"] - f.write('"%s.%s" [label="%s %s\\n%s\\n%s"]\n' % (pn, taskname, pn, taskname, version, fn)) - for dep in sorted(depgraph["tdepends"][task]): - f.write('"%s" -> "%s"\n' % (task, dep)) - f.write("}\n") - logger.info("Task dependencies saved to 'task-depends.dot'") - - def show_appends_with_no_recipes(self): - appends_without_recipes = {} - # Determine which bbappends haven't been applied - for mc in self.multiconfigs: - # First get list of recipes, including skipped - recipefns = list(self.recipecaches[mc].pkg_fn.keys()) - recipefns.extend(self.skiplist_by_mc[mc].keys()) - - # Work out list of bbappends that have been applied - applied_appends = [] - for fn in recipefns: - applied_appends.extend(self.collections[mc].get_file_appends(fn)) - - appends_without_recipes[mc] = [] - for _, appendfn in self.collections[mc].bbappends: - if not appendfn in applied_appends: - appends_without_recipes[mc].append(appendfn) - - msgs = [] - for mc in sorted(appends_without_recipes.keys()): - if appends_without_recipes[mc]: - msgs.append('No recipes in %s available for:\n %s' % (mc if mc else 'default', - '\n '.join(appends_without_recipes[mc]))) - - if msgs: - bb.fatal("\n".join(msgs)) - - def handlePrefProviders(self): - - for mc in self.multiconfigs: - localdata = data.createCopy(self.databuilder.mcdata[mc]) - bb.data.expandKeys(localdata) - - # Handle PREFERRED_PROVIDERS - for p in (localdata.getVar('PREFERRED_PROVIDERS') or "").split(): - try: - (providee, provider) = p.split(':') - except: - providerlog.critical("Malformed option in PREFERRED_PROVIDERS variable: %s" % p) - continue - if providee in self.recipecaches[mc].preferred and self.recipecaches[mc].preferred[providee] != provider: - providerlog.error("conflicting preferences for %s: both %s and %s specified", providee, provider, self.recipecaches[mc].preferred[providee]) - self.recipecaches[mc].preferred[providee] = provider - - def findConfigFilePath(self, configfile): - """ - Find the location on disk of configfile and if it exists and was parsed by BitBake - emit the ConfigFilePathFound event with the path to the file. - """ - path = bb.cookerdata.findConfigFile(configfile, self.data) - if not path: - return - - # Generate a list of parsed configuration files by searching the files - # listed in the __depends and __base_depends variables with a .conf suffix. 
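# Worked example of the suffix matching done below (illustrative path):
# for path = '/srv/layers/meta/conf/machine/qemux86-64.conf',
# path.rpartition("conf/") yields
# ('/srv/layers/meta/', 'conf/', 'machine/qemux86-64.conf'), so match
# becomes 'conf/machine/qemux86-64.conf' and any parsed .conf file whose
# path ends with that suffix fires the ConfigFilePathFound event.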
- conffiles = [] - dep_files = self.data.getVar('__base_depends', False) or [] - dep_files = dep_files + (self.data.getVar('__depends', False) or []) - - for f in dep_files: - if f[0].endswith(".conf"): - conffiles.append(f[0]) - - _, conf, conffile = path.rpartition("conf/") - match = os.path.join(conf, conffile) - # Try and find matches for conf/conffilename.conf as we don't always - # have the full path to the file. - for cfg in conffiles: - if cfg.endswith(match): - bb.event.fire(bb.event.ConfigFilePathFound(path), - self.data) - break - - def findFilesMatchingInDir(self, filepattern, directory): - """ - Searches for files containing the substring 'filepattern' which are children of - 'directory' in each BBPATH. i.e. to find all rootfs package classes available - to BitBake one could call findFilesMatchingInDir(self, 'rootfs_', 'classes') - or to find all machine configuration files one could call: - findFilesMatchingInDir(self, '.conf', 'conf/machine') - """ - - matches = [] - bbpaths = self.data.getVar('BBPATH').split(':') - for path in bbpaths: - dirpath = os.path.join(path, directory) - if os.path.exists(dirpath): - for root, dirs, files in os.walk(dirpath): - for f in files: - if filepattern in f: - matches.append(f) - - if matches: - bb.event.fire(bb.event.FilesMatchingFound(filepattern, matches), self.data) - - def testCookerCommandEvent(self, filepattern): - # Dummy command used by OEQA selftest to test tinfoil without IO - matches = ["A", "B"] - bb.event.fire(bb.event.FilesMatchingFound(filepattern, matches), self.data) - - def findProviders(self, mc=''): - return bb.providers.findProviders(self.databuilder.mcdata[mc], self.recipecaches[mc], self.recipecaches[mc].pkg_pn) - - def findBestProvider(self, pn, mc=''): - if pn in self.recipecaches[mc].providers: - filenames = self.recipecaches[mc].providers[pn] - eligible, foundUnique = bb.providers.filterProviders(filenames, pn, self.databuilder.mcdata[mc], self.recipecaches[mc]) - if eligible is not None: - filename = eligible[0] - else: - filename = None - return None, None, None, filename - elif pn in self.recipecaches[mc].pkg_pn: - (latest, latest_f, preferred_ver, preferred_file, required) = bb.providers.findBestProvider(pn, self.databuilder.mcdata[mc], self.recipecaches[mc], self.recipecaches[mc].pkg_pn) - if required and preferred_file is None: - return None, None, None, None - return (latest, latest_f, preferred_ver, preferred_file) - else: - return None, None, None, None - - def findConfigFiles(self, varname): - """ - Find config files which are appropriate values for varname. - i.e. 
MACHINE, DISTRO - """ - possible = [] - var = varname.lower() - - data = self.data - # iterate configs - bbpaths = data.getVar('BBPATH').split(':') - for path in bbpaths: - confpath = os.path.join(path, "conf", var) - if os.path.exists(confpath): - for root, dirs, files in os.walk(confpath): - # get all child files, these are appropriate values - for f in files: - val, sep, end = f.rpartition('.') - if end == 'conf': - possible.append(val) - - if possible: - bb.event.fire(bb.event.ConfigFilesFound(var, possible), self.data) - - def findInheritsClass(self, klass): - """ - Find all recipes which inherit the specified class - """ - pkg_list = [] - - for pfn in self.recipecaches[''].pkg_fn: - inherits = self.recipecaches[''].inherits.get(pfn, None) - if inherits and klass in inherits: - pkg_list.append(self.recipecaches[''].pkg_fn[pfn]) - - return pkg_list - - def generateTargetsTree(self, klass=None, pkgs=None): - """ - Generate a dependency tree of buildable targets - Generate an event with the result - """ - # if the caller hasn't specified a pkgs list default to universe - if not pkgs: - pkgs = ['universe'] - # if inherited_class passed ensure all recipes which inherit the - # specified class are included in pkgs - if klass: - extra_pkgs = self.findInheritsClass(klass) - pkgs = pkgs + extra_pkgs - - # generate a dependency tree for all our packages - tree = self.generatePkgDepTreeData(pkgs, 'build') - bb.event.fire(bb.event.TargetsTreeGenerated(tree), self.data) - - def interactiveMode( self ): - """Drop off into a shell""" - try: - from bb import shell - except ImportError: - parselog.exception("Interactive mode not available") - raise bb.BBHandledException() - else: - shell.start( self ) - - - def handleCollections(self, collections): - """Handle collections""" - errors = False - self.bbfile_config_priorities = [] - if collections: - collection_priorities = {} - collection_depends = {} - collection_list = collections.split() - min_prio = 0 - for c in collection_list: - bb.debug(1,'Processing %s in collection list' % (c)) - - # Get collection priority if defined explicitly - priority = self.data.getVar("BBFILE_PRIORITY_%s" % c) - if priority: - try: - prio = int(priority) - except ValueError: - parselog.error("invalid value for BBFILE_PRIORITY_%s: \"%s\"", c, priority) - errors = True - if min_prio == 0 or prio < min_prio: - min_prio = prio - collection_priorities[c] = prio - else: - collection_priorities[c] = None - - # Check dependencies and store information for priority calculation - deps = self.data.getVar("LAYERDEPENDS_%s" % c) - if deps: - try: - depDict = bb.utils.explode_dep_versions2(deps) - except bb.utils.VersionStringException as vse: - bb.fatal('Error parsing LAYERDEPENDS_%s: %s' % (c, str(vse))) - for dep, oplist in list(depDict.items()): - if dep in collection_list: - for opstr in oplist: - layerver = self.data.getVar("LAYERVERSION_%s" % dep) - (op, depver) = opstr.split() - if layerver: - try: - res = bb.utils.vercmp_string_op(layerver, depver, op) - except bb.utils.VersionStringException as vse: - bb.fatal('Error parsing LAYERDEPENDS_%s: %s' % (c, str(vse))) - if not res: - parselog.error("Layer '%s' depends on version %s of layer '%s', but version %s is currently enabled in your configuration. Check that you are using the correct matching versions/branches of these two layers.", c, opstr, dep, layerver) - errors = True - else: - parselog.error("Layer '%s' depends on version %s of layer '%s', which exists in your configuration but does not specify a version. 
Check that you are using the correct matching versions/branches of these two layers.", c, opstr, dep) - errors = True - else: - parselog.error("Layer '%s' depends on layer '%s', but this layer is not enabled in your configuration", c, dep) - errors = True - collection_depends[c] = list(depDict.keys()) - else: - collection_depends[c] = [] - - # Check recommends and store information for priority calculation - recs = self.data.getVar("LAYERRECOMMENDS_%s" % c) - if recs: - try: - recDict = bb.utils.explode_dep_versions2(recs) - except bb.utils.VersionStringException as vse: - bb.fatal('Error parsing LAYERRECOMMENDS_%s: %s' % (c, str(vse))) - for rec, oplist in list(recDict.items()): - if rec in collection_list: - if oplist: - opstr = oplist[0] - layerver = self.data.getVar("LAYERVERSION_%s" % rec) - if layerver: - (op, recver) = opstr.split() - try: - res = bb.utils.vercmp_string_op(layerver, recver, op) - except bb.utils.VersionStringException as vse: - bb.fatal('Error parsing LAYERRECOMMENDS_%s: %s' % (c, str(vse))) - if not res: - parselog.debug3("Layer '%s' recommends version %s of layer '%s', but version %s is currently enabled in your configuration. Check that you are using the correct matching versions/branches of these two layers.", c, opstr, rec, layerver) - continue - else: - parselog.debug3("Layer '%s' recommends version %s of layer '%s', which exists in your configuration but does not specify a version. Check that you are using the correct matching versions/branches of these two layers.", c, opstr, rec) - continue - parselog.debug3("Layer '%s' recommends layer '%s', so we are adding it", c, rec) - collection_depends[c].append(rec) - else: - parselog.debug3("Layer '%s' recommends layer '%s', but this layer is not enabled in your configuration", c, rec) - - # Recursively work out collection priorities based on dependencies - def calc_layer_priority(collection): - if not collection_priorities[collection]: - max_depprio = min_prio - for dep in collection_depends[collection]: - calc_layer_priority(dep) - depprio = collection_priorities[dep] - if depprio > max_depprio: - max_depprio = depprio - max_depprio += 1 - parselog.debug("Calculated priority of layer %s as %d", collection, max_depprio) - collection_priorities[collection] = max_depprio - - # Calculate all layer priorities using calc_layer_priority and store in bbfile_config_priorities - for c in collection_list: - calc_layer_priority(c) - regex = self.data.getVar("BBFILE_PATTERN_%s" % c) - if regex is None: - parselog.error("BBFILE_PATTERN_%s not defined" % c) - errors = True - continue - elif regex == "": - parselog.debug("BBFILE_PATTERN_%s is empty" % c) - cre = re.compile('^NULL$') - errors = False - else: - try: - cre = re.compile(regex) - except re.error: - parselog.error("BBFILE_PATTERN_%s \"%s\" is not a valid regular expression", c, regex) - errors = True - continue - self.bbfile_config_priorities.append((c, regex, cre, collection_priorities[c])) - if errors: - # We've already printed the actual error(s) - raise CollectionError("Errors during parsing layer configuration") - - def buildSetVars(self): - """ - Setup any variables needed before starting a build - """ - t = time.gmtime() - for mc in self.databuilder.mcdata: - ds = self.databuilder.mcdata[mc] - if not ds.getVar("BUILDNAME", False): - ds.setVar("BUILDNAME", "${DATE}${TIME}") - ds.setVar("BUILDSTART", time.strftime('%m/%d/%Y %H:%M:%S', t)) - ds.setVar("DATE", time.strftime('%Y%m%d', t)) - ds.setVar("TIME", time.strftime('%H%M%S', t)) - - def 
reset_mtime_caches(self): - """ - Reset mtime caches - this is particularly important when memory resident as something - which is cached is not unlikely to have changed since the last invocation (e.g. a - file associated with a recipe might have been modified by the user). - """ - build.reset_cache() - bb.fetch._checksum_cache.mtime_cache.clear() - siggen_cache = getattr(bb.parse.siggen, 'checksum_cache', None) - if siggen_cache: - bb.parse.siggen.checksum_cache.mtime_cache.clear() - - def matchFiles(self, bf, mc=''): - """ - Find the .bb files which match the expression in 'buildfile'. - """ - if bf.startswith("/") or bf.startswith("../"): - bf = os.path.abspath(bf) - - collections = {mc: CookerCollectFiles(self.bbfile_config_priorities, mc)} - filelist, masked, searchdirs = collections[mc].collect_bbfiles(self.databuilder.mcdata[mc], self.databuilder.mcdata[mc]) - try: - os.stat(bf) - bf = os.path.abspath(bf) - return [bf] - except OSError: - regexp = re.compile(bf) - matches = [] - for f in filelist: - if regexp.search(f) and os.path.isfile(f): - matches.append(f) - return matches - - def matchFile(self, buildfile, mc=''): - """ - Find the .bb file which matches the expression in 'buildfile'. - Raise an error if multiple files - """ - matches = self.matchFiles(buildfile, mc) - if len(matches) != 1: - if matches: - msg = "Unable to match '%s' to a specific recipe file - %s matches found:" % (buildfile, len(matches)) - if matches: - for f in matches: - msg += "\n %s" % f - parselog.error(msg) - else: - parselog.error("Unable to find any recipe file matching '%s'" % buildfile) - raise NoSpecificMatch - return matches[0] - - def buildFile(self, buildfile, task): - """ - Build the file matching regexp buildfile - """ - bb.event.fire(bb.event.BuildInit(), self.data) - - # Too many people use -b because they think it's how you normally - # specify a target to be built, so show a warning - bb.warn("Buildfile specified, dependencies will not be handled. If this is not what you want, do not use -b / --buildfile.") - - self.buildFileInternal(buildfile, task) - - def buildFileInternal(self, buildfile, task, fireevents=True, quietlog=False): - """ - Build the file matching regexp buildfile - """ - - # Parse the configuration here. 
We need to do it explicitly here since - # buildFile() doesn't use the cache - self.parseConfiguration() - - # If we are told to do the None task then query the default task - if task is None: - task = self.configuration.cmd - if not task.startswith("do_"): - task = "do_%s" % task - - fn, cls, mc = bb.cache.virtualfn2realfn(buildfile) - fn = self.matchFile(fn, mc) - - self.buildSetVars() - self.reset_mtime_caches() - - bb_caches = bb.cache.MulticonfigCache(self.databuilder, self.databuilder.data_hash, self.caches_array) - - layername = self.collections[mc].calc_bbfile_priority(fn)[2] - infos = bb_caches[mc].parse(fn, self.collections[mc].get_file_appends(fn), layername) - infos = dict(infos) - - fn = bb.cache.realfn2virtual(fn, cls, mc) - try: - info_array = infos[fn] - except KeyError: - bb.fatal("%s does not exist" % fn) - - if info_array[0].skipped: - bb.fatal("%s was skipped: %s" % (fn, info_array[0].skipreason)) - - self.recipecaches[mc].add_from_recipeinfo(fn, info_array) - - # Tweak some variables - item = info_array[0].pn - self.recipecaches[mc].ignored_dependencies = set() - self.recipecaches[mc].bbfile_priority[fn] = 1 - self.configuration.limited_deps = True - - # Remove external dependencies - self.recipecaches[mc].task_deps[fn]['depends'] = {} - self.recipecaches[mc].deps[fn] = [] - self.recipecaches[mc].rundeps[fn] = defaultdict(list) - self.recipecaches[mc].runrecs[fn] = defaultdict(list) - - bb.parse.siggen.setup_datacache(self.recipecaches) - - # Invalidate task for target if force mode active - if self.configuration.force: - logger.verbose("Invalidate task %s, %s", task, fn) - bb.parse.siggen.invalidate_task(task, fn) - - # Setup taskdata structure - taskdata = {} - taskdata[mc] = bb.taskdata.TaskData(self.configuration.halt) - taskdata[mc].add_provider(self.databuilder.mcdata[mc], self.recipecaches[mc], item) - - if quietlog: - rqloglevel = bb.runqueue.logger.getEffectiveLevel() - bb.runqueue.logger.setLevel(logging.WARNING) - - buildname = self.databuilder.mcdata[mc].getVar("BUILDNAME") - if fireevents: - bb.event.fire(bb.event.BuildStarted(buildname, [item]), self.databuilder.mcdata[mc]) - if self.eventlog: - self.eventlog[2].write_variables() - bb.event.enable_heartbeat() - - # Execute the runqueue - runlist = [[mc, item, task, fn]] - - rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist) - - def buildFileIdle(server, rq, halt): - - msg = None - interrupted = 0 - if halt or self.state == State.FORCE_SHUTDOWN: - rq.finish_runqueue(True) - msg = "Forced shutdown" - interrupted = 2 - elif self.state == State.SHUTDOWN: - rq.finish_runqueue(False) - msg = "Stopped build" - interrupted = 1 - failures = 0 - try: - retval = rq.execute_runqueue() - except runqueue.TaskFailure as exc: - failures += len(exc.args) - retval = False - except SystemExit as exc: - if quietlog: - bb.runqueue.logger.setLevel(rqloglevel) - return bb.server.process.idleFinish(str(exc)) - - if not retval: - if fireevents: - bb.event.fire(bb.event.BuildCompleted(len(rq.rqdata.runtaskentries), buildname, item, failures, interrupted), self.databuilder.mcdata[mc]) - bb.event.disable_heartbeat() - # We trashed self.recipecaches above - self._parsecache_set(False) - self.configuration.limited_deps = False - bb.parse.siggen.reset(self.data) - if quietlog: - bb.runqueue.logger.setLevel(rqloglevel) - return bb.server.process.idleFinish(msg) - - return retval - - self.idleCallBackRegister(buildFileIdle, rq) - - def getTaskSignatures(self, target, tasks): - sig = [] - getAllTaskSignatures 
= False - - if not tasks: - tasks = ["do_build"] - getAllTaskSignatures = True - - for task in tasks: - taskdata, runlist = self.buildTaskData(target, task, self.configuration.halt) - rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist) - rq.rqdata.prepare() - - for l in runlist: - mc, pn, taskname, fn = l - - taskdep = rq.rqdata.dataCaches[mc].task_deps[fn] - for t in taskdep['tasks']: - if t in taskdep['nostamp'] or "setscene" in t: - continue - tid = bb.runqueue.build_tid(mc, fn, t) - - if t in task or getAllTaskSignatures: - try: - sig.append([pn, t, rq.rqdata.get_task_unihash(tid)]) - except KeyError: - sig.append(self.getTaskSignatures(target, [t])[0]) - - return sig - - def buildTargets(self, targets, task): - """ - Attempt to build the targets specified - """ - - def buildTargetsIdle(server, rq, halt): - msg = None - interrupted = 0 - if halt or self.state == State.FORCE_SHUTDOWN: - bb.event._should_exit.set() - rq.finish_runqueue(True) - msg = "Forced shutdown" - interrupted = 2 - elif self.state == State.SHUTDOWN: - rq.finish_runqueue(False) - msg = "Stopped build" - interrupted = 1 - failures = 0 - try: - retval = rq.execute_runqueue() - except runqueue.TaskFailure as exc: - failures += len(exc.args) - retval = False - except SystemExit as exc: - return bb.server.process.idleFinish(str(exc)) - - if not retval: - try: - for mc in self.multiconfigs: - bb.event.fire(bb.event.BuildCompleted(len(rq.rqdata.runtaskentries), buildname, targets, failures, interrupted), self.databuilder.mcdata[mc]) - finally: - bb.event.disable_heartbeat() - return bb.server.process.idleFinish(msg) - - return retval - - self.reset_mtime_caches() - self.buildSetVars() - - # If we are told to do the None task then query the default task - if task is None: - task = self.configuration.cmd - - if not task.startswith("do_"): - task = "do_%s" % task - - packages = [target if ':' in target else '%s:%s' % (target, task) for target in targets] - - bb.event.fire(bb.event.BuildInit(packages), self.data) - - taskdata, runlist = self.buildTaskData(targets, task, self.configuration.halt) - - buildname = self.data.getVar("BUILDNAME", False) - - # make targets to always look as :do_ - ntargets = [] - for target in runlist: - if target[0]: - ntargets.append("mc:%s:%s:%s" % (target[0], target[1], target[2])) - ntargets.append("%s:%s" % (target[1], target[2])) - - for mc in self.multiconfigs: - bb.event.fire(bb.event.BuildStarted(buildname, ntargets), self.databuilder.mcdata[mc]) - if self.eventlog: - self.eventlog[2].write_variables() - bb.event.enable_heartbeat() - - rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist) - if 'universe' in targets: - rq.rqdata.warn_multi_bb = True - - self.idleCallBackRegister(buildTargetsIdle, rq) - - - def getAllKeysWithFlags(self, flaglist): - def dummy_autorev(d): - return - - dump = {} - # Horrible but for now we need to avoid any sideeffects of autorev being called - saved = bb.fetch2.get_autorev - bb.fetch2.get_autorev = dummy_autorev - for k in self.data.keys(): - try: - expand = True - flags = self.data.getVarFlags(k) - if flags and "func" in flags and "python" in flags: - expand = False - v = self.data.getVar(k, expand) - if not k.startswith("__") and not isinstance(v, bb.data_smart.DataSmart): - dump[k] = { - 'v' : str(v) , - 'history' : self.data.varhistory.variable(k), - } - for d in flaglist: - if flags and d in flags: - dump[k][d] = flags[d] - else: - dump[k][d] = None - except Exception as e: - print(e) - 
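# The dict built above maps each visible variable name to its expanded
# value, its history, and the requested flags; an illustrative entry for
# flaglist=["doc", "func"], with values invented for the example:
#
#   dump["DESCRIPTION"] = {
#       "v": "GNU bash shell",
#       "history": [...],   # from self.data.varhistory.variable(k)
#       "doc": None,
#       "func": None,
#   }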
bb.fetch2.get_autorev = saved - return dump - - - def updateCacheSync(self): - if self.state == State.RUNNING: - return - - if not self.baseconfig_valid: - logger.debug("Reloading base configuration data") - self.initConfigurationData() - self.handlePRServ() - - # This is called for all async commands when self.state != running - def updateCache(self): - if self.state == State.RUNNING: - return - - if self.state in (State.SHUTDOWN, State.FORCE_SHUTDOWN, State.ERROR): - if hasattr(self.parser, 'shutdown'): - self.parser.shutdown(clean=False) - self.parser.final_cleanup() - raise bb.BBHandledException() - - if self.state != State.PARSING: - self.updateCacheSync() - - if self.state != State.PARSING and not self.parsecache_valid: - bb.server.process.serverlog("Parsing started") - self.parsewatched = {} - - bb.parse.siggen.reset(self.data) - self.parseConfiguration () - if CookerFeatures.SEND_SANITYEVENTS in self.featureset: - for mc in self.multiconfigs: - bb.event.fire(bb.event.SanityCheck(False), self.databuilder.mcdata[mc]) - - for mc in self.multiconfigs: - ignore = self.databuilder.mcdata[mc].getVar("ASSUME_PROVIDED") or "" - self.recipecaches[mc].ignored_dependencies = set(ignore.split()) - - for dep in self.configuration.extra_assume_provided: - self.recipecaches[mc].ignored_dependencies.add(dep) - - mcfilelist = {} - total_masked = 0 - searchdirs = set() - for mc in self.multiconfigs: - (filelist, masked, search) = self.collections[mc].collect_bbfiles(self.databuilder.mcdata[mc], self.databuilder.mcdata[mc]) - - mcfilelist[mc] = filelist - total_masked += masked - searchdirs |= set(search) - - # Add mtimes for directories searched for bb/bbappend files - for dirent in searchdirs: - self.add_filewatch([(dirent, bb.parse.cached_mtime_noerror(dirent))]) - - self.parser = CookerParser(self, mcfilelist, total_masked) - self._parsecache_set(True) - - self.state = State.PARSING - - if not self.parser.parse_next(): - bb.server.process.serverlog("Parsing completed") - collectlog.debug("parsing complete") - if self.parser.error: - raise bb.BBHandledException() - self.show_appends_with_no_recipes() - self.handlePrefProviders() - for mc in self.multiconfigs: - self.recipecaches[mc].bbfile_priority = self.collections[mc].collection_priorities(self.recipecaches[mc].pkg_fn, self.parser.mcfilelist[mc], self.data) - self.state = State.RUNNING - - # Send an event listing all stamps reachable after parsing - # which the metadata may use to clean up stale data - for mc in self.multiconfigs: - event = bb.event.ReachableStamps(self.recipecaches[mc].stamp) - bb.event.fire(event, self.databuilder.mcdata[mc]) - return None - - return True - - def checkPackages(self, pkgs_to_build, task=None): - - # Return a copy, don't modify the original - pkgs_to_build = pkgs_to_build[:] - - if not pkgs_to_build: - raise NothingToBuild - - ignore = (self.data.getVar("ASSUME_PROVIDED") or "").split() - for pkg in pkgs_to_build.copy(): - if pkg in ignore: - parselog.warning("Explicit target \"%s\" is in ASSUME_PROVIDED, ignoring" % pkg) - if pkg.startswith("multiconfig:"): - pkgs_to_build.remove(pkg) - pkgs_to_build.append(pkg.replace("multiconfig:", "mc:")) - - if 'world' in pkgs_to_build: - pkgs_to_build.remove('world') - for mc in self.multiconfigs: - bb.providers.buildWorldTargetList(self.recipecaches[mc], task) - for t in self.recipecaches[mc].world_target: - if mc: - t = "mc:" + mc + ":" + t - pkgs_to_build.append(t) - - if 'universe' in pkgs_to_build: - parselog.verbnote("The \"universe\" target is only intended for 
testing and may produce errors.") - parselog.debug("collating packages for \"universe\"") - pkgs_to_build.remove('universe') - for mc in self.multiconfigs: - for t in self.recipecaches[mc].universe_target: - if task: - foundtask = False - for provider_fn in self.recipecaches[mc].providers[t]: - if task in self.recipecaches[mc].task_deps[provider_fn]['tasks']: - foundtask = True - break - if not foundtask: - bb.debug(1, "Skipping %s for universe tasks as task %s doesn't exist" % (t, task)) - continue - if mc: - t = "mc:" + mc + ":" + t - pkgs_to_build.append(t) - - return pkgs_to_build - - def pre_serve(self): - return - - def post_serve(self): - self.shutdown(force=True) - prserv.serv.auto_shutdown() - if hasattr(bb.parse, "siggen"): - bb.parse.siggen.exit() - if self.hashserv: - self.hashserv.process.terminate() - self.hashserv.process.join() - if hasattr(self, "data"): - bb.event.fire(CookerExit(), self.data) - - def shutdown(self, force=False): - if force: - self.state = State.FORCE_SHUTDOWN - bb.event._should_exit.set() - else: - self.state = State.SHUTDOWN - - if self.parser: - self.parser.shutdown(clean=False) - self.parser.final_cleanup() - - def finishcommand(self): - if hasattr(self.parser, 'shutdown'): - self.parser.shutdown(clean=False) - self.parser.final_cleanup() - self.state = State.INITIAL - bb.event._should_exit.clear() - - def reset(self): - if hasattr(bb.parse, "siggen"): - bb.parse.siggen.exit() - self.finishcommand() - self.initConfigurationData() - self.handlePRServ() - - def clientComplete(self): - """Called when the client is done using the server""" - self.finishcommand() - self.extraconfigdata = {} - self.command.reset() - if hasattr(self, "data"): - self.databuilder.reset() - self.data = self.databuilder.data - # In theory tinfoil could have modified the base data before parsing, - # ideally need to track if anything did modify the datastore - self._parsecache_set(False) - -class CookerExit(bb.event.Event): - """ - Notify clients of the Cooker shutdown - """ - - def __init__(self): - bb.event.Event.__init__(self) - - -class CookerCollectFiles(object): - def __init__(self, priorities, mc=''): - self.mc = mc - self.bbappends = [] - # Priorities is a list of tuples, with the second element as the pattern. - # We need to sort the list with the longest pattern first, and so on to - # the shortest. This allows nested layers to be properly evaluated. 
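# Illustrative example (hypothetical layer paths): with patterns
# '^/srv/layers/meta/' and '^/srv/layers/meta/meta-inner/', the reverse
# lexicographic sort on tup[1] places the longer, nested pattern first,
# so calc_bbfile_priority() matches a recipe under meta-inner/ against
# its own collection before the enclosing layer's broader pattern.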
- self.bbfile_config_priorities = sorted(priorities, key=lambda tup: tup[1], reverse=True) - - def calc_bbfile_priority(self, filename): - for layername, _, regex, pri in self.bbfile_config_priorities: - if regex.match(filename): - return pri, regex, layername - return 0, None, None - - def get_bbfiles(self): - """Get list of default .bb files by reading out the current directory""" - path = os.getcwd() - contents = os.listdir(path) - bbfiles = [] - for f in contents: - if f.endswith(".bb"): - bbfiles.append(os.path.abspath(os.path.join(path, f))) - return bbfiles - - def find_bbfiles(self, path): - """Find all the .bb and .bbappend files in a directory""" - found = [] - for dir, dirs, files in os.walk(path): - for ignored in ('SCCS', 'CVS', '.svn'): - if ignored in dirs: - dirs.remove(ignored) - found += [os.path.join(dir, f) for f in files if (f.endswith(('.bb', '.bbappend')))] - - return found - - def collect_bbfiles(self, config, eventdata): - """Collect all available .bb build files""" - masked = 0 - - collectlog.debug("collecting .bb files") - - files = (config.getVar( "BBFILES") or "").split() - - # Sort files by priority - files.sort( key=lambda fileitem: self.calc_bbfile_priority(fileitem)[0] ) - config.setVar("BBFILES_PRIORITIZED", " ".join(files)) - - if not files: - files = self.get_bbfiles() - - if not files: - collectlog.error("no recipe files to build, check your BBPATH and BBFILES?") - bb.event.fire(CookerExit(), eventdata) - - # We need to track where we look so that we can know when the cache is invalid. There - # is no nice way to do this, this is horrid. We intercept the os.listdir() and os.scandir() - # calls while we run glob(). - origlistdir = os.listdir - if hasattr(os, 'scandir'): - origscandir = os.scandir - searchdirs = [] - - def ourlistdir(d): - searchdirs.append(d) - return origlistdir(d) - - def ourscandir(d): - searchdirs.append(d) - return origscandir(d) - - os.listdir = ourlistdir - if hasattr(os, 'scandir'): - os.scandir = ourscandir - try: - # Can't use set here as order is important - newfiles = [] - for f in files: - if os.path.isdir(f): - dirfiles = self.find_bbfiles(f) - for g in dirfiles: - if g not in newfiles: - newfiles.append(g) - else: - globbed = glob.glob(f) - if not globbed and os.path.exists(f): - globbed = [f] - # glob gives files in order on disk. Sort to be deterministic. - for g in sorted(globbed): - if g not in newfiles: - newfiles.append(g) - finally: - os.listdir = origlistdir - if hasattr(os, 'scandir'): - os.scandir = origscandir - - bbmask = config.getVar('BBMASK') - - if bbmask: - # First validate the individual regular expressions and ignore any - # that do not compile - bbmasks = [] - for mask in bbmask.split(): - # When constructing an older style single regex, it's possible for BBMASK - # to end up beginning with '|', which matches and masks _everything_. - if mask.startswith("|"): - collectlog.warning("BBMASK contains regular expression beginning with '|', fixing: %s" % mask) - mask = mask[1:] - try: - re.compile(mask) - bbmasks.append(mask) - except re.error: - collectlog.critical("BBMASK contains an invalid regular expression, ignoring: %s" % mask) - - # Then validate the combined regular expressions. This should never - # fail, but better safe than sorry... 
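(Editor's note: a standalone sketch of this two-stage BBMASK validation, compiling each mask individually before compiling the '|'-joined combination as done just below; the mask values here are invented.)

    import re

    masks = ["/meta-skip/", "(unclosed", r"\.bbappend$"]
    valid = []
    for mask in masks:
        try:
            re.compile(mask)      # stage 1: drop masks that don't compile
            valid.append(mask)
        except re.error:
            pass
    combined = re.compile("|".join(valid))   # stage 2: the combined expression
    assert combined.search("/srv/meta-skip/foo.bb")
    assert combined.search("/srv/meta/bar.bbappend")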
- bbmask = "|".join(bbmasks) - try: - bbmask_compiled = re.compile(bbmask) - except re.error: - collectlog.critical("BBMASK is not a valid regular expression, ignoring: %s" % bbmask) - bbmask = None - - bbfiles = [] - bbappend = [] - for f in newfiles: - if bbmask and bbmask_compiled.search(f): - collectlog.debug("skipping masked file %s", f) - masked += 1 - continue - if f.endswith('.bb'): - bbfiles.append(f) - elif f.endswith('.bbappend'): - bbappend.append(f) - else: - collectlog.debug("skipping %s: unknown file extension", f) - - # Build a list of .bbappend files for each .bb file - for f in bbappend: - base = os.path.basename(f).replace('.bbappend', '.bb') - self.bbappends.append((base, f)) - - # Find overlayed recipes - # bbfiles will be in priority order which makes this easy - bbfile_seen = dict() - self.overlayed = defaultdict(list) - for f in reversed(bbfiles): - base = os.path.basename(f) - if base not in bbfile_seen: - bbfile_seen[base] = f - else: - topfile = bbfile_seen[base] - self.overlayed[topfile].append(f) - - return (bbfiles, masked, searchdirs) - - def get_file_appends(self, fn): - """ - Returns a list of .bbappend files to apply to fn - """ - filelist = [] - f = os.path.basename(fn) - for b in self.bbappends: - (bbappend, filename) = b - if (bbappend == f) or ('%' in bbappend and bbappend.startswith(f[:bbappend.index('%')])): - filelist.append(filename) - return tuple(filelist) - - def collection_priorities(self, pkgfns, fns, d): - # Return the priorities of the entries in pkgfns - # Also check that all the regexes in self.bbfile_config_priorities are used - # (but to do that we need to ensure skipped recipes aren't counted, nor - # collections in BBFILE_PATTERN_IGNORE_EMPTY) - - priorities = {} - seen = set() - matched = set() - - matched_regex = set() - unmatched_regex = set() - for _, _, regex, _ in self.bbfile_config_priorities: - unmatched_regex.add(regex) - - # Calculate priorities for each file - for p in pkgfns: - realfn, cls, mc = bb.cache.virtualfn2realfn(p) - priorities[p], regex, _ = self.calc_bbfile_priority(realfn) - if regex in unmatched_regex: - matched_regex.add(regex) - unmatched_regex.remove(regex) - seen.add(realfn) - if regex: - matched.add(realfn) - - if unmatched_regex: - # Account for bbappend files - for b in self.bbappends: - (bbfile, append) = b - seen.add(append) - - # Account for skipped recipes - seen.update(fns) - - seen.difference_update(matched) - - def already_matched(fn): - for regex in matched_regex: - if regex.match(fn): - return True - return False - - for unmatch in unmatched_regex.copy(): - for fn in seen: - if unmatch.match(fn): - # If the bbappend or file was already matched by another regex, skip it - # e.g. 
for a layer within a layer, the outer regex could match, the inner - # regex may match nothing and we should warn about that - if already_matched(fn): - continue - unmatched_regex.remove(unmatch) - break - - for collection, pattern, regex, _ in self.bbfile_config_priorities: - if regex in unmatched_regex: - if d.getVar('BBFILE_PATTERN_IGNORE_EMPTY_%s' % collection) != '1': - collectlog.warning("No bb files in %s matched BBFILE_PATTERN_%s '%s'" % (self.mc if self.mc else 'default', - collection, pattern)) - - return priorities - -class ParsingFailure(Exception): - def __init__(self, realexception, recipe): - self.realexception = realexception - self.recipe = recipe - Exception.__init__(self, realexception, recipe) - -class Parser(multiprocessing.Process): - def __init__(self, jobs, next_job_id, results, quit, profile): - self.jobs = jobs - self.next_job_id = next_job_id - self.results = results - self.quit = quit - multiprocessing.Process.__init__(self) - self.context = bb.utils.get_context().copy() - self.handlers = bb.event.get_class_handlers().copy() - self.profile = profile - self.queue_signals = False - self.signal_received = [] - self.signal_threadlock = threading.Lock() - self.exit = False - - def catch_sig(self, signum, frame): - if self.queue_signals: - self.signal_received.append(signum) - else: - self.handle_sig(signum, frame) - - def handle_sig(self, signum, frame): - if signum == signal.SIGTERM: - signal.signal(signal.SIGTERM, signal.SIG_DFL) - os.kill(os.getpid(), signal.SIGTERM) - elif signum == signal.SIGINT: - self.exit = True - - def run(self): - bb.utils.profile_function("parsing" in self.profile, self.realrun, "profile-parse-%s.log" % multiprocessing.current_process().name, process=False) - - def realrun(self): - # Signal handling here is hard. We must not terminate any process or thread holding the write - # lock for the event stream as it will not be released, ever, and things will hang. - # Python handles signals in the main thread/process but they can be raised from any thread and - # we want to defer processing of any SIGTERM/SIGINT signal until we're outside the critical section - # and don't hold the lock (see server/process.py). We therefore always catch the signals (so any - # new thread should also do so) and we defer handling but we handle with the local thread lock - # held (a threading lock, not a multiprocessing one) so that no other thread in the process - # can be in the critical section. 
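(Editor's note: the signal-deferral pattern described in the comment above, reduced to a minimal free-standing sketch that ignores the thread-lock detail; the class and names are invented, not the deleted implementation.)

    import signal

    class SignalDeferrer:
        """Queue SIGTERM/SIGINT during a critical section, replay after."""
        def __init__(self):
            self.queue_signals = False
            self.pending = []

        def handler(self, signum, frame):
            if self.queue_signals:
                self.pending.append(signum)   # defer until outside the lock
            else:
                signal.default_int_handler(signum, frame)

        def run_critical(self, func):
            self.queue_signals = True
            try:
                return func()
            finally:
                self.queue_signals = False
                while self.pending:           # replay deferred signals
                    self.handler(self.pending.pop(0), None)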
- signal.signal(signal.SIGTERM, self.catch_sig) - signal.signal(signal.SIGHUP, signal.SIG_DFL) - signal.signal(signal.SIGINT, self.catch_sig) - bb.utils.set_process_name(multiprocessing.current_process().name) - multiprocessing.util.Finalize(None, bb.codeparser.parser_cache_save, exitpriority=1) - multiprocessing.util.Finalize(None, bb.fetch.fetcher_parse_save, exitpriority=1) - - pending = [] - havejobs = True - try: - while (havejobs or pending) and not self.exit: - if self.quit.is_set(): - break - - job = None - if havejobs: - with self.next_job_id.get_lock(): - if self.next_job_id.value < len(self.jobs): - job = self.jobs[self.next_job_id.value] - self.next_job_id.value += 1 - else: - havejobs = False - - if job: - result = self.parse(*job) - # Clear the siggen cache after parsing to control memory usage, it's huge - bb.parse.siggen.postparsing_clean_cache() - pending.append(result) - - if pending: - try: - result = pending.pop() - self.results.put(result, timeout=0.05) - except queue.Full: - pending.append(result) - finally: - self.results.close() - self.results.join_thread() - - def parse(self, mc, cache, filename, appends, layername): - try: - origfilter = bb.event.LogHandler.filter - # Record the filename we're parsing into any events generated - def parse_filter(self, record): - record.taskpid = bb.event.worker_pid - record.fn = filename - return True - - # Reset our environment and handlers to the original settings - bb.utils.set_context(self.context.copy()) - bb.event.set_class_handlers(self.handlers.copy()) - bb.event.LogHandler.filter = parse_filter - - return True, mc, cache.parse(filename, appends, layername) - except Exception as exc: - tb = sys.exc_info()[2] - exc.recipe = filename - return True, None, exc - # Need to turn BaseExceptions into Exceptions here so that we shut down gracefully - # and, for example, a worker thread doesn't just exit on its own in response to - # a SystemExit event. 
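(Editor's note: why the BaseException handler that follows matters, as a tiny illustration; the function name is invented.)

    def risky():
        raise SystemExit(1)           # a BaseException, not an Exception

    try:
        risky()
    except Exception:
        print("never reached")        # SystemExit bypasses this handler
    except BaseException as exc:
        print("wrapped:", type(exc).__name__)   # -> wrapped: SystemExit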
- except BaseException as exc: - return True, None, ParsingFailure(exc, filename) - finally: - bb.event.LogHandler.filter = origfilter - -class CookerParser(object): - def __init__(self, cooker, mcfilelist, masked): - self.mcfilelist = mcfilelist - self.cooker = cooker - self.cfgdata = cooker.data - self.cfghash = cooker.databuilder.data_hash - self.cfgbuilder = cooker.databuilder - - # Accounting statistics - self.parsed = 0 - self.cached = 0 - self.error = 0 - self.masked = masked - - self.skipped = 0 - self.virtuals = 0 - - self.current = 0 - self.process_names = [] - - self.bb_caches = bb.cache.MulticonfigCache(self.cfgbuilder, self.cfghash, cooker.caches_array) - self.fromcache = set() - self.willparse = [] - for mc in self.cooker.multiconfigs: - for filename in self.mcfilelist[mc]: - appends = self.cooker.collections[mc].get_file_appends(filename) - layername = self.cooker.collections[mc].calc_bbfile_priority(filename)[2] - if not self.bb_caches[mc].cacheValid(filename, appends): - self.willparse.append((mc, self.bb_caches[mc], filename, appends, layername)) - else: - self.fromcache.add((mc, self.bb_caches[mc], filename, appends, layername)) - - self.total = len(self.fromcache) + len(self.willparse) - self.toparse = len(self.willparse) - self.progress_chunk = int(max(self.toparse / 100, 1)) - - self.num_processes = min(int(self.cfgdata.getVar("BB_NUMBER_PARSE_THREADS") or - multiprocessing.cpu_count()), self.toparse) - - bb.cache.SiggenRecipeInfo.reset() - self.start() - self.haveshutdown = False - self.syncthread = None - - def start(self): - self.results = self.load_cached() - self.processes = [] - - if self.toparse: - bb.event.fire(bb.event.ParseStarted(self.toparse), self.cfgdata) - - next_job_id = multiprocessing.Value(ctypes.c_int, 0) - self.parser_quit = multiprocessing.Event() - self.result_queue = multiprocessing.Queue() - - # Have to pass in willparse at fork time so all parsing processes have the unpickleable data - # then access it by index from the parse queue. - for i in range(0, self.num_processes): - parser = Parser(self.willparse, next_job_id, self.result_queue, self.parser_quit, self.cooker.configuration.profile) - parser.start() - self.process_names.append(parser.name) - self.processes.append(parser) - - self.results = itertools.chain(self.results, self.parse_generator()) - - def shutdown(self, clean=True, eventmsg="Parsing halted due to errors"): - if not self.toparse: - return - if self.haveshutdown: - return - self.haveshutdown = True - - if clean: - event = bb.event.ParseCompleted(self.cached, self.parsed, - self.skipped, self.masked, - self.virtuals, self.error, - self.total) - - bb.event.fire(event, self.cfgdata) - else: - bb.event.fire(bb.event.ParseError(eventmsg), self.cfgdata) - bb.error("Parsing halted due to errors, see error messages above") - - # Cleanup the queue before call process.join(), otherwise there might be - # deadlocks. - def read_results(): - while True: - try: - self.result_queue.get(timeout=0.25) - except queue.Empty: - break - except KeyError: - # The restore state from SiggenRecipeInfo in cache.py can - # fail here if this is an unclean shutdown since the state may have been - # reset. Ignore key errors for that reason, we don't care. 
- pass - - def sync_caches(): - for c in self.bb_caches.values(): - bb.cache.SiggenRecipeInfo.reset() - c.sync() - - self.syncthread = threading.Thread(target=sync_caches, name="SyncThread") - self.syncthread.start() - - self.parser_quit.set() - - read_results() - - for process in self.processes: - process.join(2) - - for process in self.processes: - if process.exitcode is None: - os.kill(process.pid, signal.SIGINT) - - read_results() - - for process in self.processes: - process.join(2) - - for process in self.processes: - if process.exitcode is None: - process.terminate() - - for process in self.processes: - process.join() - # clean up zombies - process.close() - - bb.codeparser.parser_cache_save() - bb.codeparser.parser_cache_savemerge() - bb.cache.SiggenRecipeInfo.reset() - bb.fetch.fetcher_parse_done() - if self.cooker.configuration.profile: - profiles = [] - for i in self.process_names: - logfile = "profile-parse-%s.log" % i - if os.path.exists(logfile) and os.path.getsize(logfile): - profiles.append(logfile) - - if profiles: - fn_out = "profile-parse.log.report" - bb.utils.process_profilelog(profiles, fn_out=fn_out) - print("Processed parsing statistics saved to %s" % (fn_out)) - - def final_cleanup(self): - if self.syncthread: - self.syncthread.join() - - def load_cached(self): - for mc, cache, filename, appends, layername in self.fromcache: - infos = cache.loadCached(filename, appends) - yield False, mc, infos - - def parse_generator(self): - empty = False - while self.processes or not empty: - for process in self.processes.copy(): - if not process.is_alive(): - process.join() - self.processes.remove(process) - - if self.parsed >= self.toparse: - break - - try: - result = self.result_queue.get(timeout=0.25) - except queue.Empty: - empty = True - yield None, None, None - else: - empty = False - yield result - - if not (self.parsed >= self.toparse): - raise bb.parse.ParseError("Not all recipes parsed, parser thread killed/died? (%s %s of %s) Exiting." % (len(self.processes), self.parsed, self.toparse), None) - - - def parse_next(self): - result = [] - parsed = None - try: - parsed, mc, result = next(self.results) - if isinstance(result, BaseException): - # Turn exceptions back into exceptions - raise result - if parsed is None: - # Timeout, loop back through the main loop - return True - - except StopIteration: - self.shutdown() - return False - except bb.BBHandledException as exc: - self.error += 1 - logger.debug('Failed to parse recipe: %s' % exc.recipe) - self.shutdown(clean=False) - return False - except ParsingFailure as exc: - self.error += 1 - - exc_desc = str(exc) - if isinstance(exc, SystemExit) and not isinstance(exc.code, str): - exc_desc = 'Exited with "%d"' % exc.code - - logger.error('Unable to parse %s: %s' % (exc.recipe, exc_desc)) - self.shutdown(clean=False) - return False - except bb.parse.ParseError as exc: - self.error += 1 - logger.error(str(exc)) - self.shutdown(clean=False, eventmsg=str(exc)) - return False - except bb.data_smart.ExpansionError as exc: - def skip_frames(f, fn_prefix): - while f and f.tb_frame.f_code.co_filename.startswith(fn_prefix): - f = f.tb_next - return f - - self.error += 1 - bbdir = os.path.dirname(__file__) + os.sep - etype, value, tb = sys.exc_info() - - # Remove any frames where the code comes from bitbake. 
This - # prevents deep (and pretty useless) backtraces for expansion error - tb = skip_frames(tb, bbdir) - cur = tb - while cur: - cur.tb_next = skip_frames(cur.tb_next, bbdir) - cur = cur.tb_next - - logger.error('ExpansionError during parsing %s', value.recipe, - exc_info=(etype, value, tb)) - self.shutdown(clean=False) - return False - except Exception as exc: - self.error += 1 - _, value, _ = sys.exc_info() - if hasattr(value, "recipe"): - logger.error('Unable to parse %s' % value.recipe, - exc_info=sys.exc_info()) - else: - # Most likely, an exception occurred during raising an exception - import traceback - logger.error('Exception during parse: %s' % traceback.format_exc()) - self.shutdown(clean=False) - return False - - self.current += 1 - self.virtuals += len(result) - if parsed: - self.parsed += 1 - if self.parsed % self.progress_chunk == 0: - bb.event.fire(bb.event.ParseProgress(self.parsed, self.toparse), - self.cfgdata) - else: - self.cached += 1 - - for virtualfn, info_array in result: - if info_array[0].skipped: - self.skipped += 1 - self.cooker.skiplist_by_mc[mc][virtualfn] = SkippedPackage(info_array[0]) - self.bb_caches[mc].add_info(virtualfn, info_array, self.cooker.recipecaches[mc], - parsed=parsed, watcher = self.cooker.add_filewatch) - return True - - def reparse(self, filename): - bb.cache.SiggenRecipeInfo.reset() - to_reparse = set() - for mc in self.cooker.multiconfigs: - layername = self.cooker.collections[mc].calc_bbfile_priority(filename)[2] - to_reparse.add((mc, filename, self.cooker.collections[mc].get_file_appends(filename), layername)) - - for mc, filename, appends, layername in to_reparse: - infos = self.bb_caches[mc].parse(filename, appends, layername) - for vfn, info_array in infos: - self.cooker.recipecaches[mc].add_from_recipeinfo(vfn, info_array) diff --git a/bitbake/lib/bb/cookerdata.py b/bitbake/lib/bb/cookerdata.py deleted file mode 100644 index 65c153a5bb..0000000000 --- a/bitbake/lib/bb/cookerdata.py +++ /dev/null @@ -1,552 +0,0 @@ - -# -# Copyright (C) 2003, 2004 Chris Larson -# Copyright (C) 2003, 2004 Phil Blundell -# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer -# Copyright (C) 2005 Holger Hans Peter Freyther -# Copyright (C) 2005 ROAD GmbH -# Copyright (C) 2006 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import logging -import os -import re -import sys -import hashlib -from functools import wraps -import bb -from bb import data -import bb.parse - -logger = logging.getLogger("BitBake") -parselog = logging.getLogger("BitBake.Parsing") - -class ConfigParameters(object): - def __init__(self, argv=None): - self.options, targets = self.parseCommandLine(argv or sys.argv) - self.environment = self.parseEnvironment() - - self.options.pkgs_to_build = targets or [] - - for key, val in self.options.__dict__.items(): - setattr(self, key, val) - - def parseCommandLine(self, argv=sys.argv): - raise Exception("Caller must implement commandline option parsing") - - def parseEnvironment(self): - return os.environ.copy() - - def updateFromServer(self, server): - if not self.options.cmd: - defaulttask, error = server.runCommand(["getVariable", "BB_DEFAULT_TASK"]) - if error: - raise Exception("Unable to get the value of BB_DEFAULT_TASK from the server: %s" % error) - self.options.cmd = defaulttask or "build" - _, error = server.runCommand(["setConfig", "cmd", self.options.cmd]) - if error: - raise Exception("Unable to set configuration option 'cmd' on the server: %s" % error) - - if not self.options.pkgs_to_build: - bbpkgs, error = 
server.runCommand(["getVariable", "BBTARGETS"]) - if error: - raise Exception("Unable to get the value of BBTARGETS from the server: %s" % error) - if bbpkgs: - self.options.pkgs_to_build.extend(bbpkgs.split()) - - def updateToServer(self, server, environment): - options = {} - for o in ["halt", "force", "invalidate_stamp", - "dry_run", "dump_signatures", - "extra_assume_provided", "profile", - "prefile", "postfile", "server_timeout", - "nosetscene", "setsceneonly", "skipsetscene", - "runall", "runonly", "writeeventlog"]: - options[o] = getattr(self.options, o) - - options['build_verbose_shell'] = self.options.verbose - options['build_verbose_stdout'] = self.options.verbose - options['default_loglevel'] = bb.msg.loggerDefaultLogLevel - options['debug_domains'] = bb.msg.loggerDefaultDomains - - ret, error = server.runCommand(["updateConfig", options, environment, sys.argv]) - if error: - raise Exception("Unable to update the server configuration with local parameters: %s" % error) - - def parseActions(self): - # Parse any commandline into actions - action = {'action':None, 'msg':None} - if self.options.show_environment: - if 'world' in self.options.pkgs_to_build: - action['msg'] = "'world' is not a valid target for --environment." - elif 'universe' in self.options.pkgs_to_build: - action['msg'] = "'universe' is not a valid target for --environment." - elif len(self.options.pkgs_to_build) > 1: - action['msg'] = "Only one target can be used with the --environment option." - elif self.options.buildfile and len(self.options.pkgs_to_build) > 0: - action['msg'] = "No target should be used with the --environment and --buildfile options." - elif self.options.pkgs_to_build: - action['action'] = ["showEnvironmentTarget", self.options.pkgs_to_build] - else: - action['action'] = ["showEnvironment", self.options.buildfile] - elif self.options.buildfile is not None: - action['action'] = ["buildFile", self.options.buildfile, self.options.cmd] - elif self.options.revisions_changed: - action['action'] = ["compareRevisions"] - elif self.options.show_versions: - action['action'] = ["showVersions"] - elif self.options.parse_only: - action['action'] = ["parseFiles"] - elif self.options.dot_graph: - if self.options.pkgs_to_build: - action['action'] = ["generateDotGraph", self.options.pkgs_to_build, self.options.cmd] - else: - action['msg'] = "Please specify a package name for dependency graph generation." - else: - if self.options.pkgs_to_build: - action['action'] = ["buildTargets", self.options.pkgs_to_build, self.options.cmd] - else: - #action['msg'] = "Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information." 
- action = None - self.options.initialaction = action - return action - -class CookerConfiguration(object): - """ - Manages build options and configurations for one run - """ - - def __init__(self): - self.debug_domains = bb.msg.loggerDefaultDomains - self.default_loglevel = bb.msg.loggerDefaultLogLevel - self.extra_assume_provided = [] - self.prefile = [] - self.postfile = [] - self.cmd = None - self.halt = True - self.force = False - self.profile = False - self.nosetscene = False - self.setsceneonly = False - self.skipsetscene = False - self.invalidate_stamp = False - self.dump_signatures = [] - self.build_verbose_shell = False - self.build_verbose_stdout = False - self.dry_run = False - self.tracking = False - self.writeeventlog = False - self.limited_deps = False - self.runall = [] - self.runonly = [] - - self.env = {} - - def __getstate__(self): - state = {} - for key in self.__dict__.keys(): - state[key] = getattr(self, key) - return state - - def __setstate__(self,state): - for k in state: - setattr(self, k, state[k]) - - -def catch_parse_error(func): - """Exception handling bits for our parsing""" - @wraps(func) - def wrapped(fn, *args): - try: - return func(fn, *args) - except Exception as exc: - import traceback - - bbdir = os.path.dirname(__file__) + os.sep - exc_class, exc, tb = sys.exc_info() - for tb in iter(lambda: tb.tb_next, None): - # Skip frames in bitbake itself, we only want the metadata - fn, _, _, _ = traceback.extract_tb(tb, 1)[0] - if not fn.startswith(bbdir): - break - parselog.critical("Unable to parse %s" % fn, exc_info=(exc_class, exc, tb)) - raise bb.BBHandledException() - return wrapped - -@catch_parse_error -def parse_config_file(fn, data, include=True): - return bb.parse.handle(fn, data, include, baseconfig=True) - -@catch_parse_error -def _inherit(bbclass, data): - bb.parse.BBHandler.inherit(bbclass, "configuration INHERITs", 0, data) - return data - -def findConfigFile(configfile, data): - search = [] - bbpath = data.getVar("BBPATH") - if bbpath: - for i in bbpath.split(":"): - search.append(os.path.join(i, "conf", configfile)) - path = os.getcwd() - while path != "/": - search.append(os.path.join(path, "conf", configfile)) - path, _ = os.path.split(path) - - for i in search: - if os.path.exists(i): - return i - - return None - -# -# We search for a conf/bblayers.conf under an entry in BBPATH or in cwd working -# up to /. If that fails, bitbake would fall back to cwd. 
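(Editor's note: the TOPDIR derivation used by findTopdir() below relies on bblayers.conf living in conf/ directly under the top directory; a two-line illustration with a made-up path.)

    import os
    layerconf = "/srv/build/conf/bblayers.conf"   # hypothetical location
    topdir = os.path.dirname(os.path.dirname(layerconf))
    assert topdir == "/srv/build"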
-# - -def findTopdir(): - d = bb.data.init() - bbpath = None - if 'BBPATH' in os.environ: - bbpath = os.environ['BBPATH'] - d.setVar('BBPATH', bbpath) - - layerconf = findConfigFile("bblayers.conf", d) - if layerconf: - return os.path.dirname(os.path.dirname(layerconf)) - - return os.path.abspath(os.getcwd()) - -class CookerDataBuilder(object): - - def __init__(self, cookercfg, worker = False): - - self.prefiles = cookercfg.prefile - self.postfiles = cookercfg.postfile - self.tracking = cookercfg.tracking - - bb.utils.set_context(bb.utils.clean_context()) - bb.event.set_class_handlers(bb.event.clean_class_handlers()) - self.basedata = bb.data.init() - if self.tracking: - self.basedata.enableTracking() - - # Keep a datastore of the initial environment variables and their - # values from when BitBake was launched to enable child processes - # to use environment variables which have been cleaned from the - # BitBake process's env - self.savedenv = bb.data.init() - for k in cookercfg.env: - self.savedenv.setVar(k, cookercfg.env[k]) - if k in bb.data_smart.bitbake_renamed_vars: - bb.error('Shell environment variable %s has been renamed to %s' % (k, bb.data_smart.bitbake_renamed_vars[k])) - bb.fatal("Exiting to allow environment variables to be corrected") - - filtered_keys = bb.utils.approved_variables() - bb.data.inheritFromOS(self.basedata, self.savedenv, filtered_keys) - self.basedata.setVar("BB_ORIGENV", self.savedenv) - self.basedata.setVar("__bbclasstype", "global") - - if worker: - self.basedata.setVar("BB_WORKERCONTEXT", "1") - - self.data = self.basedata - self.mcdata = {} - - def calc_datastore_hashes(self): - data_hash = hashlib.sha256() - data_hash.update(self.data.get_hash().encode('utf-8')) - multiconfig = (self.data.getVar("BBMULTICONFIG") or "").split() - for config in multiconfig: - data_hash.update(self.mcdata[config].get_hash().encode('utf-8')) - self.data_hash = data_hash.hexdigest() - - def parseBaseConfiguration(self, worker=False): - mcdata = {} - try: - self.data = self.parseConfigurationFiles(self.prefiles, self.postfiles) - - servercontext = self.data.getVar("BB_WORKERCONTEXT", False) is None and not worker - bb.fetch.fetcher_init(self.data, servercontext) - bb.parse.init_parser(self.data) - - bb.event.fire(bb.event.ConfigParsed(), self.data) - - reparse_cnt = 0 - while self.data.getVar("BB_INVALIDCONF", False) is True: - if reparse_cnt > 20: - logger.error("Configuration has been re-parsed over 20 times, " - "breaking out of the loop...") - raise Exception("Too deep config re-parse loop. 
Check locations where " - "BB_INVALIDCONF is being set (ConfigParsed event handlers)") - self.data.setVar("BB_INVALIDCONF", False) - self.data = self.parseConfigurationFiles(self.prefiles, self.postfiles) - reparse_cnt += 1 - bb.event.fire(bb.event.ConfigParsed(), self.data) - - bb.parse.init_parser(self.data) - mcdata[''] = self.data - - multiconfig = (self.data.getVar("BBMULTICONFIG") or "").split() - for config in multiconfig: - if config[0].isdigit(): - bb.fatal("Multiconfig name '%s' is invalid as multiconfigs cannot start with a digit" % config) - parsed_mcdata = self.parseConfigurationFiles(self.prefiles, self.postfiles, config) - bb.event.fire(bb.event.ConfigParsed(), parsed_mcdata) - mcdata[config] = parsed_mcdata - if multiconfig: - bb.event.fire(bb.event.MultiConfigParsed(mcdata), self.data) - - except bb.data_smart.ExpansionError as e: - logger.error(str(e)) - raise bb.BBHandledException() - - bb.codeparser.update_module_dependencies(self.data) - - # Handle obsolete variable names - d = self.data - renamedvars = d.getVarFlags('BB_RENAMED_VARIABLES') or {} - renamedvars.update(bb.data_smart.bitbake_renamed_vars) - issues = False - for v in renamedvars: - if d.getVar(v) != None or d.hasOverrides(v): - issues = True - loginfo = {} - history = d.varhistory.get_variable_refs(v) - for h in history: - for line in history[h]: - loginfo = {'file' : h, 'line' : line} - bb.data.data_smart._print_rename_error(v, loginfo, renamedvars) - if not history: - bb.data.data_smart._print_rename_error(v, loginfo, renamedvars) - if issues: - raise bb.BBHandledException() - - for mc in mcdata: - mcdata[mc].renameVar("__depends", "__base_depends") - mcdata[mc].setVar("__bbclasstype", "recipe") - - # Create a copy so we can reset at a later date when UIs disconnect - self.mcorigdata = mcdata - for mc in mcdata: - self.mcdata[mc] = bb.data.createCopy(mcdata[mc]) - self.data = self.mcdata[''] - self.calc_datastore_hashes() - - def reset(self): - # We may not have run parseBaseConfiguration() yet - if not hasattr(self, 'mcorigdata'): - return - for mc in self.mcorigdata: - self.mcdata[mc] = bb.data.createCopy(self.mcorigdata[mc]) - self.data = self.mcdata[''] - - def _findLayerConf(self, data): - return findConfigFile("bblayers.conf", data) - - def parseConfigurationFiles(self, prefiles, postfiles, mc = ""): - data = bb.data.createCopy(self.basedata) - data.setVar("BB_CURRENT_MC", mc) - - # Parse files for loading *before* bitbake.conf and any includes - for f in prefiles: - data = parse_config_file(f, data) - - layerconf = self._findLayerConf(data) - if layerconf: - parselog.debug2("Found bblayers.conf (%s)", layerconf) - # By definition bblayers.conf is in conf/ of TOPDIR. 
- # We may have been called with cwd somewhere else so reset TOPDIR - data.setVar("TOPDIR", os.path.dirname(os.path.dirname(layerconf))) - data = parse_config_file(layerconf, data) - - if not data.getVar("BB_CACHEDIR"): - data.setVar("BB_CACHEDIR", "${TOPDIR}/cache") - - bb.codeparser.parser_cache_init(data.getVar("BB_CACHEDIR")) - - layers = (data.getVar('BBLAYERS') or "").split() - broken_layers = [] - - if not layers: - bb.fatal("The bblayers.conf file doesn't contain any BBLAYERS definition") - - data = bb.data.createCopy(data) - approved = bb.utils.approved_variables() - - # Check whether present layer directories exist - for layer in layers: - if not os.path.isdir(layer): - broken_layers.append(layer) - - if broken_layers: - parselog.critical("The following layer directories do not exist:") - for layer in broken_layers: - parselog.critical(" %s", layer) - parselog.critical("Please check BBLAYERS in %s" % (layerconf)) - raise bb.BBHandledException() - - layerseries = None - compat_entries = {} - for layer in layers: - parselog.debug2("Adding layer %s", layer) - if 'HOME' in approved and '~' in layer: - layer = os.path.expanduser(layer) - if layer.endswith('/'): - layer = layer.rstrip('/') - data.setVar('LAYERDIR', layer) - data.setVar('LAYERDIR_RE', re.escape(layer)) - data = parse_config_file(os.path.join(layer, "conf", "layer.conf"), data) - data.expandVarref('LAYERDIR') - data.expandVarref('LAYERDIR_RE') - - # Sadly we can't have nice things. - # Some layers think they're going to be 'clever' and copy the values from - # another layer, e.g. using ${LAYERSERIES_COMPAT_core}. The whole point of - # this mechanism is to make it clear which releases a layer supports and - # show when a layer master branch is bitrotting and is unmaintained. - # We therefore avoid people doing this here. - collections = (data.getVar('BBFILE_COLLECTIONS') or "").split() - for c in collections: - compat_entry = data.getVar("LAYERSERIES_COMPAT_%s" % c) - if compat_entry: - compat_entries[c] = set(compat_entry.split()) - data.delVar("LAYERSERIES_COMPAT_%s" % c) - if not layerseries: - layerseries = set((data.getVar("LAYERSERIES_CORENAMES") or "").split()) - if layerseries: - data.delVar("LAYERSERIES_CORENAMES") - - data.delVar('LAYERDIR_RE') - data.delVar('LAYERDIR') - for c in compat_entries: - data.setVar("LAYERSERIES_COMPAT_%s" % c, " ".join(sorted(compat_entries[c]))) - - bbfiles_dynamic = (data.getVar('BBFILES_DYNAMIC') or "").split() - collections = (data.getVar('BBFILE_COLLECTIONS') or "").split() - invalid = [] - for entry in bbfiles_dynamic: - parts = entry.split(":", 1) - if len(parts) != 2: - invalid.append(entry) - continue - l, f = parts - invert = l[0] == "!" - if invert: - l = l[1:] - if (l in collections and not invert) or (l not in collections and invert): - data.appendVar("BBFILES", " " + f) - if invalid: - bb.fatal("BBFILES_DYNAMIC entries must be of the form {!}<collection name>:<filename pattern>, not:\n %s" % "\n ".join(invalid)) - - collections_tmp = collections[:] - for c in collections: - collections_tmp.remove(c) - if c in collections_tmp: - bb.fatal("Found duplicated BBFILE_COLLECTIONS '%s', check bblayers.conf or layer.conf to fix it." % c) - - compat = set() - if c in compat_entries: - compat = compat_entries[c] - if compat and not layerseries: - bb.fatal("No core layer found to work with layer '%s'. Missing entry in bblayers.conf?" 
% c) - if compat and not (compat & layerseries): - bb.fatal("Layer %s is not compatible with the core layer which only supports these series: %s (layer is compatible with %s)" - % (c, " ".join(layerseries), " ".join(compat))) - elif not compat and not data.getVar("BB_WORKERCONTEXT"): - bb.warn("Layer %s should set LAYERSERIES_COMPAT_%s in its conf/layer.conf file to list the core layer names it is compatible with." % (c, c)) - - data.setVar("LAYERSERIES_CORENAMES", " ".join(sorted(layerseries))) - - if not data.getVar("BBPATH"): - msg = "The BBPATH variable is not set" - if not layerconf: - msg += (" and bitbake did not find a conf/bblayers.conf file in" - " the expected location.\nMaybe you accidentally" - " invoked bitbake from the wrong directory?") - bb.fatal(msg) - - if not data.getVar("TOPDIR"): - data.setVar("TOPDIR", os.path.abspath(os.getcwd())) - if not data.getVar("BB_CACHEDIR"): - data.setVar("BB_CACHEDIR", "${TOPDIR}/cache") - bb.codeparser.parser_cache_init(data.getVar("BB_CACHEDIR")) - - data = parse_config_file(os.path.join("conf", "bitbake.conf"), data) - - # Parse files for loading *after* bitbake.conf and any includes - for p in postfiles: - data = parse_config_file(p, data) - - # Handle any INHERITs and inherit the base class - bbclasses = ["base"] + (data.getVar('INHERIT') or "").split() - for bbclass in bbclasses: - data = _inherit(bbclass, data) - - # Normally we only register event handlers at the end of parsing .bb files - # We register any handlers we've found so far here... - for var in data.getVar('__BBHANDLERS', False) or []: - handlerfn = data.getVarFlag(var, "filename", False) - if not handlerfn: - parselog.critical("Undefined event handler function '%s'" % var) - raise bb.BBHandledException() - handlerln = int(data.getVarFlag(var, "lineno", False)) - bb.event.register(var, data.getVar(var, False), (data.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln, data) - - data.setVar('BBINCLUDED',bb.parse.get_file_depends(data)) - - return data - - @staticmethod - def _parse_recipe(bb_data, bbfile, appends, mc, layername): - bb_data.setVar("__BBMULTICONFIG", mc) - bb_data.setVar("FILE_LAYERNAME", layername) - - bbfile_loc = os.path.abspath(os.path.dirname(bbfile)) - bb.parse.cached_mtime_noerror(bbfile_loc) - - if appends: - bb_data.setVar('__BBAPPEND', " ".join(appends)) - - return bb.parse.handle(bbfile, bb_data) - - def parseRecipeVariants(self, bbfile, appends, virtonly=False, mc=None, layername=None): - """ - Load and parse one .bb build file - Return the data and whether parsing resulted in the file being skipped - """ - - if virtonly: - (bbfile, virtual, mc) = bb.cache.virtualfn2realfn(bbfile) - bb_data = self.mcdata[mc].createCopy() - bb_data.setVar("__ONLYFINALISE", virtual or "default") - return self._parse_recipe(bb_data, bbfile, appends, mc, layername) - - if mc is not None: - bb_data = self.mcdata[mc].createCopy() - return self._parse_recipe(bb_data, bbfile, appends, mc, layername) - - bb_data = self.data.createCopy() - datastores = self._parse_recipe(bb_data, bbfile, appends, '', layername) - - for mc in self.mcdata: - if not mc: - continue - bb_data = self.mcdata[mc].createCopy() - newstores = self._parse_recipe(bb_data, bbfile, appends, mc, layername) - for ns in newstores: - datastores["mc:%s:%s" % (mc, ns)] = newstores[ns] - - return datastores - - def parseRecipe(self, virtualfn, appends, layername): - """ - Return a complete set of data for fn. - To do this, we need to parse the file. 
- """ - logger.debug("Parsing %s (full)" % virtualfn) - (fn, virtual, mc) = bb.cache.virtualfn2realfn(virtualfn) - datastores = self.parseRecipeVariants(virtualfn, appends, virtonly=True, layername=layername) - return datastores[virtual] diff --git a/bitbake/lib/bb/daemonize.py b/bitbake/lib/bb/daemonize.py deleted file mode 100644 index 7689404436..0000000000 --- a/bitbake/lib/bb/daemonize.py +++ /dev/null @@ -1,101 +0,0 @@ -# -# Copyright BitBake Contributors -# -# SPDX-License-Identifier: GPL-2.0-only -# - -""" -Python Daemonizing helper - -Originally based on code Copyright (C) 2005 Chad J. Schroeder but now heavily modified -to allow a function to be daemonized and return for bitbake use by Richard Purdie -""" - -import os -import sys -import io -import traceback - -import bb - -def createDaemon(function, logfile): - """ - Detach a process from the controlling terminal and run it in the - background as a daemon, returning control to the caller. - """ - - # Ensure stdout/stderror are flushed before forking to avoid duplicate output - sys.stdout.flush() - sys.stderr.flush() - - try: - # Fork a child process so the parent can exit. This returns control to - # the command-line or shell. It also guarantees that the child will not - # be a process group leader, since the child receives a new process ID - # and inherits the parent's process group ID. This step is required - # to insure that the next call to os.setsid is successful. - pid = os.fork() - except OSError as e: - raise Exception("%s [%d]" % (e.strerror, e.errno)) - - if (pid == 0): # The first child. - # To become the session leader of this new session and the process group - # leader of the new process group, we call os.setsid(). The process is - # also guaranteed not to have a controlling terminal. - os.setsid() - try: - # Fork a second child and exit immediately to prevent zombies. This - # causes the second child process to be orphaned, making the init - # process responsible for its cleanup. And, since the first child is - # a session leader without a controlling terminal, it's possible for - # it to acquire one by opening a terminal in the future (System V- - # based systems). This second fork guarantees that the child is no - # longer a session leader, preventing the daemon from ever acquiring - # a controlling terminal. - pid = os.fork() # Fork a second child. - except OSError as e: - raise Exception("%s [%d]" % (e.strerror, e.errno)) - - if (pid != 0): - # Parent (the first child) of the second child. - # exit() or _exit()? - # _exit is like exit(), but it doesn't call any functions registered - # with atexit (and on_exit) or any registered signal handlers. It also - # closes any open file descriptors, but doesn't flush any buffered output. - # Using exit() may cause all any temporary files to be unexpectedly - # removed. It's therefore recommended that child branches of a fork() - # and the parent branch(es) of a daemon use _exit(). - os._exit(0) - else: - os.waitpid(pid, 0) - return - - # The second child. 
- - # Replace standard fds with our own - with open('/dev/null', 'r') as si: - os.dup2(si.fileno(), sys.stdin.fileno()) - - with open(logfile, 'a+') as so: - try: - os.dup2(so.fileno(), sys.stdout.fileno()) - os.dup2(so.fileno(), sys.stderr.fileno()) - except io.UnsupportedOperation: - sys.stdout = so - - # Have stdout and stderr be the same so log output matches chronologically - # and there aren't two separate buffers - sys.stderr = sys.stdout - - try: - function() - except Exception as e: - traceback.print_exc() - finally: - bb.event.print_ui_queue() - # os._exit() doesn't flush open files like sys.exit() does. Manually flush - # stdout and stderr so that any logging output will be seen, particularly - # exception tracebacks. - sys.stdout.flush() - sys.stderr.flush() - os._exit(0) diff --git a/bitbake/lib/bb/data.py b/bitbake/lib/bb/data.py deleted file mode 100644 index f672a84451..0000000000 --- a/bitbake/lib/bb/data.py +++ /dev/null @@ -1,448 +0,0 @@ -""" -BitBake 'Data' implementations - -Functions for interacting with the data structure used by the -BitBake build tools. - -expandKeys and datastore iteration are the most expensive -operations. Updating overrides is now "on the fly" but still based -on the idea of the cookie monster introduced by zecke: -"At night the cookie monster came by and -suggested 'give me cookies on setting the variables and -things will work out'. Taking this suggestion into account -applying the skills from the not yet passed 'Entwurf und -Analyse von Algorithmen' lecture and the cookie -monster seems to be right. We will track setVar more carefully -to have faster datastore operations." - -This is a trade-off between speed and memory again but -the speed is more critical here. -""" - -# Copyright (C) 2003, 2004 Chris Larson -# Copyright (C) 2005 Holger Hans Peter Freyther -# -# SPDX-License-Identifier: GPL-2.0-only -# -# Based on functions from the base bb module, Copyright 2003 Holger Schurig - -import sys, os, re -import hashlib -from itertools import groupby - -from bb import data_smart -from bb import codeparser -import bb - -logger = data_smart.logger -_dict_type = data_smart.DataSmart - -def init(): - """Return a new object representing the Bitbake data""" - return _dict_type() - -def init_db(parent = None): - """Return a new object representing the Bitbake data, - optionally based on an existing object""" - if parent is not None: - return parent.createCopy() - else: - return _dict_type() - -def createCopy(source): - """Link the source set to the destination. - If a value is not found in the destination set, the - search continues on to the source set to get the value. - Values from source are copy-on-write, i.e. any attempt to - modify one of them will end up putting the modified value - in the destination set. 
- """ - return source.createCopy() - -def initVar(var, d): - """Non-destructive var init for data structure""" - d.initVar(var) - -def keys(d): - """Return a list of keys in d""" - return d.keys() - -def expand(s, d, varname = None): - """Variable expansion using the data store""" - return d.expand(s, varname) - -def expandKeys(alterdata, readdata = None): - if readdata is None: - readdata = alterdata - - todolist = {} - for key in alterdata: - if not '${' in key: - continue - - ekey = expand(key, readdata) - if key == ekey: - continue - todolist[key] = ekey - - # These two for loops are split for performance to maximise the - # usefulness of the expand cache - for key in sorted(todolist): - ekey = todolist[key] - newval = alterdata.getVar(ekey, False) - if newval is not None: - val = alterdata.getVar(key, False) - if val is not None: - bb.warn("Variable key %s (%s) replaces original key %s (%s)." % (key, val, ekey, newval)) - alterdata.renameVar(key, ekey) - -def inheritFromOS(d, savedenv, permitted): - """Inherit variables from the initial environment.""" - exportlist = bb.utils.preserved_envvars_exported() - for s in savedenv.keys(): - if s in permitted: - try: - d.setVar(s, savedenv.getVar(s), op = 'from env') - if s in exportlist: - d.setVarFlag(s, "export", True, op = 'auto env export') - except TypeError: - pass - -def emit_var(var, o=sys.__stdout__, d = init(), all=False): - """Emit a variable to be sourced by a shell.""" - func = d.getVarFlag(var, "func", False) - if d.getVarFlag(var, 'python', False) and func: - return False - - export = bb.utils.to_boolean(d.getVarFlag(var, "export")) - unexport = bb.utils.to_boolean(d.getVarFlag(var, "unexport")) - if not all and not export and not unexport and not func: - return False - - try: - if all: - oval = d.getVar(var, False) - val = d.getVar(var) - except (KeyboardInterrupt): - raise - except Exception as exc: - o.write('# expansion of %s threw %s: %s\n' % (var, exc.__class__.__name__, str(exc))) - return False - - if all: - d.varhistory.emit(var, oval, val, o, d) - - if (var.find("-") != -1 or var.find(".") != -1 or var.find('{') != -1 or var.find('}') != -1 or var.find('+') != -1) and not all: - return False - - varExpanded = d.expand(var) - - if unexport: - o.write('unset %s\n' % varExpanded) - return False - - if val is None: - return False - - val = str(val) - - if varExpanded.startswith("BASH_FUNC_"): - varExpanded = varExpanded[10:-2] - val = val[3:] # Strip off "() " - o.write("%s() %s\n" % (varExpanded, val)) - o.write("export -f %s\n" % (varExpanded)) - return True - - if func: - # Write a comment indicating where the shell function came from (line number and filename) to make it easier - # for the user to diagnose task failures. This comment is also used by build.py to determine the metadata - # location of shell functions. 
- o.write("# line: {0}, file: {1}\n".format( - d.getVarFlag(var, "lineno", False), - d.getVarFlag(var, "filename", False))) - # NOTE: should probably check for unbalanced {} within the var - val = val.rstrip('\n') - o.write("%s() {\n%s\n}\n" % (varExpanded, val)) - return 1 - - if export: - o.write('export ') - - # if we're going to output this within doublequotes, - # to a shell, we need to escape the quotes in the var - alter = re.sub('"', '\\"', val) - alter = re.sub('\n', ' \\\n', alter) - alter = re.sub('\\$', '\\\\$', alter) - o.write('%s="%s"\n' % (varExpanded, alter)) - return False - -def emit_env(o=sys.__stdout__, d = init(), all=False): - """Emits all items in the data store in a format such that it can be sourced by a shell.""" - - isfunc = lambda key: bool(d.getVarFlag(key, "func", False)) - keys = sorted((key for key in d.keys() if not key.startswith("__")), key=isfunc) - grouped = groupby(keys, isfunc) - for isfunc, keys in grouped: - for key in sorted(keys): - emit_var(key, o, d, all and not isfunc) and o.write('\n') - -def exported_keys(d): - return (key for key in d.keys() if not key.startswith('__') and - bb.utils.to_boolean(d.getVarFlag(key, 'export')) and - not bb.utils.to_boolean(d.getVarFlag(key, 'unexport'))) - -def exported_vars(d): - k = list(exported_keys(d)) - for key in k: - try: - value = d.getVar(key) - except Exception as err: - bb.warn("%s: Unable to export ${%s}: %s" % (d.getVar("FILE"), key, err)) - continue - - if value is not None: - yield key, str(value) - -def emit_func(func, o=sys.__stdout__, d = init()): - """Emits all items in the data store in a format such that it can be sourced by a shell.""" - - keys = (key for key in d.keys() if not key.startswith("__") and not d.getVarFlag(key, "func", False)) - for key in sorted(keys): - emit_var(key, o, d, False) - - o.write('\n') - emit_var(func, o, d, False) and o.write('\n') - newdeps = bb.codeparser.ShellParser(func, logger).parse_shell(d.getVar(func)) - newdeps |= set((d.getVarFlag(func, "vardeps") or "").split()) - seen = set() - while newdeps: - deps = newdeps - seen |= deps - newdeps = set() - for dep in sorted(deps): - if d.getVarFlag(dep, "func", False) and not d.getVarFlag(dep, "python", False): - emit_var(dep, o, d, False) and o.write('\n') - newdeps |= bb.codeparser.ShellParser(dep, logger).parse_shell(d.getVar(dep)) - newdeps |= set((d.getVarFlag(dep, "vardeps") or "").split()) - newdeps -= seen - -_functionfmt = """ -def {function}(d): -{body}""" - -def emit_func_python(func, o=sys.__stdout__, d = init()): - """Emits all items in the data store in a format such that it can be sourced by a shell.""" - - def write_func(func, o, call = False): - body = d.getVar(func, False) - if not body.startswith("def"): - body = _functionfmt.format(function=func, body=body) - - o.write(body.strip() + "\n\n") - if call: - o.write(func + "(d)" + "\n\n") - - write_func(func, o, True) - pp = bb.codeparser.PythonParser(func, logger) - pp.parse_python(d.getVar(func, False)) - newdeps = pp.execs - newdeps |= set((d.getVarFlag(func, "vardeps") or "").split()) - seen = set() - while newdeps: - deps = newdeps - seen |= deps - newdeps = set() - for dep in deps: - if d.getVarFlag(dep, "func", False) and d.getVarFlag(dep, "python", False): - write_func(dep, o) - pp = bb.codeparser.PythonParser(dep, logger) - pp.parse_python(d.getVar(dep, False)) - newdeps |= pp.execs - newdeps |= set((d.getVarFlag(dep, "vardeps") or "").split()) - newdeps -= seen - -def build_dependencies(key, keys, mod_funcs, shelldeps, varflagsexcl, 
ignored_vars, d, codeparsedata): - def handle_contains(value, contains, exclusions, d): - newvalue = [] - if value: - newvalue.append(str(value)) - for k in sorted(contains): - if k in exclusions or k in ignored_vars: - continue - l = (d.getVar(k) or "").split() - for item in sorted(contains[k]): - for word in item.split(): - if not word in l: - newvalue.append("\n%s{%s} = Unset" % (k, item)) - break - else: - newvalue.append("\n%s{%s} = Set" % (k, item)) - return "".join(newvalue) - - def handle_remove(value, deps, removes, d): - for r in sorted(removes): - r2 = d.expandWithRefs(r, None) - value += "\n_remove of %s" % r - deps |= r2.references - deps = deps | (keys & r2.execs) - value = handle_contains(value, r2.contains, exclusions, d) - return value - - deps = set() - try: - if key in mod_funcs: - exclusions = set() - moddep = bb.codeparser.modulecode_deps[key] - value = handle_contains(moddep[4], moddep[3], exclusions, d) - return frozenset((moddep[0] | keys & moddep[1]) - ignored_vars), value - - if key[-1] == ']': - vf = key[:-1].split('[') - if vf[1] == "vardepvalueexclude": - return deps, "" - value, parser = d.getVarFlag(vf[0], vf[1], False, retparser=True) - deps |= parser.references - deps = deps | (keys & parser.execs) - deps -= ignored_vars - return frozenset(deps), value - varflags = d.getVarFlags(key, ["vardeps", "vardepvalue", "vardepsexclude", "exports", "postfuncs", "prefuncs", "lineno", "filename"]) or {} - vardeps = varflags.get("vardeps") - exclusions = varflags.get("vardepsexclude", "").split() - - if "vardepvalue" in varflags: - value = varflags.get("vardepvalue") - elif varflags.get("func"): - if varflags.get("python"): - value = codeparsedata.getVarFlag(key, "_content", False) - parser = bb.codeparser.PythonParser(key, logger) - parser.parse_python(value, filename=varflags.get("filename"), lineno=varflags.get("lineno")) - deps = deps | parser.references - deps = deps | (keys & parser.execs) - value = handle_contains(value, parser.contains, exclusions, d) - else: - value, parsedvar = codeparsedata.getVarFlag(key, "_content", False, retparser=True) - parser = bb.codeparser.ShellParser(key, logger) - parser.parse_shell(parsedvar.value) - deps = deps | shelldeps - deps = deps | parsedvar.references - deps = deps | (keys & parser.execs) | (keys & parsedvar.execs) - value = handle_contains(value, parsedvar.contains, exclusions, d) - if hasattr(parsedvar, "removes"): - value = handle_remove(value, deps, parsedvar.removes, d) - if vardeps is None: - parser.log.flush() - if "prefuncs" in varflags: - deps = deps | set(varflags["prefuncs"].split()) - if "postfuncs" in varflags: - deps = deps | set(varflags["postfuncs"].split()) - if "exports" in varflags: - deps = deps | set(varflags["exports"].split()) - else: - value, parser = d.getVarFlag(key, "_content", False, retparser=True) - deps |= parser.references - deps = deps | (keys & parser.execs) - value = handle_contains(value, parser.contains, exclusions, d) - if hasattr(parser, "removes"): - value = handle_remove(value, deps, parser.removes, d) - - if "vardepvalueexclude" in varflags: - exclude = varflags.get("vardepvalueexclude") - for excl in exclude.split('|'): - if excl: - value = value.replace(excl, '') - - # Add varflags, assuming an exclusion list is set - if varflagsexcl: - varfdeps = [] - for f in varflags: - if f not in varflagsexcl: - varfdeps.append('%s[%s]' % (key, f)) - if varfdeps: - deps |= set(varfdeps) - - deps |= set((vardeps or "").split()) - deps -= set(exclusions) - deps -= ignored_vars - except 
bb.parse.SkipRecipe: - raise - except Exception as e: - bb.warn("Exception during build_dependencies for %s" % key) - raise - return frozenset(deps), value - #bb.note("Variable %s references %s and calls %s" % (key, str(deps), str(execs))) - #d.setVarFlag(key, "vardeps", deps) - -def generate_dependencies(d, ignored_vars): - - mod_funcs = set(bb.codeparser.modulecode_deps.keys()) - keys = set(key for key in d if not key.startswith("__")) | mod_funcs - shelldeps = set(key for key in d.getVar("__exportlist", False) if bb.utils.to_boolean(d.getVarFlag(key, "export")) and not bb.utils.to_boolean(d.getVarFlag(key, "unexport"))) - varflagsexcl = d.getVar('BB_SIGNATURE_EXCLUDE_FLAGS') - - codeparserd = d.createCopy() - for forced in (d.getVar('BB_HASH_CODEPARSER_VALS') or "").split(): - key, value = forced.split("=", 1) - codeparserd.setVar(key, value) - - deps = {} - values = {} - - tasklist = d.getVar('__BBTASKS', False) or [] - for task in tasklist: - deps[task], values[task] = build_dependencies(task, keys, mod_funcs, shelldeps, varflagsexcl, ignored_vars, d, codeparserd) - newdeps = deps[task] - seen = set() - while newdeps: - nextdeps = newdeps - seen |= nextdeps - newdeps = set() - for dep in nextdeps: - if dep not in deps: - deps[dep], values[dep] = build_dependencies(dep, keys, mod_funcs, shelldeps, varflagsexcl, ignored_vars, d, codeparserd) - newdeps |= deps[dep] - newdeps -= seen - #print "For %s: %s" % (task, str(deps[task])) - return tasklist, deps, values - -def generate_dependency_hash(tasklist, gendeps, lookupcache, ignored_vars, fn): - taskdeps = {} - basehash = {} - - for task in tasklist: - data = lookupcache[task] - - if data is None: - bb.error("Task %s from %s seems to be empty?!" % (task, fn)) - data = [] - else: - data = [data] - - newdeps = gendeps[task] - seen = set() - while newdeps: - nextdeps = newdeps - seen |= nextdeps - newdeps = set() - for dep in nextdeps: - newdeps |= gendeps[dep] - newdeps -= seen - - alldeps = sorted(seen) - for dep in alldeps: - data.append(dep) - var = lookupcache[dep] - if var is not None: - data.append(str(var)) - k = fn + ":" + task - basehash[k] = hashlib.sha256("".join(data).encode("utf-8")).hexdigest() - taskdeps[task] = frozenset(seen) - - return taskdeps, basehash - -def inherits_class(klass, d): - val = d.getVar('__inherit_cache', False) or [] - needle = '/%s.bbclass' % klass - for v in val: - if v.endswith(needle): - return True - return False diff --git a/bitbake/lib/bb/data_smart.py b/bitbake/lib/bb/data_smart.py deleted file mode 100644 index 2e0d308588..0000000000 --- a/bitbake/lib/bb/data_smart.py +++ /dev/null @@ -1,1148 +0,0 @@ -""" -BitBake Smart Dictionary Implementation - -Functions for interacting with the data structure used by the -BitBake build tools. 
- -""" - -# Copyright (C) 2003, 2004 Chris Larson -# Copyright (C) 2004, 2005 Seb Frankengul -# Copyright (C) 2005, 2006 Holger Hans Peter Freyther -# Copyright (C) 2005 Uli Luckas -# Copyright (C) 2005 ROAD GmbH -# -# SPDX-License-Identifier: GPL-2.0-only -# -# Based on functions from the base bb module, Copyright 2003 Holger Schurig - -import builtins -import copy -import re -import sys -from collections.abc import MutableMapping -import logging -import hashlib -import bb, bb.codeparser -import bb.filter -from bb import utils -from bb.COW import COWDictBase - -logger = logging.getLogger("BitBake.Data") - -__setvar_keyword__ = [":append", ":prepend", ":remove"] -__setvar_regexp__ = re.compile(r'(?P.*?)(?P:append|:prepend|:remove)(:(?P[^A-Z]*))?$') -__expand_var_regexp__ = re.compile(r"\${[a-zA-Z0-9\-_+./~:]+}") -__expand_python_regexp__ = re.compile(r"\${@(?:{.*?}|.)+?}") -__whitespace_split__ = re.compile(r'(\s)') -__override_regexp__ = re.compile(r'[a-z0-9]+') - -bitbake_renamed_vars = { - "BB_ENV_WHITELIST": "BB_ENV_PASSTHROUGH", - "BB_ENV_EXTRAWHITE": "BB_ENV_PASSTHROUGH_ADDITIONS", - "BB_HASHBASE_WHITELIST": "BB_BASEHASH_IGNORE_VARS", - "BB_HASHCONFIG_WHITELIST": "BB_HASHCONFIG_IGNORE_VARS", - "BB_HASHTASK_WHITELIST": "BB_TASKHASH_IGNORE_TASKS", - "BB_SETSCENE_ENFORCE_WHITELIST": "BB_SETSCENE_ENFORCE_IGNORE_TASKS", - "MULTI_PROVIDER_WHITELIST": "BB_MULTI_PROVIDER_ALLOWED", - "BB_STAMP_WHITELIST": "is a deprecated variable and support has been removed", - "BB_STAMP_POLICY": "is a deprecated variable and support has been removed", -} - -def infer_caller_details(loginfo, parent = False, varval = True): - """Save the caller the trouble of specifying everything.""" - # Save effort. - if 'ignore' in loginfo and loginfo['ignore']: - return - # If nothing was provided, mark this as possibly unneeded. - if not loginfo: - loginfo['ignore'] = True - return - # Infer caller's likely values for variable (var) and value (value), - # to reduce clutter in the rest of the code. - above = None - def set_above(): - try: - raise Exception - except Exception: - tb = sys.exc_info()[2] - if parent: - return tb.tb_frame.f_back.f_back.f_back - else: - return tb.tb_frame.f_back.f_back - - if varval and ('variable' not in loginfo or 'detail' not in loginfo): - if not above: - above = set_above() - lcls = above.f_locals.items() - for k, v in lcls: - if k == 'value' and 'detail' not in loginfo: - loginfo['detail'] = v - if k == 'var' and 'variable' not in loginfo: - loginfo['variable'] = v - # Infer file/line/function from traceback - # Don't use traceback.extract_stack() since it fills the line contents which - # we don't need and that hits stat syscalls - if 'file' not in loginfo: - if not above: - above = set_above() - f = above.f_back - line = f.f_lineno - file = f.f_code.co_filename - func = f.f_code.co_name - loginfo['file'] = file - loginfo['line'] = line - if func not in loginfo: - loginfo['func'] = func - -class VariableParse: - def __init__(self, varname, d, unexpanded_value = None, val = None): - self.varname = varname - self.d = d - self.value = val - self.unexpanded_value = unexpanded_value - - self.references = set() - self.execs = set() - self.contains = {} - - def var_sub(self, match): - key = match.group()[2:-1] - if self.varname and key: - if self.varname == key: - raise Exception("variable %s references itself!" 
% self.varname) - var = self.d.getVarFlag(key, "_content") - self.references.add(key) - if var is not None: - return var - else: - return match.group() - - def python_sub(self, match): - if isinstance(match, str): - code = match - else: - code = match.group()[3:-1] - - # Do not run code that contains one or more unexpanded variables - # instead return the code with the characters we removed put back - if __expand_var_regexp__.findall(code): - return "${@" + code + "}" - - if self.varname: - varname = 'Var <%s>' % self.varname - else: - varname = '' - codeobj = compile(code.strip(), varname, "eval") - - parser = bb.codeparser.PythonParser(self.varname, logger) - parser.parse_python(code) - if self.varname: - vardeps = self.d.getVarFlag(self.varname, "vardeps") - if vardeps is None: - parser.log.flush() - else: - parser.log.flush() - self.references |= parser.references - self.execs |= parser.execs - - for k in parser.contains: - if k not in self.contains: - self.contains[k] = parser.contains[k].copy() - else: - self.contains[k].update(parser.contains[k]) - value = utils.better_eval(codeobj, DataContext(self.d), {'d' : self.d}) - return str(value) - -class DataContext(dict): - excluded = set([i for i in dir(builtins) if not i.startswith('_')] + ['oe']) - - def __init__(self, metadata, **kwargs): - self.metadata = metadata - dict.__init__(self, **kwargs) - self['d'] = metadata - self.context = set(bb.utils.get_context()) - - def __missing__(self, key): - if key in self.excluded or key in self.context: - raise KeyError(key) - - value = self.metadata.getVar(key) - if value is None: - raise KeyError(key) - else: - return value - -class ExpansionError(Exception): - def __init__(self, varname, expression, exception): - self.expression = expression - self.variablename = varname - self.exception = exception - self.varlist = [varname or expression or ""] - if varname: - if expression: - self.msg = "Failure expanding variable %s, expression was %s which triggered exception %s: %s" % (varname, expression, type(exception).__name__, exception) - else: - self.msg = "Failure expanding variable %s: %s: %s" % (varname, type(exception).__name__, exception) - else: - self.msg = "Failure expanding expression %s which triggered exception %s: %s" % (expression, type(exception).__name__, exception) - Exception.__init__(self, self.msg) - self.args = (varname, expression, exception) - - def addVar(self, varname): - if varname: - self.varlist.append(varname) - - def __str__(self): - chain = "\nThe variable dependency chain for the failure is: " + " -> ".join(self.varlist) - return self.msg + chain - -class IncludeHistory(object): - def __init__(self, parent = None, filename = '[TOP LEVEL]'): - self.parent = parent - self.filename = filename - self.children = [] - self.current = self - - def copy(self): - new = IncludeHistory(self.parent, self.filename) - for c in self.children: - new.children.append(c) - return new - - def include(self, filename): - newfile = IncludeHistory(self.current, filename) - self.current.children.append(newfile) - self.current = newfile - return self - - def __enter__(self): - pass - - def __exit__(self, a, b, c): - if self.current.parent: - self.current = self.current.parent - else: - bb.warn("Include log: Tried to finish '%s' at top level." 
% self.filename) - return False - - def emit(self, o, level = 0): - """Emit an include history file, and its children.""" - if level: - spaces = " " * (level - 1) - o.write("# %s%s" % (spaces, self.filename)) - if len(self.children) > 0: - o.write(" includes:") - else: - o.write("#\n# INCLUDE HISTORY:\n#") - level = level + 1 - for child in self.children: - o.write("\n") - child.emit(o, level) - -class VariableHistory(object): - def __init__(self, dataroot): - self.dataroot = dataroot - self.variables = COWDictBase.copy() - - def copy(self): - new = VariableHistory(self.dataroot) - new.variables = self.variables.copy() - return new - - def __getstate__(self): - vardict = {} - for k, v in self.variables.iteritems(): - vardict[k] = v - return {'dataroot': self.dataroot, - 'variables': vardict} - - def __setstate__(self, state): - self.dataroot = state['dataroot'] - self.variables = COWDictBase.copy() - for k, v in state['variables'].items(): - self.variables[k] = v - - def record(self, *kwonly, **loginfo): - if not self.dataroot._tracking: - return - if len(kwonly) > 0: - raise TypeError - infer_caller_details(loginfo, parent = True) - if 'ignore' in loginfo and loginfo['ignore']: - return - if 'op' not in loginfo or not loginfo['op']: - loginfo['op'] = 'set' - if 'variable' not in loginfo or 'file' not in loginfo: - raise ValueError("record() missing variable or file.") - var = loginfo['variable'] - if var not in self.variables: - self.variables[var] = [] - if not isinstance(self.variables[var], list): - return - if 'nodups' in loginfo and loginfo in self.variables[var]: - return - self.variables[var].append(loginfo.copy()) - - def rename_variable_hist(self, oldvar, newvar): - if not self.dataroot._tracking: - return - if oldvar not in self.variables: - return - if newvar not in self.variables: - self.variables[newvar] = [] - for i in self.variables[oldvar]: - self.variables[newvar].append(i.copy()) - - def variable(self, var): - varhistory = [] - if var in self.variables: - varhistory.extend(self.variables[var]) - return varhistory - - def emit(self, var, oval, val, o, d): - history = self.variable(var) - - # Append override history - if var in d.overridedata: - for (r, override) in d.overridedata[var]: - for event in self.variable(r): - loginfo = event.copy() - if 'flag' in loginfo and not loginfo['flag'].startswith(("_", ":")): - continue - loginfo['variable'] = var - loginfo['op'] = 'override[%s]:%s' % (override, loginfo['op']) - history.append(loginfo) - - commentVal = re.sub('\n', '\n#', str(oval)) - if history: - if len(history) == 1: - o.write("#\n# $%s\n" % var) - else: - o.write("#\n# $%s [%d operations]\n" % (var, len(history))) - for event in history: - # o.write("# %s\n" % str(event)) - if 'func' in event: - # If we have a function listed, this is internal - # code, not an operation in a config file, and the - # full path is distracting. 
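# Editor's note (illustrative, not part of the original file): for a variable
# with recorded history, the loop below emits comment lines of the form
#
#   #
#   # $FOO [2 operations]
#   # set conf/local.conf:12
#   # "bar"
#   # :append conf/site.conf:4
#   # " baz"
#   # pre-expansion value:
#   # "bar baz"
#
# (the file, line and value details here are hypothetical).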
- event['file'] = re.sub('.*/', '', event['file']) - display_func = ' [%s]' % event['func'] - else: - display_func = '' - if 'flag' in event: - flag = '[%s] ' % (event['flag']) - else: - flag = '' - o.write("# %s %s:%s%s\n# %s\"%s\"\n" % \ - (event['op'], event['file'], event['line'], display_func, flag, re.sub('\n', '\n# ', str(event['detail'])))) - if len(history) > 1: - o.write("# pre-expansion value:\n") - o.write('# "%s"\n' % (commentVal)) - else: - o.write("#\n# $%s\n# [no history recorded]\n#\n" % var) - o.write('# "%s"\n' % (commentVal)) - - def get_variable_files(self, var): - """Get the files where operations are made on a variable""" - var_history = self.variable(var) - files = [] - for event in var_history: - files.append(event['file']) - return files - - def get_variable_lines(self, var, f): - """Get the line where a operation is made on a variable in file f""" - var_history = self.variable(var) - lines = [] - for event in var_history: - if f== event['file']: - line = event['line'] - lines.append(line) - return lines - - def get_variable_refs(self, var): - """Return a dict of file/line references""" - var_history = self.variable(var) - refs = {} - for event in var_history: - if event['file'] not in refs: - refs[event['file']] = [] - refs[event['file']].append(event['line']) - return refs - - def get_variable_items_files(self, var): - """ - Use variable history to map items added to a list variable and - the files in which they were added. - """ - d = self.dataroot - history = self.variable(var) - finalitems = (d.getVar(var) or '').split() - filemap = {} - isset = False - for event in history: - if 'flag' in event: - continue - if event['op'] == ':remove': - continue - if isset and event['op'] == 'set?': - continue - isset = True - items = d.expand(str(event['detail'])).split() - for item in items: - # This is a little crude but is belt-and-braces to avoid us - # having to handle every possible operation type specifically - if item in finalitems and not item in filemap: - filemap[item] = event['file'] - return filemap - - def del_var_history(self, var, f=None, line=None): - """If file f and line are not given, the entire history of var is deleted""" - if var in self.variables: - if f and line: - self.variables[var] = [ x for x in self.variables[var] if x['file']!=f and x['line']!=line] - else: - self.variables[var] = [] - -def _print_rename_error(var, loginfo, renamedvars, fullvar=None): - info = "" - if "file" in loginfo: - info = " file: %s" % loginfo["file"] - if "line" in loginfo: - info += " line: %s" % loginfo["line"] - if fullvar and fullvar != var: - info += " referenced as: %s" % fullvar - if info: - info = " (%s)" % info.strip() - renameinfo = renamedvars[var] - if " " in renameinfo: - # A space signals a string to display instead of a rename - bb.erroronce('Variable %s %s%s' % (var, renameinfo, info)) - else: - bb.erroronce('Variable %s has been renamed to %s%s' % (var, renameinfo, info)) - -class DataSmart(MutableMapping): - def __init__(self): - self.dict = {} - - self.inchistory = IncludeHistory() - self.varhistory = VariableHistory(self) - self.filters = {} - self._tracking = False - self._var_renames = {} - self._var_renames.update(bitbake_renamed_vars) - - self.expand_cache = {} - - # cookie monster tribute - # Need to be careful about writes to overridedata as - # its only a shallow copy, could influence other data store - # copies! 
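# Editor's note (illustrative, not part of the original file): the
# copy-on-write discipline referred to here is visible in
# _setvar_update_overrides() and delVar() below, e.g.:
#
#   self.overridedata[shortvar] = list(self.overridedata[shortvar])
#   self.overridedata[shortvar].append([var, override])
#
# the list is recreated before mutation so that datastore copies still
# sharing the old list are not affected.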
- self.overridedata = {} - self.overrides = None - self.overridevars = set(["OVERRIDES", "FILE"]) - self.inoverride = False - - def enableTracking(self): - self._tracking = True - - def disableTracking(self): - self._tracking = False - - def expandWithRefs(self, s, varname): - - if not isinstance(s, str): # sanity check - return VariableParse(varname, self, s, s) - - varparse = VariableParse(varname, self, s) - - while s.find('${') != -1: - olds = s - try: - s = __expand_var_regexp__.sub(varparse.var_sub, s) - try: - s = __expand_python_regexp__.sub(varparse.python_sub, s) - except SyntaxError as e: - # Likely unmatched brackets, just don't expand the expression - if e.msg != "EOL while scanning string literal" and not e.msg.startswith("unterminated string literal"): - raise - if s == olds: - break - except ExpansionError as e: - e.addVar(varname) - raise - except bb.parse.SkipRecipe: - raise - except bb.BBHandledException: - raise - except Exception as exc: - tb = sys.exc_info()[2] - raise ExpansionError(varname, s, exc).with_traceback(tb) from exc - - varparse.value = s - - return varparse - - def expand(self, s, varname = None): - return self.expandWithRefs(s, varname).value - - def need_overrides(self): - if self.overrides is not None: - return - if self.inoverride: - return - overrride_stack = [] - for count in range(5): - self.inoverride = True - # Can end up here recursively so setup dummy values - self.overrides = [] - self.overridesset = set() - self.overrides = (self.getVar("OVERRIDES") or "").split(":") or [] - overrride_stack.append(self.overrides) - self.overridesset = set(self.overrides) - self.inoverride = False - self.expand_cache = {} - newoverrides = (self.getVar("OVERRIDES") or "").split(":") or [] - if newoverrides == self.overrides: - break - self.overrides = newoverrides - self.overridesset = set(self.overrides) - else: - bb.fatal("Overrides could not be expanded into a stable state after 5 iterations, overrides must be being referenced by other overridden variables in some recursive fashion. Please provide your configuration to bitbake-devel so we can laugh, er, I mean try and understand how to make it work. The list of failing override expansions: %s" % "\n".join(str(s) for s in overrride_stack)) - - def initVar(self, var): - self.expand_cache = {} - if not var in self.dict: - self.dict[var] = {} - - def _findVar(self, var): - dest = self.dict - while dest: - if var in dest: - return dest[var] - - if "_data" not in dest: - break - dest = dest["_data"] - return None - - def _makeShadowCopy(self, var): - if var in self.dict: - return - - local_var = self._findVar(var) - - if local_var: - self.dict[var] = copy.copy(local_var) - else: - self.initVar(var) - - def hasOverrides(self, var): - return var in self.overridedata - - def setVar(self, var, value, **loginfo): - #print("var=" + str(var) + " val=" + str(value)) - - if not var.startswith("__anon_") and ("_append" in var or "_prepend" in var or "_remove" in var): - info = "%s" % var - if "file" in loginfo: - info += " file: %s" % loginfo["file"] - if "line" in loginfo: - info += " line: %s" % loginfo["line"] - bb.fatal("Variable %s contains an operation using the old override syntax. Please convert this layer/metadata before attempting to use with a newer bitbake." 
% info) - - shortvar = var.split(":", 1)[0] - if shortvar in self._var_renames: - _print_rename_error(shortvar, loginfo, self._var_renames, fullvar=var) - # Mark that we have seen a renamed variable - self.setVar("_FAILPARSINGERRORHANDLED", True) - - self.expand_cache = {} - parsing=False - if 'parsing' in loginfo: - parsing=True - - if 'op' not in loginfo: - loginfo['op'] = "set" - - match = __setvar_regexp__.match(var) - if match and match.group("keyword") in __setvar_keyword__: - base = match.group('base') - keyword = match.group("keyword") - override = match.group('add') - l = self.getVarFlag(base, keyword, False) or [] - l.append([value, override]) - self.setVarFlag(base, keyword, l, ignore=True) - # And cause that to be recorded: - loginfo['detail'] = value - loginfo['variable'] = base - if override: - loginfo['op'] = '%s[%s]' % (keyword, override) - else: - loginfo['op'] = keyword - self.varhistory.record(**loginfo) - # pay the cookie monster - - # more cookies for the cookie monster - self._setvar_update_overrides(base, **loginfo) - - if base in self.overridevars: - self._setvar_update_overridevars(var, value) - return - - if not var in self.dict: - self._makeShadowCopy(var) - - if not parsing: - if ":append" in self.dict[var]: - del self.dict[var][":append"] - if ":prepend" in self.dict[var]: - del self.dict[var][":prepend"] - if ":remove" in self.dict[var]: - del self.dict[var][":remove"] - if var in self.overridedata: - active = [] - self.need_overrides() - for (r, o) in self.overridedata[var]: - if o in self.overridesset: - active.append(r) - elif ":" in o: - if set(o.split(":")).issubset(self.overridesset): - active.append(r) - for a in active: - self.delVar(a) - del self.overridedata[var] - - # more cookies for the cookie monster - if ':' in var: - self._setvar_update_overrides(var, **loginfo) - - # setting var - self.dict[var]["_content"] = value - self.varhistory.record(**loginfo) - - if var in self.overridevars: - self._setvar_update_overridevars(var, value) - - def _setvar_update_overridevars(self, var, value): - vardata = self.expandWithRefs(value, var) - new = vardata.references - new.update(vardata.contains.keys()) - while not new.issubset(self.overridevars): - nextnew = set() - self.overridevars.update(new) - for i in new: - vardata = self.expandWithRefs(self.getVar(i), i) - nextnew.update(vardata.references) - nextnew.update(vardata.contains.keys()) - new = nextnew - self.overrides = None - self.expand_cache = {} - - def _setvar_update_overrides(self, var, **loginfo): - # aka pay the cookie monster - override = var[var.rfind(':')+1:] - shortvar = var[:var.rfind(':')] - while override and __override_regexp__.match(override): - if shortvar not in self.overridedata: - self.overridedata[shortvar] = [] - if [var, override] not in self.overridedata[shortvar]: - # Force CoW by recreating the list first - self.overridedata[shortvar] = list(self.overridedata[shortvar]) - self.overridedata[shortvar].append([var, override]) - override = None - if ":" in shortvar: - override = var[shortvar.rfind(':')+1:] - shortvar = var[:shortvar.rfind(':')] - if len(shortvar) == 0: - override = None - - def getVar(self, var, expand=True, noweakdefault=False, parsing=False): - return self.getVarFlag(var, "_content", expand, noweakdefault, parsing) - - def renameVar(self, key, newkey, **loginfo): - """ - Rename the variable key to newkey - """ - if key == newkey: - bb.warn("Calling renameVar with equivalent keys (%s) is invalid" % key) - return - - val = self.getVar(key, 0, parsing=True) - if 
val is not None: - self.varhistory.rename_variable_hist(key, newkey) - loginfo['variable'] = newkey - loginfo['op'] = 'rename from %s' % key - loginfo['detail'] = val - self.varhistory.record(**loginfo) - self.setVar(newkey, val, ignore=True, parsing=True) - - srcflags = self.getVarFlags(key, False, True) or {} - for i in srcflags: - - if i not in (__setvar_keyword__): - continue - src = srcflags[i] - - dest = self.getVarFlag(newkey, i, False) or [] - dest.extend(src) - self.setVarFlag(newkey, i, dest, ignore=True) - - if key in self.overridedata: - self.overridedata[newkey] = [] - for (v, o) in self.overridedata[key]: - self.overridedata[newkey].append([v.replace(key, newkey), o]) - self.renameVar(v, v.replace(key, newkey)) - - if ':' in newkey and val is None: - self._setvar_update_overrides(newkey, **loginfo) - - loginfo['variable'] = key - loginfo['op'] = 'rename (to)' - loginfo['detail'] = newkey - self.varhistory.record(**loginfo) - self.delVar(key, ignore=True) - - def appendVar(self, var, value, **loginfo): - loginfo['op'] = 'append' - self.varhistory.record(**loginfo) - self.setVar(var + ":append", value, ignore=True, parsing=True) - - def prependVar(self, var, value, **loginfo): - loginfo['op'] = 'prepend' - self.varhistory.record(**loginfo) - self.setVar(var + ":prepend", value, ignore=True, parsing=True) - - def delVar(self, var, **loginfo): - self.expand_cache = {} - - loginfo['detail'] = "" - loginfo['op'] = 'del' - self.varhistory.record(**loginfo) - self.dict[var] = {} - if var in self.overridedata: - del self.overridedata[var] - if ':' in var: - override = var[var.rfind(':')+1:] - shortvar = var[:var.rfind(':')] - while override and __override_regexp__.match(override): - try: - if shortvar in self.overridedata: - # Force CoW by recreating the list first - self.overridedata[shortvar] = list(self.overridedata[shortvar]) - self.overridedata[shortvar].remove([var, override]) - except ValueError as e: - pass - override = None - if ":" in shortvar: - override = var[shortvar.rfind(':')+1:] - shortvar = var[:shortvar.rfind(':')] - if len(shortvar) == 0: - override = None - - def setVarFlag(self, var, flag, value, **loginfo): - self.expand_cache = {} - - if var == "BB_RENAMED_VARIABLES": - self._var_renames[flag] = value - - if var in self._var_renames: - _print_rename_error(var, loginfo, self._var_renames) - # Mark that we have seen a renamed variable - self.setVar("_FAILPARSINGERRORHANDLED", True) - - if 'op' not in loginfo: - loginfo['op'] = "set" - loginfo['flag'] = flag - self.varhistory.record(**loginfo) - if not var in self.dict: - self._makeShadowCopy(var) - self.dict[var][flag] = value - - if flag == "_defaultval" and ':' in var: - self._setvar_update_overrides(var, **loginfo) - if flag == "_defaultval" and var in self.overridevars: - self._setvar_update_overridevars(var, value) - - if flag == "unexport" or flag == "export": - if not "__exportlist" in self.dict: - self._makeShadowCopy("__exportlist") - if not "_content" in self.dict["__exportlist"]: - self.dict["__exportlist"]["_content"] = set() - self.dict["__exportlist"]["_content"].add(var) - - def getVarFlag(self, var, flag, expand=True, noweakdefault=False, parsing=False, retparser=False): - if flag == "_content": - cachename = var - else: - if not flag: - bb.warn("Calling getVarFlag with flag unset is invalid") - return None - cachename = var + "[" + flag + "]" - - if not expand and retparser and cachename in self.expand_cache: - return self.expand_cache[cachename].unexpanded_value, self.expand_cache[cachename] - - 
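# Editor's note (illustrative, not part of the original file): with
# OVERRIDES containing "arm" and
#
#   FOO = "a"
#   FOO:arm = "b"
#
# the override resolution below makes getVar("FOO") return "b".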
if expand and cachename in self.expand_cache: - return self.expand_cache[cachename].value - - local_var = self._findVar(var) - value = None - removes = set() - if flag == "_content" and not parsing: - overridedata = self.overridedata.get(var, None) - if flag == "_content" and not parsing and overridedata is not None: - match = False - active = {} - self.need_overrides() - for (r, o) in overridedata: - # FIXME What about double overrides both with "_" in the name? - if o in self.overridesset: - active[o] = r - elif ":" in o: - if set(o.split(":")).issubset(self.overridesset): - active[o] = r - - mod = True - while mod: - mod = False - for o in self.overrides: - for a in active.copy(): - if a.endswith(":" + o): - t = active[a] - del active[a] - active[a.replace(":" + o, "")] = t - mod = True - elif a == o: - match = active[a] - del active[a] - if match: - value, subparser = self.getVarFlag(match, "_content", False, retparser=True) - if hasattr(subparser, "removes"): - # We have to carry the removes from the overridden variable to apply at the - # end of processing - removes = subparser.removes - - if local_var is not None and value is None: - if flag in local_var: - value = copy.copy(local_var[flag]) - elif flag == "_content" and "_defaultval" in local_var and not noweakdefault: - value = copy.copy(local_var["_defaultval"]) - elif "_defaultval_flag_"+flag in local_var and not noweakdefault: - value = copy.copy(local_var["_defaultval_flag_"+flag]) - - - if flag == "_content" and local_var is not None and ":append" in local_var and not parsing: - self.need_overrides() - for (r, o) in local_var[":append"]: - match = True - if o: - for o2 in o.split(":"): - if not o2 in self.overrides: - match = False - if match: - if value is None: - value = "" - value = value + r - - if flag == "_content" and local_var is not None and ":prepend" in local_var and not parsing: - self.need_overrides() - for (r, o) in local_var[":prepend"]: - - match = True - if o: - for o2 in o.split(":"): - if not o2 in self.overrides: - match = False - if match: - if value is None: - value = "" - value = r + value - - parser = None - if expand or retparser: - parser = self.expandWithRefs(value, cachename) - if expand: - value = parser.value - - if value and flag == "_content" and local_var is not None and ":remove" in local_var and not parsing: - self.need_overrides() - for (r, o) in local_var[":remove"]: - match = True - if o: - for o2 in o.split(":"): - if not o2 in self.overrides: - match = False - if match: - removes.add(r) - - if value and flag == "_content" and not parsing: - if removes and parser: - expanded_removes = {} - for r in removes: - expanded_removes[r] = self.expand(r).split() - - parser.removes = set() - val = [] - for v in __whitespace_split__.split(parser.value): - skip = False - for r in removes: - if v in expanded_removes[r]: - parser.removes.add(r) - skip = True - if skip: - continue - val.append(v) - parser.value = "".join(val) - if expand: - value = parser.value - - if value and expand and flag == "_content": - basevar = var.split(":")[0] - if basevar in self.filters: - value = bb.filter.apply_filters(value, [self.filters[basevar],]) - parser.value = value - - if parser: - self.expand_cache[cachename] = parser - - if retparser: - return value, parser - - return value - - def delVarFlag(self, var, flag, **loginfo): - self.expand_cache = {} - - local_var = self._findVar(var) - if not local_var: - return - if not var in self.dict: - self._makeShadowCopy(var) - - if var in self.dict and flag in 
self.dict[var]: - loginfo['detail'] = "" - loginfo['op'] = 'delFlag' - loginfo['flag'] = flag - self.varhistory.record(**loginfo) - - del self.dict[var][flag] - if ("_defaultval_flag_" + flag) in self.dict[var]: - del self.dict[var]["_defaultval_flag_" + flag] - - def appendVarFlag(self, var, flag, value, **loginfo): - loginfo['op'] = 'append' - loginfo['flag'] = flag - self.varhistory.record(**loginfo) - newvalue = (self.getVarFlag(var, flag, False) or "") + value - self.setVarFlag(var, flag, newvalue, ignore=True) - - def prependVarFlag(self, var, flag, value, **loginfo): - loginfo['op'] = 'prepend' - loginfo['flag'] = flag - self.varhistory.record(**loginfo) - newvalue = value + (self.getVarFlag(var, flag, False) or "") - self.setVarFlag(var, flag, newvalue, ignore=True) - - def setVarFlags(self, var, flags, **loginfo): - self.expand_cache = {} - infer_caller_details(loginfo) - if not var in self.dict: - self._makeShadowCopy(var) - - for i in flags: - if i == "_content": - continue - loginfo['flag'] = i - loginfo['detail'] = flags[i] - self.varhistory.record(**loginfo) - self.dict[var][i] = flags[i] - - def getVarFlags(self, var, expand = False, internalflags=False): - local_var = self._findVar(var) - flags = {} - - if local_var: - for i, val in local_var.items(): - if i.startswith("_defaultval_flag_") and not internalflags: - i = i[len("_defaultval_flag_"):] - if i not in local_var: - flags[i] = val - elif i.startswith(("_", ":")) and not internalflags: - continue - else: - flags[i] = val - - if expand and i in expand: - flags[i] = self.expand(flags[i], var + "[" + i + "]") - if len(flags) == 0: - return None - return flags - - def delVarFlags(self, var, **loginfo): - self.expand_cache = {} - if not var in self.dict: - self._makeShadowCopy(var) - - if var in self.dict: - content = None - - loginfo['op'] = 'delete flags' - self.varhistory.record(**loginfo) - - # try to save the content - if "_content" in self.dict[var]: - content = self.dict[var]["_content"] - self.dict[var] = {} - self.dict[var]["_content"] = content - else: - del self.dict[var] - - def createCopy(self): - """ - Create a copy of self by setting _data to self - """ - # we really want this to be a DataSmart... - data = DataSmart() - data.dict["_data"] = self.dict - data.varhistory = self.varhistory.copy() - data.varhistory.dataroot = data - data.inchistory = self.inchistory.copy() - data.filters = self.filters.copy() - - data._tracking = self._tracking - data._var_renames = self._var_renames - - data.overrides = None - data.overridevars = copy.copy(self.overridevars) - # Should really be a deepcopy but has heavy overhead. - # Instead, we're careful with writes. 
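# Editor's note (illustrative, not part of the original file): after
#
#   child = parent.createCopy()
#   child.setVar("A", "1")
#
# the write lands only in child.dict; other reads on child still reach the
# parent's values through the child.dict["_data"] link walked by _findVar().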
- data.overridedata = copy.copy(self.overridedata) - - return data - - def expandVarref(self, variable, parents=False): - """Find all references to variable in the data and expand it - in place, optionally descending to parent datastores.""" - - if parents: - keys = iter(self) - else: - keys = self.localkeys() - - ref = '${%s}' % variable - value = self.getVar(variable, False) - for key in keys: - referrervalue = self.getVar(key, False) - if referrervalue and isinstance(referrervalue, str) and ref in referrervalue: - self.setVar(key, referrervalue.replace(ref, value)) - - def setVarFilter(self, var, filter): - if filter: - self.filters[var] = filter - else: - try: - del self.filters[var] - except KeyError: - pass - - def localkeys(self): - for key in self.dict: - if key not in ['_data']: - yield key - - def __iter__(self): - deleted = set() - overrides = set() - def keylist(d): - klist = set() - for key in d: - if key in ["_data"]: - continue - if key in deleted: - continue - if key in overrides: - continue - if not d[key]: - deleted.add(key) - continue - klist.add(key) - - if "_data" in d: - klist |= keylist(d["_data"]) - - return klist - - self.need_overrides() - for var in self.overridedata: - for (r, o) in self.overridedata[var]: - if o in self.overridesset: - overrides.add(var) - elif ":" in o: - if set(o.split(":")).issubset(self.overridesset): - overrides.add(var) - - for k in keylist(self.dict): - yield k - - for k in overrides: - yield k - - def __len__(self): - return len(frozenset(iter(self))) - - def __getitem__(self, item): - value = self.getVar(item, False) - if value is None: - raise KeyError(item) - else: - return value - - def __setitem__(self, var, value): - self.setVar(var, value) - - def __delitem__(self, var): - self.delVar(var) - - def get_hash(self): - data = {} - d = self.createCopy() - bb.data.expandKeys(d) - - config_ignore_vars = set((d.getVar("BB_HASHCONFIG_IGNORE_VARS") or "").split()) - keys = set(key for key in iter(d) if not key.startswith("__")) - for key in keys: - if key in config_ignore_vars: - continue - - value = d.getVar(key, False) or "" - if type(value) is type(self): - data.update({key:value.get_hash()}) - else: - data.update({key:value}) - - varflags = d.getVarFlags(key, internalflags = True, expand=["vardepvalue"]) - if not varflags: - continue - for f in varflags: - if f == "_content": - continue - data.update({'%s[%s]' % (key, f):varflags[f]}) - - for key in ["__BBTASKS", "__BBANONFUNCS", "__BBHANDLERS"]: - bb_list = d.getVar(key, False) or [] - data.update({key:str(bb_list)}) - - if key == "__BBANONFUNCS": - for i in bb_list: - value = d.getVar(i, False) or "" - data.update({i:value}) - - moddeps = bb.codeparser.modulecode_deps - for dep in sorted(moddeps): - # Ignore visitor code, sort sets - data.update({'moddep[%s]' % dep : [sorted(moddeps[dep][0]), sorted(moddeps[dep][1]), sorted(moddeps[dep][2]), sorted(moddeps[dep][3]), moddeps[dep][4]]}) - - data_str = str([(k, data[k]) for k in sorted(data.keys())]) - return hashlib.sha256(data_str.encode("utf-8")).hexdigest() diff --git a/bitbake/lib/bb/event.py b/bitbake/lib/bb/event.py deleted file mode 100644 index ddf1006c29..0000000000 --- a/bitbake/lib/bb/event.py +++ /dev/null @@ -1,889 +0,0 @@ -""" -BitBake 'Event' implementation - -Classes and functions for manipulating 'events' in the -BitBake build tools. 
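Typical usage (editor's note, not part of the original docstring): handlers
are registered with bb.event.register(name, handler, mask) and events are
delivered to them with bb.event.fire(event, d); see the register() and
fire() functions below.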
-""" - -# Copyright (C) 2003, 2004 Chris Larson -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import ast -import atexit -import collections -import logging -import pickle -import sys -import threading -import traceback - -import bb.utils - -# This is the pid for which we should generate the event. This is set when -# the runqueue forks off. -worker_pid = 0 -worker_fire = None - -logger = logging.getLogger('BitBake.Event') - -class Event(object): - """Base class for events""" - - def __init__(self): - self.pid = worker_pid - - -class HeartbeatEvent(Event): - """Triggered at regular time intervals of 10 seconds. Other events can fire much more often - (runQueueTaskStarted when there are many short tasks) or not at all for long periods - of time (again runQueueTaskStarted, when there is just one long-running task), so this - event is more suitable for doing some task-independent work occasionally.""" - def __init__(self, time): - Event.__init__(self) - self.time = time - -Registered = 10 -AlreadyRegistered = 14 - -def get_class_handlers(): - return _handlers - -def set_class_handlers(h): - global _handlers - _handlers = h - -def clean_class_handlers(): - return collections.OrderedDict() - -# Internal -_handlers = clean_class_handlers() -_ui_handlers = {} -_ui_logfilters = {} -_ui_handler_seq = 0 -_event_handler_map = {} -_catchall_handlers = {} -_eventfilter = None -_uiready = False -_thread_lock = threading.Lock() -_heartbeat_enabled = False -_should_exit = threading.Event() - -def enable_threadlock(): - # Always needed now - return - -def disable_threadlock(): - # Always needed now - return - -def enable_heartbeat(): - global _heartbeat_enabled - _heartbeat_enabled = True - -def disable_heartbeat(): - global _heartbeat_enabled - _heartbeat_enabled = False - -# -# In long running code, this function should be called periodically -# to check if we should exit due to an interuption (.e.g Ctrl+C from the UI) -# -def check_for_interrupts(): - global _should_exit - if _should_exit.is_set(): - bb.warn("Exiting due to interrupt.") - raise bb.BBHandledException() - -def execute_handler(name, handler, event, d): - event.data = d - try: - ret = handler(event, d) - except (bb.parse.SkipRecipe, bb.BBHandledException): - raise - except Exception: - etype, value, tb = sys.exc_info() - logger.error("Execution of event handler '%s' failed" % name, - exc_info=(etype, value, tb.tb_next)) - raise - except SystemExit as exc: - if exc.code != 0: - logger.error("Execution of event handler '%s' failed" % name) - raise - finally: - del event.data - - -def fire_class_handlers(event, d): - if isinstance(event, logging.LogRecord): - return - - eid = str(event.__class__)[8:-2] - evt_hmap = _event_handler_map.get(eid, {}) - for name, handler in list(_handlers.items()): - if name in _catchall_handlers or name in evt_hmap: - if _eventfilter: - if not _eventfilter(name, handler, event, d): - continue - if d is not None and not name in (d.getVar("__BBHANDLERS_MC") or set()): - continue - execute_handler(name, handler, event, d) - -ui_queue = [] -@atexit.register -def print_ui_queue(): - global ui_queue - """If we're exiting before a UI has been spawned, display any queued - LogRecords to the console.""" - logger = logging.getLogger("BitBake") - if not _uiready: - from bb.msg import BBLogFormatter - # Flush any existing buffered content - try: - sys.stdout.flush() - except: - pass - try: - sys.stderr.flush() - except: - pass - stdout = logging.StreamHandler(sys.stdout) - stderr = logging.StreamHandler(sys.stderr) - 
formatter = BBLogFormatter("%(levelname)s: %(message)s") - stdout.setFormatter(formatter) - stderr.setFormatter(formatter) - - # First check to see if we have any proper messages - msgprint = False - msgerrs = False - - # Should we print to stderr? - for event in ui_queue[:]: - if isinstance(event, logging.LogRecord) and event.levelno >= logging.WARNING: - msgerrs = True - break - - if msgerrs: - logger.addHandler(stderr) - else: - logger.addHandler(stdout) - - for event in ui_queue[:]: - if isinstance(event, logging.LogRecord): - if event.levelno > logging.DEBUG: - logger.handle(event) - msgprint = True - - # Nope, so just print all of the messages we have (including debug messages) - if not msgprint: - for event in ui_queue[:]: - if isinstance(event, logging.LogRecord): - logger.handle(event) - if msgerrs: - logger.removeHandler(stderr) - else: - logger.removeHandler(stdout) - ui_queue = [] - -def fire_ui_handlers(event, d): - global _thread_lock - - if not _uiready: - # No UI handlers registered yet, queue up the messages - ui_queue.append(event) - return - - with bb.utils.lock_timeout_nocheck(_thread_lock) as lock: - if not lock: - # If we can't get the lock, we may be recursively called, queue and return - ui_queue.append(event) - return - - errors = [] - for h in _ui_handlers: - #print "Sending event %s" % event - try: - if not _ui_logfilters[h].filter(event): - continue - # We use pickle here since it better handles object instances - # which xmlrpc's marshaller does not. Events *must* be serializable - # by pickle. - if hasattr(_ui_handlers[h].event, "sendpickle"): - _ui_handlers[h].event.sendpickle((pickle.dumps(event))) - else: - _ui_handlers[h].event.send(event) - except: - errors.append(h) - for h in errors: - del _ui_handlers[h] - - while ui_queue: - fire_ui_handlers(ui_queue.pop(), d) - -def fire(event, d): - """Fire off an Event""" - - # We can fire class handlers in the worker process context and this is - # desired so they get the task based datastore. - # UI handlers need to be fired in the server context so we defer this. They - # don't have a datastore so the datastore context isn't a problem. 
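# Editor's note (illustrative, not part of the original file): a typical
# call site looks like
#
#   bb.event.fire(bb.event.ConfigParsed(), d)
#
# class handlers run immediately in the current context; UI delivery is
# deferred to the server (or routed through worker_fire in a worker).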
- - fire_class_handlers(event, d) - if worker_fire: - worker_fire(event, d) - else: - # If messages have been queued up, clear the queue - global _uiready, ui_queue - if _uiready and ui_queue: - with bb.utils.lock_timeout_nocheck(_thread_lock): - queue = ui_queue - ui_queue = [] - for queue_event in queue: - fire_ui_handlers(queue_event, d) - - fire_ui_handlers(event, d) - -def fire_from_worker(event, d): - fire_ui_handlers(event, d) - -noop = lambda _: None -def register(name, handler, mask=None, filename=None, lineno=None, data=None): - """Register an Event handler""" - - if data is not None and data.getVar("BB_CURRENT_MC"): - mc = data.getVar("BB_CURRENT_MC") - name = '%s%s' % (mc.replace('-', '_'), name) - - # already registered - if name in _handlers: - if data is not None: - bbhands_mc = (data.getVar("__BBHANDLERS_MC") or set()) - bbhands_mc.add(name) - data.setVar("__BBHANDLERS_MC", bbhands_mc) - return AlreadyRegistered - - if handler is not None: - # handle string containing python code - if isinstance(handler, str): - tmp = "def %s(e, d):\n%s" % (name, handler) - # Inject empty lines to make code match lineno in filename - if lineno is not None: - tmp = "\n" * (lineno-1) + tmp - try: - code = bb.methodpool.compile_cache(tmp) - if not code: - if filename is None: - filename = "%s(e, d)" % name - code = compile(tmp, filename, "exec", ast.PyCF_ONLY_AST) - code = compile(code, filename, "exec") - bb.methodpool.compile_cache_add(tmp, code) - except SyntaxError: - logger.error("Unable to register event handler '%s':\n%s", name, - ''.join(traceback.format_exc(limit=0))) - _handlers[name] = noop - return - env = {} - bb.utils.better_exec(code, env) - func = bb.utils.better_eval(name, env) - _handlers[name] = func - else: - _handlers[name] = handler - - if not mask or '*' in mask: - _catchall_handlers[name] = True - else: - for m in mask: - if _event_handler_map.get(m, None) is None: - _event_handler_map[m] = {} - _event_handler_map[m][name] = True - - if data is not None: - bbhands_mc = (data.getVar("__BBHANDLERS_MC") or set()) - bbhands_mc.add(name) - data.setVar("__BBHANDLERS_MC", bbhands_mc) - - return Registered - -def remove(name, handler, data=None): - """Remove an Event handler""" - if data is not None: - if data.getVar("BB_CURRENT_MC"): - mc = data.getVar("BB_CURRENT_MC") - name = '%s%s' % (mc.replace('-', '_'), name) - - _handlers.pop(name) - if name in _catchall_handlers: - _catchall_handlers.pop(name) - for event in _event_handler_map.keys(): - if name in _event_handler_map[event]: - _event_handler_map[event].pop(name) - - if data is not None: - bbhands_mc = (data.getVar("__BBHANDLERS_MC") or set()) - if name in bbhands_mc: - bbhands_mc.remove(name) - data.setVar("__BBHANDLERS_MC", bbhands_mc) - -def get_handlers(): - return _handlers - -def set_handlers(handlers): - global _handlers - _handlers = handlers - -def set_eventfilter(func): - global _eventfilter - _eventfilter = func - -def register_UIHhandler(handler, mainui=False): - with bb.utils.lock_timeout(_thread_lock): - bb.event._ui_handler_seq = bb.event._ui_handler_seq + 1 - _ui_handlers[_ui_handler_seq] = handler - level, debug_domains = bb.msg.constructLogOptions() - _ui_logfilters[_ui_handler_seq] = UIEventFilter(level, debug_domains) - if mainui: - global _uiready - _uiready = _ui_handler_seq - return _ui_handler_seq - -def unregister_UIHhandler(handlerNum, mainui=False): - if mainui: - global _uiready - _uiready = False - with bb.utils.lock_timeout(_thread_lock): - if handlerNum in _ui_handlers: - del 
_ui_handlers[handlerNum] - return - -def get_uihandler(): - if _uiready is False: - return None - return _uiready - -# Class to allow filtering of events and specific filtering of LogRecords *before* we put them over the IPC -class UIEventFilter(object): - def __init__(self, level, debug_domains): - self.update(None, level, debug_domains) - - def update(self, eventmask, level, debug_domains): - self.eventmask = eventmask - self.stdlevel = level - self.debug_domains = debug_domains - - def filter(self, event): - if isinstance(event, logging.LogRecord): - if event.levelno >= self.stdlevel: - return True - if event.name in self.debug_domains and event.levelno >= self.debug_domains[event.name]: - return True - return False - eid = str(event.__class__)[8:-2] - if self.eventmask and eid not in self.eventmask: - return False - return True - -def set_UIHmask(handlerNum, level, debug_domains, mask): - if not handlerNum in _ui_handlers: - return False - if '*' in mask: - _ui_logfilters[handlerNum].update(None, level, debug_domains) - else: - _ui_logfilters[handlerNum].update(mask, level, debug_domains) - return True - -def getName(e): - """Returns the name of a class or class instance""" - if getattr(e, "__name__", None) is None: - return e.__class__.__name__ - else: - return e.__name__ - -class OperationStarted(Event): - """An operation has begun""" - def __init__(self, msg = "Operation Started"): - Event.__init__(self) - self.msg = msg - -class OperationCompleted(Event): - """An operation has completed""" - def __init__(self, total, msg = "Operation Completed"): - Event.__init__(self) - self.total = total - self.msg = msg - -class OperationProgress(Event): - """An operation is in progress""" - def __init__(self, current, total, msg = "Operation in Progress"): - Event.__init__(self) - self.current = current - self.total = total - self.msg = msg + ": %s/%s" % (current, total); - -class ConfigParsed(Event): - """Configuration Parsing Complete""" - -class MultiConfigParsed(Event): - """Multi-Config Parsing Complete""" - def __init__(self, mcdata): - self.mcdata = mcdata - Event.__init__(self) - -class RecipeEvent(Event): - def __init__(self, fn): - self.fn = fn - Event.__init__(self) - -class RecipePreDeferredInherits(RecipeEvent): - """ - Called before deferred inherits are processed so code can snoop on class extensions for example - Limitations: It won't see inherits of inherited classes and the data is unexpanded - """ - def __init__(self, fn, inherits): - self.fn = fn - self.inherits = inherits - Event.__init__(self) - -class RecipePreFinalise(RecipeEvent): - """ Recipe Parsing Complete but not yet finalised""" - -class RecipePostKeyExpansion(RecipeEvent): - """ Recipe Parsing Complete but not yet finalised""" - - -class RecipeTaskPreProcess(RecipeEvent): - """ - Recipe Tasks about to be finalised - The list of tasks should be final at this point and handlers - are only able to change interdependencies - """ - def __init__(self, fn, tasklist): - self.fn = fn - self.tasklist = tasklist - Event.__init__(self) - -class RecipeParsed(RecipeEvent): - """ Recipe Parsing Complete """ - -class BuildBase(Event): - """Base class for bitbake build events""" - - def __init__(self, n, p, failures = 0): - self._name = n - self._pkgs = p - Event.__init__(self) - self._failures = failures - - def getPkgs(self): - return self._pkgs - - def setPkgs(self, pkgs): - self._pkgs = pkgs - - def getName(self): - return self._name - - def setName(self, name): - self._name = name - - def getFailures(self): - """ - Return 
the number of failed packages - """ - return self._failures - - pkgs = property(getPkgs, setPkgs, None, "pkgs property") - name = property(getName, setName, None, "name property") - -class BuildInit(BuildBase): - """buildFile or buildTargets was invoked""" - def __init__(self, p=[]): - name = None - BuildBase.__init__(self, name, p) - -class BuildStarted(BuildBase, OperationStarted): - """Event when builds start""" - def __init__(self, n, p, failures = 0): - OperationStarted.__init__(self, "Building Started") - BuildBase.__init__(self, n, p, failures) - -class BuildCompleted(BuildBase, OperationCompleted): - """Event when builds have completed""" - def __init__(self, total, n, p, failures=0, interrupted=0): - if not failures: - OperationCompleted.__init__(self, total, "Building Succeeded") - else: - OperationCompleted.__init__(self, total, "Building Failed") - self._interrupted = interrupted - BuildBase.__init__(self, n, p, failures) - -class DiskFull(Event): - """Disk full case build halted""" - def __init__(self, dev, type, freespace, mountpoint): - Event.__init__(self) - self._dev = dev - self._type = type - self._free = freespace - self._mountpoint = mountpoint - -class DiskUsageSample: - def __init__(self, available_bytes, free_bytes, total_bytes): - # Number of bytes available to non-root processes. - self.available_bytes = available_bytes - # Number of bytes available to root processes. - self.free_bytes = free_bytes - # Total capacity of the volume. - self.total_bytes = total_bytes - -class MonitorDiskEvent(Event): - """If BB_DISKMON_DIRS is set, then this event gets triggered each time disk space is checked. - Provides information about devices that are getting monitored.""" - def __init__(self, disk_usage): - Event.__init__(self) - # hash of device root path -> DiskUsageSample - self.disk_usage = disk_usage - -class NoProvider(Event): - """No Provider for an Event""" - - def __init__(self, item, runtime=False, dependees=None, reasons=None, close_matches=None): - Event.__init__(self) - self._item = item - self._runtime = runtime - self._dependees = dependees - self._reasons = reasons - self._close_matches = close_matches - - def getItem(self): - return self._item - - def isRuntime(self): - return self._runtime - - def __str__(self): - msg = '' - if self._runtime: - r = "R" - else: - r = "" - - extra = '' - if not self._reasons: - if self._close_matches: - extra = ". Close matches:\n %s" % '\n '.join(sorted(set(self._close_matches))) - - if self._dependees: - msg = "Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)%s" % (r, self._item, ", ".join(self._dependees), r, extra) - else: - msg = "Nothing %sPROVIDES '%s'%s" % (r, self._item, extra) - if self._reasons: - for reason in self._reasons: - msg += '\n' + reason - return msg - - -class MultipleProviders(Event): - """Multiple Providers""" - - def __init__(self, item, candidates, runtime = False): - Event.__init__(self) - self._item = item - self._candidates = candidates - self._is_runtime = runtime - - def isRuntime(self): - """ - Is this a runtime issue? - """ - return self._is_runtime - - def getItem(self): - """ - The name for the to be build item - """ - return self._item - - def getCandidates(self): - """ - Get the possible Candidates for a PROVIDER. 
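Illustrative example (editor's note, hypothetical names): for the item
"virtual/kernel" this might return ["linux-yocto", "linux-dummy"].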
- """ - return self._candidates - - def __str__(self): - msg = "Multiple providers are available for %s%s (%s)" % (self._is_runtime and "runtime " or "", - self._item, - ", ".join(self._candidates)) - rtime = "" - if self._is_runtime: - rtime = "R" - msg += "\nConsider defining a PREFERRED_%sPROVIDER entry to match %s" % (rtime, self._item) - return msg - -class ParseStarted(OperationStarted): - """Recipe parsing for the runqueue has begun""" - def __init__(self, total): - OperationStarted.__init__(self, "Recipe parsing Started") - self.total = total - -class ParseCompleted(OperationCompleted): - """Recipe parsing for the runqueue has completed""" - def __init__(self, cached, parsed, skipped, masked, virtuals, errors, total): - OperationCompleted.__init__(self, total, "Recipe parsing Completed") - self.cached = cached - self.parsed = parsed - self.skipped = skipped - self.virtuals = virtuals - self.masked = masked - self.errors = errors - self.sofar = cached + parsed - -class ParseProgress(OperationProgress): - """Recipe parsing progress""" - def __init__(self, current, total): - OperationProgress.__init__(self, current, total, "Recipe parsing") - - -class CacheLoadStarted(OperationStarted): - """Loading of the dependency cache has begun""" - def __init__(self, total): - OperationStarted.__init__(self, "Loading cache Started") - self.total = total - -class CacheLoadProgress(OperationProgress): - """Cache loading progress""" - def __init__(self, current, total): - OperationProgress.__init__(self, current, total, "Loading cache") - -class CacheLoadCompleted(OperationCompleted): - """Cache loading is complete""" - def __init__(self, total, num_entries): - OperationCompleted.__init__(self, total, "Loading cache Completed") - self.num_entries = num_entries - -class TreeDataPreparationStarted(OperationStarted): - """Tree data preparation started""" - def __init__(self): - OperationStarted.__init__(self, "Preparing tree data Started") - -class TreeDataPreparationProgress(OperationProgress): - """Tree data preparation is in progress""" - def __init__(self, current, total): - OperationProgress.__init__(self, current, total, "Preparing tree data") - -class TreeDataPreparationCompleted(OperationCompleted): - """Tree data preparation completed""" - def __init__(self, total): - OperationCompleted.__init__(self, total, "Preparing tree data Completed") - -class DepTreeGenerated(Event): - """ - Event when a dependency tree has been generated - """ - - def __init__(self, depgraph): - Event.__init__(self) - self._depgraph = depgraph - -class TargetsTreeGenerated(Event): - """ - Event when a set of buildable targets has been generated - """ - def __init__(self, model): - Event.__init__(self) - self._model = model - -class ReachableStamps(Event): - """ - An event listing all stamps reachable after parsing - which the metadata may use to clean up stale data - """ - - def __init__(self, stamps): - Event.__init__(self) - self.stamps = stamps - -class StaleSetSceneTasks(Event): - """ - An event listing setscene tasks which are 'stale' and will - be rerun. The metadata may use to clean up stale data. - tasks is a mapping of tasks and matching stale stamps. 
- """ - - def __init__(self, tasks): - Event.__init__(self) - self.tasks = tasks - -class FilesMatchingFound(Event): - """ - Event when a list of files matching the supplied pattern has - been generated - """ - def __init__(self, pattern, matches): - Event.__init__(self) - self._pattern = pattern - self._matches = matches - -class ConfigFilesFound(Event): - """ - Event when a list of appropriate config files has been generated - """ - def __init__(self, variable, values): - Event.__init__(self) - self._variable = variable - self._values = values - -class ConfigFilePathFound(Event): - """ - Event when a path for a config file has been found - """ - def __init__(self, path): - Event.__init__(self) - self._path = path - -class MsgBase(Event): - """Base class for messages""" - - def __init__(self, msg): - self._message = msg - Event.__init__(self) - -class MsgDebug(MsgBase): - """Debug Message""" - -class MsgNote(MsgBase): - """Note Message""" - -class MsgWarn(MsgBase): - """Warning Message""" - -class MsgError(MsgBase): - """Error Message""" - -class MsgFatal(MsgBase): - """Fatal Message""" - -class MsgPlain(MsgBase): - """General output""" - -class LogExecTTY(Event): - """Send event containing program to spawn on tty of the logger""" - def __init__(self, msg, prog, sleep_delay, retries): - Event.__init__(self) - self.msg = msg - self.prog = prog - self.sleep_delay = sleep_delay - self.retries = retries - -class LogHandler(logging.Handler): - """Dispatch logging messages as bitbake events""" - - def emit(self, record): - if record.exc_info: - record.bb_exc_formatted = traceback.format_exception(*record.exc_info) - record.exc_info = None - fire(record, None) - - def filter(self, record): - record.taskpid = worker_pid - return True - -class MetadataEvent(Event): - """ - Generic event that target for OE-Core classes - to report information during asynchronous execution - """ - def __init__(self, eventtype, eventdata): - Event.__init__(self) - self.type = eventtype - self._localdata = eventdata - -class ProcessStarted(Event): - """ - Generic process started event (usually part of the initial startup) - where further progress events will be delivered - """ - def __init__(self, processname, total): - Event.__init__(self) - self.processname = processname - self.total = total - -class ProcessProgress(Event): - """ - Generic process progress event (usually part of the initial startup) - """ - def __init__(self, processname, progress): - Event.__init__(self) - self.processname = processname - self.progress = progress - -class ProcessFinished(Event): - """ - Generic process finished event (usually part of the initial startup) - """ - def __init__(self, processname): - Event.__init__(self) - self.processname = processname - -class SanityCheck(Event): - """ - Event to run sanity checks, either raise errors or generate events as return status. - """ - def __init__(self, generateevents = True): - Event.__init__(self) - self.generateevents = generateevents - -class SanityCheckPassed(Event): - """ - Event to indicate sanity check has passed - """ - -class SanityCheckFailed(Event): - """ - Event to indicate sanity check has failed - """ - def __init__(self, msg, network_error=False): - Event.__init__(self) - self._msg = msg - self._network_error = network_error - -class NetworkTest(Event): - """ - Event to run network connectivity tests, either raise errors or generate events as return status. 
- """ - def __init__(self, generateevents = True): - Event.__init__(self) - self.generateevents = generateevents - -class NetworkTestPassed(Event): - """ - Event to indicate network test has passed - """ - -class NetworkTestFailed(Event): - """ - Event to indicate network test has failed - """ - -class FindSigInfoResult(Event): - """ - Event to return results from findSigInfo command - """ - def __init__(self, result): - Event.__init__(self) - self.result = result - -class GetTaskSignatureResult(Event): - """ - Event to return results from GetTaskSignatures command - """ - def __init__(self, sig): - Event.__init__(self) - self.sig = sig - -class ParseError(Event): - """ - Event to indicate parse failed - """ - def __init__(self, msg): - super().__init__() - self._msg = msg diff --git a/bitbake/lib/bb/fetch2/README b/bitbake/lib/bb/fetch2/README deleted file mode 100644 index 67b787ef47..0000000000 --- a/bitbake/lib/bb/fetch2/README +++ /dev/null @@ -1,57 +0,0 @@ -There are expectations of users of the fetcher code. This file attempts to document -some of the constraints that are present. Some are obvious, some are less so. It is -documented in the context of how OE uses it but the API calls are generic. - -a) network access for sources is only expected to happen in the do_fetch task. - This is not enforced or tested but is required so that we can: - - i) audit the sources used (i.e. for license/manifest reasons) - ii) support offline builds with a suitable cache - iii) allow work to continue even with downtime upstream - iv) allow for changes upstream in incompatible ways - v) allow rebuilding of the software in X years time - -b) network access is not expected in do_unpack task. - -c) you can take DL_DIR and use it as a mirror for offline builds. - -d) access to the network is only made when explicitly configured in recipes - (e.g. use of AUTOREV, or use of git tags which change revision). - -e) fetcher output is deterministic (i.e. if you fetch configuration XXX now it - will match in future exactly in a clean build with a new DL_DIR). - One specific pain point example are git tags. They can be replaced and change - so the git fetcher has to resolve them with the network. We use git revisions - where possible to avoid this and ensure determinism. - -f) network access is expected to work with the standard linux proxy variables - so that access behind firewalls works (the fetcher sets these in the - environment but only in the do_fetch tasks). - -g) access during parsing has to be minimal, a "git ls-remote" for an AUTOREV - git recipe might be ok but you can't expect to checkout a git tree. - -h) we need to provide revision information during parsing such that a version - for the recipe can be constructed. - -i) versions are expected to be able to increase in a way which sorts allowing - package feeds to operate (see PR server required for git revisions to sort). - -j) API to query for possible version upgrades of a url is highly desireable to - allow our automated upgrage code to function (it is implied this does always - have network access). - -k) Where fixes or changes to behaviour in the fetcher are made, we ask that - test cases are added (run with "bitbake-selftest bb.tests.fetch"). We do - have fairly extensive test coverage of the fetcher as it is the only way - to track all of its corner cases, it still doesn't give entire coverage - though sadly. 
- -l) If using tools during parse time, they will have to be in ASSUME_PROVIDED - in OE's context as we can't build git-native, then parse a recipe and use - git ls-remote. - -Not all fetchers support all features, autorev is optional and doesn't make -sense for some. Upgrade detection means different things in different contexts -too. - diff --git a/bitbake/lib/bb/fetch2/__init__.py b/bitbake/lib/bb/fetch2/__init__.py deleted file mode 100644 index 0ad987c596..0000000000 --- a/bitbake/lib/bb/fetch2/__init__.py +++ /dev/null @@ -1,2144 +0,0 @@ -""" -BitBake 'Fetch' implementations - -Classes for obtaining upstream sources for the -BitBake build tools. -""" - -# Copyright (C) 2003, 2004 Chris Larson -# Copyright (C) 2012 Intel Corporation -# -# SPDX-License-Identifier: GPL-2.0-only -# -# Based on functions from the base bb module, Copyright 2003 Holger Schurig - -import os, re -import signal -import logging -import urllib.request, urllib.parse, urllib.error -if 'git' not in urllib.parse.uses_netloc: - urllib.parse.uses_netloc.append('git') -import operator -import collections -import subprocess -import pickle -import errno -import bb.utils -import bb.checksum -import bb.process -import bb.event - -__version__ = "2" -_checksum_cache = bb.checksum.FileChecksumCache() -_revisions_cache = bb.checksum.RevisionsCache() - -logger = logging.getLogger("BitBake.Fetcher") - -CHECKSUM_LIST = [ "goh1", "md5", "sha256", "sha1", "sha384", "sha512" ] -SHOWN_CHECKSUM_LIST = ["sha256"] - -class BBFetchException(Exception): - """Class all fetch exceptions inherit from""" - def __init__(self, message): - self.msg = message - Exception.__init__(self, message) - - def __str__(self): - return self.msg - -class UntrustedUrl(BBFetchException): - """Exception raised when encountering a host not listed in BB_ALLOWED_NETWORKS""" - def __init__(self, url, message=''): - if message: - msg = message - else: - msg = "The URL: '%s' is not trusted and cannot be used" % url - self.url = url - BBFetchException.__init__(self, msg) - self.args = (url,) - -class MalformedUrl(BBFetchException): - """Exception raised when encountering an invalid url""" - def __init__(self, url, message=''): - if message: - msg = message - else: - msg = "The URL: '%s' is invalid and cannot be interpreted" % url - self.url = url - BBFetchException.__init__(self, msg) - self.args = (url,) - -class FetchError(BBFetchException): - """General fetcher exception when something happens incorrectly""" - def __init__(self, message, url = None): - if url: - msg = "Fetcher failure for URL: '%s'. %s" % (url, message) - else: - msg = "Fetcher failure: %s" % message - self.url = url - BBFetchException.__init__(self, msg) - self.args = (message, url) - -class ChecksumError(FetchError): - """Exception when mismatched checksum encountered""" - def __init__(self, message, url = None, checksum = None): - self.checksum = checksum - FetchError.__init__(self, message, url) - -class NoChecksumError(FetchError): - """Exception when no checksum is specified, but BB_STRICT_CHECKSUM is set""" - -class UnpackError(BBFetchException): - """General fetcher exception when something happens incorrectly when unpacking""" - def __init__(self, message, url): - msg = "Unpack failure for URL: '%s'. 
%s" % (url, message) - self.url = url - BBFetchException.__init__(self, msg) - self.args = (message, url) - -class NoMethodError(BBFetchException): - """Exception raised when there is no method to obtain a supplied url or set of urls""" - def __init__(self, url): - msg = "Could not find a fetcher which supports the URL: '%s'" % url - self.url = url - BBFetchException.__init__(self, msg) - self.args = (url,) - -class MissingParameterError(BBFetchException): - """Exception raised when a fetch method is missing a critical parameter in the url""" - def __init__(self, missing, url): - msg = "URL: '%s' is missing the required parameter '%s'" % (url, missing) - self.url = url - self.missing = missing - BBFetchException.__init__(self, msg) - self.args = (missing, url) - -class ParameterError(BBFetchException): - """Exception raised when a url cannot be processed due to invalid parameters.""" - def __init__(self, message, url): - msg = "URL: '%s' has invalid parameters. %s" % (url, message) - self.url = url - BBFetchException.__init__(self, msg) - self.args = (message, url) - -class NetworkAccess(BBFetchException): - """Exception raised when network access is disabled but it is required.""" - def __init__(self, url, cmd): - msg = "Network access disabled through BB_NO_NETWORK (or set indirectly due to use of BB_FETCH_PREMIRRORONLY) but access requested with command %s (for url %s)" % (cmd, url) - self.url = url - self.cmd = cmd - BBFetchException.__init__(self, msg) - self.args = (url, cmd) - -class NonLocalMethod(Exception): - def __init__(self): - Exception.__init__(self) - -class MissingChecksumEvent(bb.event.Event): - def __init__(self, url, **checksums): - self.url = url - self.checksums = checksums - bb.event.Event.__init__(self) - - -class URI(object): - """ - A class representing a generic URI, with methods for - accessing the URI components, and stringifies to the - URI. - - It is constructed by calling it with a URI, or setting - the attributes manually: - - uri = URI("http://example.com/") - - uri = URI() - uri.scheme = 'http' - uri.hostname = 'example.com' - uri.path = '/' - - It has the following attributes: - - * scheme (read/write) - * userinfo (authentication information) (read/write) - * username (read/write) - * password (read/write) - - Note, password is deprecated as of RFC 3986. - - * hostname (read/write) - * port (read/write) - * hostport (read only) - "hostname:port", if both are set, otherwise just "hostname" - * path (read/write) - * path_quoted (read/write) - A URI quoted version of path - * params (dict) (read/write) - * query (dict) (read/write) - * relative (bool) (read only) - True if this is a "relative URI", (e.g. file:foo.diff) - - It stringifies to the URI itself. - - Some notes about relative URIs: while it's specified that - a URI beginning with :// should either be directly - followed by a hostname or a /, the old URI handling of the - fetch2 library did not conform to this. Therefore, this URI - class has some kludges to make sure that URIs are parsed in - a way comforming to bitbake's current usage. 
This URI class - supports the following: - - file:relative/path.diff (IETF compliant) - git:relative/path.git (IETF compliant) - git:///absolute/path.git (IETF compliant) - file:///absolute/path.diff (IETF compliant) - - file://relative/path.diff (not IETF compliant) - - But it does not support the following: - - file://hostname/absolute/path.diff (would be IETF compliant) - - Note that the last case only applies to a list of - explicitly allowed schemes (currently only file://), that requires - its URIs to not have a network location. - """ - - _relative_schemes = ['file', 'git'] - _netloc_forbidden = ['file'] - - def __init__(self, uri=None): - self.scheme = '' - self.userinfo = '' - self.hostname = '' - self.port = None - self._path = '' - self.params = {} - self.query = {} - self.relative = False - - if not uri: - return - - # We hijack the URL parameters, since the way bitbake uses - # them are not quite RFC compliant. - uri, param_str = (uri.split(";", 1) + [None])[:2] - - urlp = urllib.parse.urlparse(uri) - self.scheme = urlp.scheme - - reparse = 0 - - # Coerce urlparse to make URI scheme use netloc - if not self.scheme in urllib.parse.uses_netloc: - urllib.parse.uses_params.append(self.scheme) - reparse = 1 - - # Make urlparse happy(/ier) by converting local resources - # to RFC compliant URL format. E.g.: - # file://foo.diff -> file:foo.diff - if urlp.scheme in self._netloc_forbidden: - uri = re.sub(r"(?<=:)//(?!/)", "", uri, count=1) - reparse = 1 - - if reparse: - urlp = urllib.parse.urlparse(uri) - - # Identify if the URI is relative or not - if urlp.scheme in self._relative_schemes and \ - re.compile(r"^\w+:(?!//)").match(uri): - self.relative = True - - if not self.relative: - self.hostname = urlp.hostname or '' - self.port = urlp.port - - self.userinfo += urlp.username or '' - - if urlp.password: - self.userinfo += ':%s' % urlp.password - - self.path = urllib.parse.unquote(urlp.path) - - if param_str: - self.params = self._param_str_split(param_str, ";") - if urlp.query: - self.query = self._param_str_split(urlp.query, "&") - - def __str__(self): - userinfo = self.userinfo - if userinfo: - userinfo += '@' - - return "%s:%s%s%s%s%s%s" % ( - self.scheme, - '' if self.relative else '//', - userinfo, - self.hostport, - self.path_quoted, - self._query_str(), - self._param_str()) - - def _param_str(self): - return ( - ''.join([';', self._param_str_join(self.params, ";")]) - if self.params else '') - - def _query_str(self): - return ( - ''.join(['?', self._param_str_join(self.query, "&")]) - if self.query else '') - - def _param_str_split(self, string, elmdelim, kvdelim="="): - ret = collections.OrderedDict() - for k, v in [x.split(kvdelim, 1) if kvdelim in x else (x, None) for x in string.split(elmdelim) if x]: - ret[k] = v - return ret - - def _param_str_join(self, dict_, elmdelim, kvdelim="="): - return elmdelim.join([kvdelim.join([k, v]) if v else k for k, v in dict_.items()]) - - @property - def hostport(self): - if not self.port: - return self.hostname - return "%s:%d" % (self.hostname, self.port) - - @property - def path_quoted(self): - return urllib.parse.quote(self.path) - - @path_quoted.setter - def path_quoted(self, path): - self.path = urllib.parse.unquote(path) - - @property - def path(self): - return self._path - - @path.setter - def path(self, path): - self._path = path - - if not path or re.compile("^/").match(path): - self.relative = False - else: - self.relative = True - - @property - def username(self): - if self.userinfo: - return (self.userinfo.split(":", 
1))[0] - return '' - - @username.setter - def username(self, username): - password = self.password - self.userinfo = username - if password: - self.userinfo += ":%s" % password - - @property - def password(self): - if self.userinfo and ":" in self.userinfo: - return (self.userinfo.split(":", 1))[1] - return '' - - @password.setter - def password(self, password): - self.userinfo = "%s:%s" % (self.username, password) - -def decodeurl(url): - """Decodes an URL into the tokens (scheme, network location, path, - user, password, parameters). - """ - - uri = URI(url) - path = uri.path if uri.path else "/" - return uri.scheme, uri.hostport, path, uri.username, uri.password, uri.params - -def decodemirrorurl(url): - """Decodes a mirror URL into the tokens (scheme, network location, path, - user, password, parameters). - """ - m = re.compile('(?P[^:]*)://((?P[^/;]+)@)?(?P[^;]+)(;(?P.*))?').match(url) - if not m: - raise MalformedUrl(url) - - type = m.group('type') - location = m.group('location') - if not location: - raise MalformedUrl(url) - user = m.group('user') - parm = m.group('parm') - - locidx = location.find('/') - if locidx != -1 and type.lower() != 'file': - host = location[:locidx] - path = location[locidx:] - elif type.lower() == 'file': - host = "" - path = location - if user: - path = user + '@' + path - user = "" - else: - host = location - path = "/" - if user: - m = re.compile('(?P[^:]+)(:?(?P.*))').match(user) - if m: - user = m.group('user') - pswd = m.group('pswd') - else: - user = '' - pswd = '' - - p = collections.OrderedDict() - if parm: - for s in parm.split(';'): - if s: - if not '=' in s: - raise MalformedUrl(url, "The URL: '%s' is invalid: parameter %s does not specify a value (missing '=')" % (url, s)) - s1, s2 = s.split('=', 1) - p[s1] = s2 - - return type, host, urllib.parse.unquote(path), user, pswd, p - -def encodeurl(decoded): - """Encodes a URL from tokens (scheme, network location, path, - user, password, parameters). - """ - - type, host, path, user, pswd, p = decoded - - if not type: - raise MissingParameterError('type', "encoded from the data %s" % str(decoded)) - uri = URI() - uri.scheme = type - if user and type != "file": - uri.username = user - if pswd: - uri.password = pswd - if host and type != "file": - uri.hostname = host - if path: - # Standardise path to ensure comparisons work - while '//' in path: - path = path.replace("//", "/") - uri.path = path - if type == "file": - # Use old not IETF compliant style - uri.relative = False - if p: - uri.params = p - - return str(uri) - -def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None): - if not ud.url or not uri_find or not uri_replace: - logger.error("uri_replace: passed an undefined value, not replacing") - return None - uri_decoded = list(decodemirrorurl(ud.url)) - uri_find_decoded = list(decodemirrorurl(uri_find)) - uri_replace_decoded = list(decodemirrorurl(uri_replace)) - logger.debug2("For url %s comparing %s to %s" % (uri_decoded, uri_find_decoded, uri_replace_decoded)) - result_decoded = ['', '', '', '', '', {}] - # 0 - type, 1 - host, 2 - path, 3 - user, 4- pswd, 5 - params - for loc, i in enumerate(uri_find_decoded): - result_decoded[loc] = uri_decoded[loc] - regexp = i - if loc == 0 and regexp and not regexp.endswith("$"): - # Leaving the type unanchored can mean "https" matching "file" can become "files" - # which is clearly undesirable. 
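-                # e.g. an unanchored type pattern "git" would also match and
-                # rewrite the type of "gitsm" URLs (illustrative example).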
- regexp += "$" - if loc == 5: - # Handle URL parameters - if i: - # Any specified URL parameters must match - for k in uri_find_decoded[loc]: - if uri_decoded[loc][k] != uri_find_decoded[loc][k]: - return None - # Overwrite any specified replacement parameters - for k in uri_replace_decoded[loc]: - for l in replacements: - uri_replace_decoded[loc][k] = uri_replace_decoded[loc][k].replace(l, replacements[l]) - result_decoded[loc][k] = uri_replace_decoded[loc][k] - elif (loc == 3 or loc == 4) and uri_replace_decoded[loc]: - # User/password in the replacement is just a straight replacement - result_decoded[loc] = uri_replace_decoded[loc] - elif (re.match(regexp, uri_decoded[loc])): - if not uri_replace_decoded[loc]: - result_decoded[loc] = "" - else: - for k in replacements: - uri_replace_decoded[loc] = uri_replace_decoded[loc].replace(k, replacements[k]) - #bb.note("%s %s %s" % (regexp, uri_replace_decoded[loc], uri_decoded[loc])) - result_decoded[loc] = re.sub(regexp, uri_replace_decoded[loc], uri_decoded[loc], count=1) - if loc == 2: - # Handle path manipulations - basename = None - if uri_decoded[0] != uri_replace_decoded[0] and mirrortarball: - # If the source and destination url types differ, must be a mirrortarball mapping - basename = os.path.basename(mirrortarball) - # Kill parameters, they make no sense for mirror tarballs - uri_decoded[5] = {} - uri_find_decoded[5] = {} - elif ud.localpath and ud.method.supports_checksum(ud): - basename = os.path.basename(ud.localpath) - if basename: - uri_basename = os.path.basename(uri_decoded[loc]) - # Prefix with a slash as a sentinel in case - # result_decoded[loc] does not contain one. - path = "/" + result_decoded[loc] - if uri_basename and basename != uri_basename and path.endswith("/" + uri_basename): - result_decoded[loc] = path[1:-len(uri_basename)] + basename - elif not path.endswith("/" + basename): - result_decoded[loc] = os.path.join(path[1:], basename) - else: - return None - result = encodeurl(result_decoded) - if result == ud.url: - return None - logger.debug2("For url %s returning %s" % (ud.url, result)) - return result - -methods = [] -urldata_cache = {} -saved_headrevs = {} - -def fetcher_init(d, servercontext=True): - """ - Called to initialize the fetchers once the configuration data is known. - Calls before this must not hit the cache. - """ - - _checksum_cache.init_cache(d.getVar("BB_CACHEDIR")) - _revisions_cache.init_cache(d.getVar("BB_CACHEDIR")) - - if not servercontext: - return - - try: - # fetcher_init is called multiple times, so make sure we only save the - # revs the first time it is called. 
- if not bb.fetch2.saved_headrevs: - bb.fetch2.saved_headrevs = _revisions_cache.get_revs() - except: - pass - - # When to drop SCM head revisions controlled by user policy - srcrev_policy = d.getVar('BB_SRCREV_POLICY') or "clear" - if srcrev_policy == "cache": - logger.debug("Keeping SRCREV cache due to cache policy of: %s", srcrev_policy) - elif srcrev_policy == "clear": - logger.debug("Clearing SRCREV cache due to cache policy of: %s", srcrev_policy) - _revisions_cache.clear_cache() - else: - raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy) - - - for m in methods: - if hasattr(m, "init"): - m.init(d) - -def fetcher_parse_save(): - _checksum_cache.save_extras() - _revisions_cache.save_extras() - -def fetcher_parse_done(): - _checksum_cache.save_merge() - _revisions_cache.save_merge() - -def fetcher_compare_revisions(d): - """ - Compare the revisions in the persistent cache with the saved values from - when bitbake was started and return true if they have changed. - """ - - headrevs = _revisions_cache.get_revs() - return headrevs != bb.fetch2.saved_headrevs - -def mirror_from_string(data): - mirrors = (data or "").replace('\\n',' ').split() - # Split into pairs - if len(mirrors) % 2 != 0: - bb.warn('Invalid mirror data %s, should have paired members.' % data) - return list(zip(*[iter(mirrors)]*2)) - -def verify_checksum(ud, d, precomputed={}, localpath=None, fatal_nochecksum=True): - """ - verify the MD5 and SHA256 checksum for downloaded src - - Raises a FetchError if one or both of the SRC_URI checksums do not match - the downloaded file, or if BB_STRICT_CHECKSUM is set and there are no - checksums specified. - - Returns a dict of checksums that can be stored in a done stamp file and - passed in as precomputed parameter in a later call to avoid re-computing - the checksums from the file. This allows verifying the checksums of the - file against those in the recipe each time, rather than only after - downloading. See https://bugzilla.yoctoproject.org/show_bug.cgi?id=5571. 
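-
-    For illustration, the returned dict maps ids from CHECKSUM_LIST to digests
-    (hypothetical values shown):
-
-        {"md5": "d41d8cd98f00b204e9800998ecf8427e", "sha256": "e3b0c442..."}
-
-    and a later call such as verify_checksum(ud, d, precomputed=checksums)
-    verifies again without re-hashing the file.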
- """ - if ud.ignore_checksums or not ud.method.supports_checksum(ud): - return {} - - if localpath is None: - localpath = ud.localpath - - def compute_checksum_info(checksum_id): - checksum_name = getattr(ud, "%s_name" % checksum_id) - - if checksum_id in precomputed: - checksum_data = precomputed[checksum_id] - else: - checksum_data = getattr(bb.utils, "%s_file" % checksum_id)(localpath) - - checksum_expected = getattr(ud, "%s_expected" % checksum_id) - - if checksum_expected == '': - checksum_expected = None - - return { - "id": checksum_id, - "name": checksum_name, - "data": checksum_data, - "expected": checksum_expected - } - - checksum_infos = [] - for checksum_id in CHECKSUM_LIST: - checksum_infos.append(compute_checksum_info(checksum_id)) - - checksum_dict = {ci["id"] : ci["data"] for ci in checksum_infos} - checksum_event = {"%ssum" % ci["id"] : ci["data"] for ci in checksum_infos} - - for ci in checksum_infos: - if ci["id"] in SHOWN_CHECKSUM_LIST: - checksum_lines = ["SRC_URI[%s] = \"%s\"" % (ci["name"], ci["data"])] - - # If no checksum has been provided - if fatal_nochecksum and ud.method.recommends_checksum(ud) and all(ci["expected"] is None for ci in checksum_infos): - messages = [] - strict = d.getVar("BB_STRICT_CHECKSUM") or "0" - - # If strict checking enabled and neither sum defined, raise error - if strict == "1": - raise NoChecksumError("\n".join(checksum_lines)) - - bb.event.fire(MissingChecksumEvent(ud.url, **checksum_event), d) - - if strict == "ignore": - return checksum_dict - - # Log missing sums so user can more easily add them - messages.append("Missing checksum for '%s', consider adding at " \ - "least one to the recipe:" % ud.localpath) - messages.extend(checksum_lines) - logger.warning("\n".join(messages)) - - # We want to alert the user if a checksum is defined in the recipe but - # it does not match. - messages = [] - messages.append("Checksum mismatch!") - bad_checksum = None - - for ci in checksum_infos: - if ci["expected"] and ci["expected"] != ci["data"]: - messages.append("File: '%s' has %s checksum '%s' when '%s' was " \ - "expected" % (localpath, ci["id"], ci["data"], ci["expected"])) - bad_checksum = ci["data"] - - if bad_checksum: - messages.append("If this change is expected (e.g. you have upgraded " \ - "to a new version without updating the checksums) " \ - "then you can use these lines within the recipe:") - messages.extend(checksum_lines) - messages.append("Otherwise you should retry the download and/or " \ - "check with upstream to determine if the file has " \ - "become corrupted or otherwise unexpectedly modified.") - raise ChecksumError("\n".join(messages), ud.url, bad_checksum) - - return checksum_dict - -def verify_donestamp(ud, d, origud=None): - """ - Check whether the done stamp file has the right checksums (if the fetch - method supports them). If it doesn't, delete the done stamp and force - a re-download. - - Returns True, if the donestamp exists and is valid, False otherwise. When - returning False, any existing done stamps are removed. 
- """ - if not ud.needdonestamp or (origud and not origud.needdonestamp): - return True - - if not os.path.exists(ud.localpath): - # local path does not exist - if os.path.exists(ud.donestamp): - # done stamp exists, but the downloaded file does not; the done stamp - # must be incorrect, re-trigger the download - bb.utils.remove(ud.donestamp) - return False - - if (not ud.method.supports_checksum(ud) or - (origud and not origud.method.supports_checksum(origud))): - # if done stamp exists and checksums not supported; assume the local - # file is current - return os.path.exists(ud.donestamp) - - precomputed_checksums = {} - # Only re-use the precomputed checksums if the donestamp is newer than the - # file. Do not rely on the mtime of directories, though. If ud.localpath is - # a directory, there will probably not be any checksums anyway. - if os.path.exists(ud.donestamp) and (os.path.isdir(ud.localpath) or - os.path.getmtime(ud.localpath) < os.path.getmtime(ud.donestamp)): - try: - with open(ud.donestamp, "rb") as cachefile: - pickled = pickle.Unpickler(cachefile) - precomputed_checksums.update(pickled.load()) - except Exception as e: - # Avoid the warnings on the upgrade path from emtpy done stamp - # files to those containing the checksums. - if not isinstance(e, EOFError): - # Ignore errors, they aren't fatal - logger.warning("Couldn't load checksums from donestamp %s: %s " - "(msg: %s)" % (ud.donestamp, type(e).__name__, - str(e))) - - try: - checksums = verify_checksum(ud, d, precomputed_checksums) - # If the cache file did not have the checksums, compute and store them - # as an upgrade path from the previous done stamp file format. - if checksums != precomputed_checksums: - with open(ud.donestamp, "wb") as cachefile: - p = pickle.Pickler(cachefile, 2) - p.dump(checksums) - return True - except ChecksumError as e: - # Checksums failed to verify, trigger re-download and remove the - # incorrect stamp file. - logger.warning("Checksum mismatch for local file %s\n" - "Cleaning and trying again." % ud.localpath) - if os.path.exists(ud.localpath): - rename_bad_checksum(ud, e.checksum) - bb.utils.remove(ud.donestamp) - return False - - -def update_stamp(ud, d): - """ - donestamp is file stamp indicating the whole fetching is done - this function update the stamp after verifying the checksum - """ - if not ud.needdonestamp: - return - - if os.path.exists(ud.donestamp): - # Touch the done stamp file to show active use of the download - try: - os.utime(ud.donestamp, None) - except: - # Errors aren't fatal here - pass - else: - try: - checksums = verify_checksum(ud, d) - # Store the checksums for later re-verification against the recipe - with open(ud.donestamp, "wb") as cachefile: - p = pickle.Pickler(cachefile, 2) - p.dump(checksums) - except ChecksumError as e: - # Checksums failed to verify, trigger re-download and remove the - # incorrect stamp file. - logger.warning("Checksum mismatch for local file %s\n" - "Cleaning and trying again." % ud.localpath) - if os.path.exists(ud.localpath): - rename_bad_checksum(ud, e.checksum) - bb.utils.remove(ud.donestamp) - raise - -def subprocess_setup(): - # Python installs a SIGPIPE handler by default. This is usually not what - # non-Python subprocesses expect. 
-    # SIGPIPE errors are known issues with gzip/bash
-    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
-
-def mark_recipe_nocache(d):
-    if d.getVar('BB_SRCREV_POLICY') != "cache":
-        d.setVar('BB_DONT_CACHE', '1')
-
-def get_autorev(d):
-    mark_recipe_nocache(d)
-    d.setVar("__BBAUTOREV_SEEN", True)
-    return "AUTOINC"
-
-def _get_srcrev(d, method_name='sortable_revision'):
-    """
-    Return the revision string, usually for use in the version string (PV) of the current package.
-    Most packages usually only have one SCM so we just pass on the call.
-    In the multi SCM case, we build a value based on SRCREV_FORMAT which must
-    have been set.
-
-    The idea here is that we put the string "AUTOINC+" into the return value if the
-    revisions are not incremental; other code is then responsible for turning that
-    into an increasing value (if needed).
-
-    A method_name can be supplied to retrieve an alternatively formatted revision from a fetcher, if
-    that fetcher provides a method with the given name and the same signature as sortable_revision.
-    """
-
-    d.setVar("__BBSRCREV_SEEN", "1")
-    recursion = d.getVar("__BBINSRCREV")
-    if recursion:
-        raise FetchError("There are recursive references in fetcher variables, likely through SRC_URI")
-    d.setVar("__BBINSRCREV", True)
-
-    scms = []
-    revs = []
-    fetcher = Fetch(d.getVar('SRC_URI').split(), d)
-    urldata = fetcher.ud
-    for u in urldata:
-        if urldata[u].method.supports_srcrev():
-            scms.append(u)
-
-    if not scms:
-        d.delVar("__BBINSRCREV")
-        return "", revs
-
-
-    if len(scms) == 1:
-        autoinc, rev = getattr(urldata[scms[0]].method, method_name)(urldata[scms[0]], d, urldata[scms[0]].name)
-        revs.append(rev)
-        if len(rev) > 10:
-            rev = rev[:10]
-        d.delVar("__BBINSRCREV")
-        if autoinc:
-            return "AUTOINC+" + rev, revs
-        return rev, revs
-
-    #
-    # Multiple SCMs are in SRC_URI so we resort to SRCREV_FORMAT
-    #
-    format = d.getVar('SRCREV_FORMAT')
-    if not format:
-        raise FetchError("The SRCREV_FORMAT variable must be set when multiple SCMs are used.\n"\
-                         "The SCMs are:\n%s" % '\n'.join(scms))
-
-    name_to_rev = {}
-    seenautoinc = False
-    for scm in scms:
-        ud = urldata[scm]
-        autoinc, rev = getattr(ud.method, method_name)(ud, d, ud.name)
-        revs.append(rev)
-        seenautoinc = seenautoinc or autoinc
-        if len(rev) > 10:
-            rev = rev[:10]
-        name_to_rev[ud.name] = rev
-    # Replace names by revisions in the SRCREV_FORMAT string. The approach used
-    # here can handle names being prefixes of other names and names appearing
-    # as substrings in revisions (in which case the name should not be
-    # expanded). The '|' regular expression operator tries matches from left to
-    # right, so we need to sort the names with the longest ones first.
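-    # e.g. with hypothetical names "meta" and "metadata", "metadata" must be
-    # tried first so a SRCREV_FORMAT of "metadata_meta" expands both names.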
- names_descending_len = sorted(name_to_rev, key=len, reverse=True) - name_to_rev_re = "|".join(re.escape(name) for name in names_descending_len) - format = re.sub(name_to_rev_re, lambda match: name_to_rev[match.group(0)], format) - - if seenautoinc: - format = "AUTOINC+" + format - - d.delVar("__BBINSRCREV") - return format, revs - -def get_hashvalue(d, method_name='sortable_revision'): - pkgv, revs = _get_srcrev(d, method_name=method_name) - return " ".join(revs) - -def get_pkgv_string(d, method_name='sortable_revision'): - pkgv, revs = _get_srcrev(d, method_name=method_name) - return pkgv - -def get_srcrev(d, method_name='sortable_revision'): - pkgv, revs = _get_srcrev(d, method_name=method_name) - if not pkgv: - raise FetchError("SRCREV was used yet no valid SCM was found in SRC_URI") - return pkgv - -def localpath(url, d): - fetcher = bb.fetch2.Fetch([url], d) - return fetcher.localpath(url) - -# Need to export PATH as binary could be in metadata paths -# rather than host provided -# Also include some other variables. -FETCH_EXPORT_VARS = ['HOME', 'PATH', - 'HTTP_PROXY', 'http_proxy', - 'HTTPS_PROXY', 'https_proxy', - 'FTP_PROXY', 'ftp_proxy', - 'FTPS_PROXY', 'ftps_proxy', - 'NO_PROXY', 'no_proxy', - 'ALL_PROXY', 'all_proxy', - 'GIT_PROXY_COMMAND', - 'GIT_SSH', - 'GIT_SSH_COMMAND', - 'GIT_SSL_CAINFO', - 'GIT_SMART_HTTP', - 'SSH_AUTH_SOCK', 'SSH_AGENT_PID', - 'SOCKS5_USER', 'SOCKS5_PASSWD', - 'DBUS_SESSION_BUS_ADDRESS', - 'P4CONFIG', - 'SSL_CERT_FILE', - 'NODE_EXTRA_CA_CERTS', - 'AWS_PROFILE', - 'AWS_ACCESS_KEY_ID', - 'AWS_SECRET_ACCESS_KEY', - 'AWS_ROLE_ARN', - 'AWS_WEB_IDENTITY_TOKEN_FILE', - 'AWS_DEFAULT_REGION', - 'AWS_SESSION_TOKEN', - 'GIT_CACHE_PATH', - 'REMOTE_CONTAINERS_IPC', - 'GITHUB_TOKEN', - 'SSL_CERT_DIR'] - -def get_fetcher_environment(d): - newenv = {} - origenv = d.getVar("BB_ORIGENV") - for name in bb.fetch2.FETCH_EXPORT_VARS: - value = d.getVar(name) - if not value and origenv: - value = origenv.getVar(name) - if value: - newenv[name] = value - return newenv - -def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None): - """ - Run cmd returning the command output - Raise an error if interrupted or cmd fails - Optionally echo command output to stdout - Optionally remove the files/directories listed in cleanup upon failure - """ - - exportvars = FETCH_EXPORT_VARS - - if not cleanup: - cleanup = [] - - # If PATH contains WORKDIR which contains PV-PR which contains SRCPV we - # can end up in circular recursion here so give the option of breaking it - # in a data store copy. - try: - d.getVar("PV") - d.getVar("PR") - except bb.data_smart.ExpansionError: - d = bb.data.createCopy(d) - d.setVar("PV", "fetcheravoidrecurse") - d.setVar("PR", "fetcheravoidrecurse") - - origenv = d.getVar("BB_ORIGENV", False) - for var in exportvars: - val = d.getVar(var) or (origenv and origenv.getVar(var)) - if val: - cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd) - - # Disable pseudo as it may affect ssh, potentially causing it to hang. 
- cmd = 'export PSEUDO_DISABLED=1; ' + cmd - - if workdir: - logger.debug("Running '%s' in %s" % (cmd, workdir)) - else: - logger.debug("Running %s", cmd) - - success = False - error_message = "" - - try: - (output, errors) = bb.process.run(cmd, log=log, shell=True, stderr=subprocess.PIPE, cwd=workdir) - success = True - except bb.process.NotFoundError as e: - error_message = "Fetch command %s not found" % (e.command) - except bb.process.ExecutionError as e: - if e.stdout: - output = "output:\n%s\n%s" % (e.stdout, e.stderr) - elif e.stderr: - output = "output:\n%s" % e.stderr - else: - if log: - output = "see logfile for output" - else: - output = "no output" - error_message = "Fetch command %s failed with exit code %s, %s" % (e.command, e.exitcode, output) - except bb.process.CmdError as e: - error_message = "Fetch command %s could not be run:\n%s" % (e.command, e.msg) - if not success: - for f in cleanup: - try: - bb.utils.remove(f, True) - except OSError: - pass - - raise FetchError(error_message) - - return output - -def check_network_access(d, info, url): - """ - log remote network access, and error if BB_NO_NETWORK is set or the given - URI is untrusted - """ - if bb.utils.to_boolean(d.getVar("BB_NO_NETWORK")): - raise NetworkAccess(url, info) - elif not trusted_network(d, url): - raise UntrustedUrl(url, info) - else: - logger.debug("Fetcher accessed the network with the command %s" % info) - -def build_mirroruris(origud, mirrors, ld): - uris = [] - uds = [] - - replacements = {} - replacements["TYPE"] = origud.type - replacements["HOST"] = origud.host - replacements["PATH"] = origud.path - replacements["BASENAME"] = origud.path.split("/")[-1] - replacements["MIRRORNAME"] = origud.host.replace(':','.') + origud.path.replace('/', '.').replace('*', '.') - - def adduri(ud, uris, uds, mirrors, tarballs): - for line in mirrors: - try: - (find, replace) = line - except ValueError: - continue - - for tarball in tarballs: - newuri = uri_replace(ud, find, replace, replacements, ld, tarball) - if not newuri or newuri in uris or newuri == origud.url: - continue - - if not trusted_network(ld, newuri): - logger.debug("Mirror %s not in the list of trusted networks, skipping" % (newuri)) - continue - - # Create a local copy of the mirrors minus the current line - # this will prevent us from recursively processing the same line - # as well as indirect recursion A -> B -> C -> A - localmirrors = list(mirrors) - localmirrors.remove(line) - - try: - newud = FetchData(newuri, ld) - newud.ignore_checksums = True - newud.setup_localpath(ld) - except bb.fetch2.BBFetchException as e: - logger.debug("Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url)) - logger.debug(str(e)) - try: - # setup_localpath of file:// urls may fail, we should still see - # if mirrors of the url exist - adduri(newud, uris, uds, localmirrors, tarballs) - except UnboundLocalError: - pass - continue - uris.append(newuri) - uds.append(newud) - - adduri(newud, uris, uds, localmirrors, tarballs) - - adduri(origud, uris, uds, mirrors, origud.mirrortarballs or [None]) - - return uris, uds - -def rename_bad_checksum(ud, suffix): - """ - Renames files to have suffix from parameter - """ - - if ud.localpath is None: - return - - new_localpath = "%s_bad-checksum_%s" % (ud.localpath, suffix) - bb.warn("Renaming %s to %s" % (ud.localpath, new_localpath)) - if not bb.utils.movefile(ud.localpath, new_localpath): - bb.warn("Renaming %s to %s failed, grep movefile in log.do_fetch to see why" % (ud.localpath, new_localpath)) - 
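-
-# For illustration, a PREMIRRORS/MIRRORS entry as consumed by the functions
-# above is a whitespace-separated (find, replace) pair; mirror_from_string()
-# turns it into tuples for build_mirroruris() (hypothetical mirror host):
-#
-#   mirror_from_string("git://.*/.* http://mirror.example.com/sources/")
-#   # -> [('git://.*/.*', 'http://mirror.example.com/sources/')]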
- -def try_mirror_url(fetch, origud, ud, ld, check = False): - # Return of None or a value means we're finished - # False means try another url - - if ud.lockfile and ud.lockfile != origud.lockfile: - lf = bb.utils.lockfile(ud.lockfile) - - try: - if check: - found = ud.method.checkstatus(fetch, ud, ld) - if found: - return found - return False - - if not verify_donestamp(ud, ld, origud) or ud.method.need_update(ud, ld): - ud.method.download(ud, ld) - if hasattr(ud.method,"build_mirror_data"): - ud.method.build_mirror_data(ud, ld) - - if not ud.localpath or not os.path.exists(ud.localpath): - return False - - if ud.localpath == origud.localpath: - return ud.localpath - - # We may be obtaining a mirror tarball which needs further processing by the real fetcher - # If that tarball is a local file:// we need to provide a symlink to it - dldir = ld.getVar("DL_DIR") - - if bb.utils.to_boolean(ld.getVar("BB_FETCH_PREMIRRORONLY")): - ld = ld.createCopy() - ld.setVar("BB_NO_NETWORK", "1") - - if origud.mirrortarballs and os.path.basename(ud.localpath) in origud.mirrortarballs and os.path.basename(ud.localpath) != os.path.basename(origud.localpath): - # Create donestamp in old format to avoid triggering a re-download - if ud.donestamp: - bb.utils.mkdirhier(os.path.dirname(ud.donestamp)) - open(ud.donestamp, 'w').close() - dest = os.path.join(dldir, os.path.basename(ud.localpath)) - if not os.path.exists(dest): - # In case this is executing without any file locks held (as is - # the case for file:// URLs), two tasks may end up here at the - # same time, in which case we do not want the second task to - # fail when the link has already been created by the first task. - try: - os.symlink(ud.localpath, dest) - except FileExistsError: - pass - if not verify_donestamp(origud, ld) or origud.method.need_update(origud, ld): - origud.method.download(origud, ld) - if hasattr(origud.method, "build_mirror_data"): - origud.method.build_mirror_data(origud, ld) - return origud.localpath - # Otherwise the result is a local file:// and we symlink to it - # This may also be a link to a shallow archive - # When using shallow mode, add a symlink to the original fullshallow - # path to ensure a valid symlink even in the `PREMIRRORS` case - origud.method.update_mirror_links(ud, origud) - update_stamp(origud, ld) - return ud.localpath - - except bb.fetch2.NetworkAccess: - raise - - except IOError as e: - if e.errno in [errno.ESTALE]: - logger.warning("Stale Error Observed %s." % ud.url) - return False - raise - - except bb.fetch2.BBFetchException as e: - if isinstance(e, ChecksumError): - logger.warning("Mirror checksum failure for url %s (original url: %s)\nCleaning and trying again." % (ud.url, origud.url)) - logger.warning(str(e)) - if os.path.exists(ud.localpath): - rename_bad_checksum(ud, e.checksum) - elif isinstance(e, NoChecksumError): - raise - else: - logger.debug("Mirror fetch failure for url %s (original url: %s)" % (ud.url, origud.url)) - logger.debug(str(e)) - try: - if ud.method.cleanup_upon_failure(): - ud.method.clean(ud, ld) - except UnboundLocalError: - pass - return False - finally: - if ud.lockfile and ud.lockfile != origud.lockfile: - bb.utils.unlockfile(lf) - -def try_mirrors(fetch, d, origud, mirrors, check = False): - """ - Try to use a mirrored version of the sources. - This method will be automatically called before the fetchers go. 
- - d Is a bb.data instance - uri is the original uri we're trying to download - mirrors is the list of mirrors we're going to try - """ - ld = d.createCopy() - - uris, uds = build_mirroruris(origud, mirrors, ld) - - for index, uri in enumerate(uris): - ret = try_mirror_url(fetch, origud, uds[index], ld, check) - if ret: - return ret - return None - -def trusted_network(d, url): - """ - Use a trusted url during download if networking is enabled and - BB_ALLOWED_NETWORKS is set globally or for a specific recipe. - Note: modifies SRC_URI & mirrors. - """ - if bb.utils.to_boolean(d.getVar("BB_NO_NETWORK")): - return True - - pkgname = d.getVar('PN') - trusted_hosts = None - if pkgname: - trusted_hosts = d.getVarFlag('BB_ALLOWED_NETWORKS', pkgname, False) - - if not trusted_hosts: - trusted_hosts = d.getVar('BB_ALLOWED_NETWORKS') - - # Not enabled. - if not trusted_hosts: - return True - - scheme, network, path, user, passwd, param = decodeurl(url) - - if not network: - return True - - network = network.split(':')[0] - network = network.lower() - - for host in trusted_hosts.split(" "): - host = host.lower() - if host.startswith("*.") and ("." + network).endswith(host[1:]): - return True - if host == network: - return True - - return False - -def srcrev_internal_helper(ud, d, name): - """ - Return: - a) a source revision if specified - b) latest revision if SRCREV="AUTOINC" - c) None if not specified - """ - - srcrev = None - pn = d.getVar("PN") - attempts = [] - if name != '' and pn: - attempts.append("SRCREV_%s:pn-%s" % (name, pn)) - if name != '': - attempts.append("SRCREV_%s" % name) - if pn: - attempts.append("SRCREV:pn-%s" % pn) - attempts.append("SRCREV") - - for a in attempts: - srcrev = d.getVar(a) - if srcrev and srcrev != "INVALID": - break - - if 'rev' in ud.parm: - parmrev = ud.parm['rev'] - if srcrev == "INVALID" or not srcrev: - return parmrev - if srcrev != parmrev: - raise FetchError("Conflicting revisions (%s from SRCREV and %s from the url) found, please specify one valid value" % (srcrev, parmrev)) - return parmrev - - if 'tag' in ud.parm and (srcrev == "INVALID" or not srcrev): - return ud.parm['tag'] - - if srcrev == "INVALID" or not srcrev: - raise FetchError("Please set a valid SRCREV for url %s (possible key names are %s, or use a ;rev=X URL parameter)" % (str(attempts), ud.url), ud.url) - if srcrev == "AUTOINC": - d.setVar("__BBAUTOREV_ACTED_UPON", True) - srcrev = ud.method.latest_revision(ud, d, name) - - return srcrev - -def get_checksum_file_list(d): - """ Get a list of files checksum in SRC_URI - - Returns the resolved local paths of all local file entries in - SRC_URI as a space-separated string - """ - fetch = Fetch([], d, cache = False, localonly = True) - filelist = [] - for u in fetch.urls: - ud = fetch.ud[u] - if ud and isinstance(ud.method, local.Local): - found = False - paths = ud.method.localfile_searchpaths(ud, d) - for f in paths: - pth = ud.path - if os.path.exists(f): - found = True - filelist.append(f + ":" + str(os.path.exists(f))) - if not found: - bb.fatal(("Unable to get checksum for %s SRC_URI entry %s: file could not be found" - "\nThe following paths were searched:" - "\n%s") % (d.getVar('PN'), os.path.basename(f), '\n'.join(paths))) - - return " ".join(filelist) - -def get_file_checksums(filelist, pn, localdirsexclude): - """Get a list of the checksums for a list of local files - - Returns the checksums for a list of local files, caching the results as - it proceeds - - """ - return _checksum_cache.get_checksums(filelist, pn, 
localdirsexclude) - - -class FetchData(object): - """ - A class which represents the fetcher state for a given URI. - """ - def __init__(self, url, d, localonly = False): - # localpath is the location of a downloaded result. If not set, the file is local. - self.donestamp = None - self.needdonestamp = True - self.localfile = "" - self.localpath = None - self.lockfile = None - self.mirrortarballs = [] - self.basename = None - self.basepath = None - (self.type, self.host, self.path, self.user, self.pswd, self.parm) = decodeurl(d.expand(url)) - self.date = self.getSRCDate(d) - self.url = url - if not self.user and "user" in self.parm: - self.user = self.parm["user"] - if not self.pswd and "pswd" in self.parm: - self.pswd = self.parm["pswd"] - self.setup = False - - def configure_checksum(checksum_id): - checksum_plain_name = "%ssum" % checksum_id - if "name" in self.parm: - checksum_name = "%s.%ssum" % (self.parm["name"], checksum_id) - else: - checksum_name = checksum_plain_name - - if checksum_name in self.parm: - checksum_expected = self.parm[checksum_name] - elif checksum_plain_name in self.parm: - checksum_expected = self.parm[checksum_plain_name] - checksum_name = checksum_plain_name - elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3", "az", "crate", "gs", "gomod", "npm"]: - checksum_expected = None - else: - checksum_expected = d.getVarFlag("SRC_URI", checksum_name) - - setattr(self, "%s_name" % checksum_id, checksum_name) - setattr(self, "%s_expected" % checksum_id, checksum_expected) - - self.name = self.parm.get("name",'default') - if "," in self.name: - raise ParameterError("The fetcher no longer supports multiple name parameters in a single url", self.url) - - self.method = None - for m in methods: - if m.supports(self, d): - self.method = m - break - - if not self.method: - raise NoMethodError(url) - - if localonly and not isinstance(self.method, local.Local): - raise NonLocalMethod() - - if self.parm.get("proto", None) and "protocol" not in self.parm: - logger.warning('Consider updating %s recipe to use "protocol" not "proto" in SRC_URI.', d.getVar('PN')) - self.parm["protocol"] = self.parm.get("proto", None) - - if hasattr(self.method, "urldata_init"): - self.method.urldata_init(self, d) - - for checksum_id in CHECKSUM_LIST: - configure_checksum(checksum_id) - - self.ignore_checksums = False - - if "localpath" in self.parm: - # if user sets localpath for file, use it instead. - self.localpath = self.parm["localpath"] - self.basename = os.path.basename(self.localpath) - elif self.localfile: - self.localpath = self.method.localpath(self, d) - - dldir = d.getVar("DL_DIR") - - if not self.needdonestamp: - return - - # Note: .done and .lock files should always be in DL_DIR whereas localpath may not be. 
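-        # e.g. (hypothetical) DL_DIR="/dl" and localpath="/dl/foo-1.0.tar.gz"
-        # give donestamp="/dl/foo-1.0.tar.gz.done" and lockfile="/dl/foo-1.0.tar.gz.lock"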
- if self.localpath and self.localpath.startswith(dldir): - basepath = self.localpath - elif self.localpath: - basepath = dldir + os.sep + os.path.basename(self.localpath) - elif self.basepath or self.basename: - basepath = dldir + os.sep + (self.basepath or self.basename) - else: - bb.fatal("Can't determine lock path for url %s" % url) - - self.donestamp = basepath + '.done' - self.lockfile = basepath + '.lock' - - def setup_revisions(self, d): - self.revision = srcrev_internal_helper(self, d, self.name) - - def setup_localpath(self, d): - if not self.localpath: - self.localpath = self.method.localpath(self, d) - - def getSRCDate(self, d): - """ - Return the SRC Date for the component - - d the bb.data module - """ - if "srcdate" in self.parm: - return self.parm['srcdate'] - - pn = d.getVar("PN") - - if pn: - return d.getVar("SRCDATE_%s" % pn) or d.getVar("SRCDATE") or d.getVar("DATE") - - return d.getVar("SRCDATE") or d.getVar("DATE") - -class FetchMethod(object): - """Base class for 'fetch'ing data""" - - def __init__(self, urls=None): - self.urls = [] - - def supports(self, urldata, d): - """ - Check to see if this fetch class supports a given url. - """ - return 0 - - def localpath(self, urldata, d): - """ - Return the local filename of a given url assuming a successful fetch. - Can also setup variables in urldata for use in go (saving code duplication - and duplicate code execution) - """ - return os.path.join(d.getVar("DL_DIR"), urldata.localfile) - - def supports_checksum(self, urldata): - """ - Is localpath something that can be represented by a checksum? - """ - - # We cannot compute checksums for None - if urldata.localpath is None: - return False - # We cannot compute checksums for directories - if os.path.isdir(urldata.localpath): - return False - return True - - def recommends_checksum(self, urldata): - """ - Is the backend on where checksumming is recommended (should warnings - be displayed if there is no checksum)? - """ - return False - - def cleanup_upon_failure(self): - """ - When a fetch fails, should clean() be called? - """ - return True - - def verify_donestamp(self, ud, d): - """ - Verify the donestamp file - """ - return verify_donestamp(ud, d) - - def update_donestamp(self, ud, d): - """ - Update the donestamp file - """ - update_stamp(ud, d) - - def _strip_leading_slashes(self, relpath): - """ - Remove leading slash as os.path.join can't cope - """ - while os.path.isabs(relpath): - relpath = relpath[1:] - return relpath - - def setUrls(self, urls): - self.__urls = urls - - def getUrls(self): - return self.__urls - - urls = property(getUrls, setUrls, None, "Urls property") - - def need_update(self, ud, d): - """ - Force a fetch, even if localpath exists? 
- """ - if os.path.exists(ud.localpath): - return False - return True - - def supports_srcrev(self): - """ - The fetcher supports auto source revisions (SRCREV) - """ - return False - - def download(self, urldata, d): - """ - Fetch urls - Assumes localpath was called first - """ - raise NoMethodError(urldata.url) - - def unpack(self, urldata, rootdir, data): - iterate = False - file = urldata.localpath - - try: - unpack = bb.utils.to_boolean(urldata.parm.get('unpack'), True) - except ValueError as exc: - bb.fatal("Invalid value for 'unpack' parameter for %s: %s" % - (file, urldata.parm.get('unpack'))) - - base, ext = os.path.splitext(file) - if ext in ['.gz', '.bz2', '.Z', '.xz', '.lz', '.zst']: - efile = os.path.join(rootdir, os.path.basename(base)) - else: - efile = file - cmd = None - - if unpack: - tar_cmd = 'tar --extract --no-same-owner' - if 'striplevel' in urldata.parm: - tar_cmd += ' --strip-components=%s' % urldata.parm['striplevel'] - if file.endswith('.tar'): - cmd = '%s -f %s' % (tar_cmd, file) - elif file.endswith('.tgz') or file.endswith('.tar.gz') or file.endswith('.tar.Z'): - cmd = '%s -z -f %s' % (tar_cmd, file) - elif file.endswith('.tbz') or file.endswith('.tbz2') or file.endswith('.tar.bz2'): - cmd = 'bzip2 -dc %s | %s -f -' % (file, tar_cmd) - elif file.endswith('.gz') or file.endswith('.Z') or file.endswith('.z'): - cmd = 'gzip -dc %s > %s' % (file, efile) - elif file.endswith('.bz2'): - cmd = 'bzip2 -dc %s > %s' % (file, efile) - elif file.endswith('.txz') or file.endswith('.tar.xz'): - cmd = 'xz -dc %s | %s -f -' % (file, tar_cmd) - elif file.endswith('.xz'): - cmd = 'xz -dc %s > %s' % (file, efile) - elif file.endswith('.tar.lz'): - cmd = 'lzip -dc %s | %s -f -' % (file, tar_cmd) - elif file.endswith('.lz'): - cmd = 'lzip -dc %s > %s' % (file, efile) - elif file.endswith('.tar.7z'): - cmd = '7z x -so %s | %s -f -' % (file, tar_cmd) - elif file.endswith('.7z'): - cmd = '7za x -y %s 1>/dev/null' % file - elif file.endswith('.tzst') or file.endswith('.tar.zst'): - cmd = 'zstd --decompress --stdout %s | %s -f -' % (file, tar_cmd) - elif file.endswith('.zst'): - cmd = 'zstd --decompress --stdout %s > %s' % (file, efile) - elif file.endswith('.zip') or file.endswith('.jar'): - try: - dos = bb.utils.to_boolean(urldata.parm.get('dos'), False) - except ValueError as exc: - bb.fatal("Invalid value for 'dos' parameter for %s: %s" % - (file, urldata.parm.get('dos'))) - cmd = 'unzip -q -o' - if dos: - cmd = '%s -a' % cmd - cmd = "%s '%s'" % (cmd, file) - elif file.endswith('.rpm') or file.endswith('.srpm'): - if 'extract' in urldata.parm: - unpack_file = urldata.parm.get('extract') - cmd = 'rpm2cpio.sh %s | cpio -id %s' % (file, unpack_file) - iterate = True - iterate_file = unpack_file - else: - cmd = 'rpm2cpio.sh %s | cpio -id' % (file) - elif file.endswith('.deb') or file.endswith('.ipk'): - output = subprocess.check_output(['ar', '-t', file], preexec_fn=subprocess_setup) - datafile = None - if output: - for line in output.decode().splitlines(): - if line.startswith('data.tar.') or line == 'data.tar': - datafile = line - break - else: - raise UnpackError("Unable to unpack deb/ipk package - does not contain data.tar* file", urldata.url) - else: - raise UnpackError("Unable to unpack deb/ipk package - could not list contents", urldata.url) - cmd = 'ar x %s %s && %s -p -f %s && rm %s' % (file, datafile, tar_cmd, datafile, datafile) - - # If 'subdir' param exists, create a dir and use it as destination for unpack cmd - if 'subdir' in urldata.parm: - subdir = 
urldata.parm.get('subdir') - if os.path.isabs(subdir): - if not os.path.realpath(subdir).startswith(os.path.realpath(rootdir)): - raise UnpackError("subdir argument isn't a subdirectory of unpack root %s" % rootdir, urldata.url) - unpackdir = subdir - else: - unpackdir = os.path.join(rootdir, subdir) - bb.utils.mkdirhier(unpackdir) - else: - unpackdir = rootdir - - if not unpack or not cmd: - urldata.unpack_tracer.unpack("file-copy", unpackdir) - # If file == dest, then avoid any copies, as we already put the file into dest! - dest = os.path.join(unpackdir, os.path.basename(file)) - if file != dest and not (os.path.exists(dest) and os.path.samefile(file, dest)): - destdir = '.' - # For file:// entries all intermediate dirs in path must be created at destination - if urldata.type == "file": - # Trailing '/' does a copying to wrong place - urlpath = urldata.path.rstrip('/') - # Want files places relative to cwd so no leading '/' - urlpath = urlpath.lstrip('/') - if urlpath.find("/") != -1: - destdir = urlpath.rsplit("/", 1)[0] + '/' - bb.utils.mkdirhier("%s/%s" % (unpackdir, destdir)) - cmd = 'cp --force --preserve=timestamps --no-dereference --recursive -H "%s" "%s"' % (file, destdir) - else: - urldata.unpack_tracer.unpack("archive-extract", unpackdir) - - if not cmd: - return - - path = data.getVar('PATH') - if path: - cmd = "PATH=\"%s\" %s" % (path, cmd) - bb.note("Unpacking %s to %s/" % (file, unpackdir)) - ret = subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True, cwd=unpackdir) - - if ret != 0: - raise UnpackError("Unpack command %s failed with return value %s" % (cmd, ret), urldata.url) - - if iterate is True: - iterate_urldata = urldata - iterate_urldata.localpath = "%s/%s" % (rootdir, iterate_file) - self.unpack(urldata, rootdir, data) - - return - - def clean(self, urldata, d): - """ - Clean any existing full or partial download - """ - bb.utils.remove(urldata.localpath) - - def ensure_symlink(self, target, link_name): - if not os.path.exists(link_name): - dirname = os.path.dirname(link_name) - bb.utils.mkdirhier(dirname) - if os.path.islink(link_name): - # Broken symbolic link - os.unlink(link_name) - - # In case this is executing without any file locks held (as is - # the case for file:// URLs), two tasks may end up here at the - # same time, in which case we do not want the second task to - # fail when the link has already been created by the first task. - try: - os.symlink(target, link_name) - except FileExistsError: - pass - - def update_mirror_links(self, ud, origud): - # For local file:// results, create a symlink to them - # This may also be a link to a shallow archive - self.ensure_symlink(ud.localpath, origud.localpath) - - def try_premirror(self, urldata, d): - """ - Should premirrors be used? - """ - return True - - def try_mirrors(self, fetch, urldata, d, mirrors, check=False): - """ - Try to use a mirror - """ - return bool(try_mirrors(fetch, d, urldata, mirrors, check)) - - def checkstatus(self, fetch, urldata, d): - """ - Check the status of a URL - Assumes localpath was called first - """ - logger.info("URL %s could not be checked for status since no method exists.", urldata.url) - return True - - def latest_revision(self, ud, d, name): - """ - Look in the cache for the latest revision, if not present ask the SCM. 
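-        Sketch of the caching contract, assuming a fetcher that implements
-        _latest_revision: repeated calls with the same revision key hit
-        _revisions_cache and contact the SCM at most once, e.g.
-
-            rev = ud.method.latest_revision(ud, d, ud.name)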
- """ - if not hasattr(self, "_latest_revision"): - raise ParameterError("The fetcher for this URL does not support _latest_revision", ud.url) - - key = self.generate_revision_key(ud, d, name) - - rev = _revisions_cache.get_rev(key) - if rev is None: - rev = self._latest_revision(ud, d, name) - _revisions_cache.set_rev(key, rev) - return rev - - def sortable_revision(self, ud, d, name): - latest_rev = self._build_revision(ud, d, name) - return True, str(latest_rev) - - def generate_revision_key(self, ud, d, name): - return self._revision_key(ud, d, name) - - def latest_versionstring(self, ud, d): - """ - Compute the latest release name like "x.y.x" in "x.y.x+gitHASH" - by searching through the tags output of ls-remote, comparing - versions and returning the highest match as a (version, revision) pair. - """ - return ('', '') - - def done(self, ud, d): - """ - Is the download done ? - """ - if os.path.exists(ud.localpath): - return True - return False - - def implicit_urldata(self, ud, d): - """ - Get a list of FetchData objects for any implicit URLs that will also - be downloaded when we fetch the given URL. - """ - return [] - - -class DummyUnpackTracer(object): - """ - Abstract API definition for a class that traces unpacked source files back - to their respective upstream SRC_URI entries, for software composition - analysis, license compliance and detailed SBOM generation purposes. - User may load their own unpack tracer class (instead of the dummy - one) by setting the BB_UNPACK_TRACER_CLASS config parameter. - """ - def start(self, unpackdir, urldata_dict, d): - """ - Start tracing the core Fetch.unpack process, using an index to map - unpacked files to each SRC_URI entry. - This method is called by Fetch.unpack and it may receive nested calls by - gitsm and npmsw fetchers, that expand SRC_URI entries by adding implicit - URLs and by recursively calling Fetch.unpack from new (nested) Fetch - instances. - """ - return - def start_url(self, url): - """Start tracing url unpack process. - This method is called by Fetch.unpack before the fetcher-specific unpack - method starts, and it may receive nested calls by gitsm and npmsw - fetchers. - """ - return - def unpack(self, unpack_type, destdir): - """ - Set unpack_type and destdir for current url. - This method is called by the fetcher-specific unpack method after url - tracing started. - """ - return - def finish_url(self, url): - """Finish tracing url unpack process and update the file index. - This method is called by Fetch.unpack after the fetcher-specific unpack - method finished its job, and it may receive nested calls by gitsm - and npmsw fetchers. - """ - return - def complete(self): - """ - Finish tracing the Fetch.unpack process, and check if all nested - Fecth.unpack calls (if any) have been completed; if so, save collected - metadata. 
- """ - return - - -class Fetch(object): - def __init__(self, urls, d, cache = True, localonly = False, connection_cache = None): - if localonly and cache: - raise Exception("bb.fetch2.Fetch.__init__: cannot set cache and localonly at same time") - - if not urls: - urls = d.getVar("SRC_URI").split() - self.urls = urls - self.d = d - self.ud = {} - self.connection_cache = connection_cache - - fn = d.getVar('FILE') - mc = d.getVar('__BBMULTICONFIG') or "" - key = None - if cache and fn: - key = mc + fn + str(id(d)) - if key in urldata_cache: - self.ud = urldata_cache[key] - - # the unpack_tracer object needs to be made available to possible nested - # Fetch instances (when those are created by gitsm and npmsw fetchers) - # so we set it as a global variable - global unpack_tracer - try: - unpack_tracer - except NameError: - class_path = d.getVar("BB_UNPACK_TRACER_CLASS") - if class_path: - # use user-defined unpack tracer class - import importlib - module_name, _, class_name = class_path.rpartition(".") - module = importlib.import_module(module_name) - class_ = getattr(module, class_name) - unpack_tracer = class_() - else: - # fall back to the dummy/abstract class - unpack_tracer = DummyUnpackTracer() - - for url in urls: - if url not in self.ud: - try: - self.ud[url] = FetchData(url, d, localonly) - self.ud[url].unpack_tracer = unpack_tracer - except NonLocalMethod: - if localonly: - self.ud[url] = None - pass - - if key: - urldata_cache[key] = self.ud - - def localpath(self, url): - if url not in self.urls: - self.ud[url] = FetchData(url, self.d) - - self.ud[url].setup_localpath(self.d) - return self.ud[url].localpath - - def localpaths(self): - """ - Return a list of the local filenames, assuming successful fetch - """ - local = [] - - for u in self.urls: - ud = self.ud[u] - ud.setup_localpath(self.d) - local.append(ud.localpath) - - return local - - def download(self, urls=None): - """ - Fetch all urls - """ - if not urls: - urls = self.urls - - network = self.d.getVar("BB_NO_NETWORK") - premirroronly = bb.utils.to_boolean(self.d.getVar("BB_FETCH_PREMIRRORONLY")) - - checksum_missing_messages = [] - for u in urls: - ud = self.ud[u] - ud.setup_localpath(self.d) - m = ud.method - done = False - - if ud.lockfile: - lf = bb.utils.lockfile(ud.lockfile) - - try: - self.d.setVar("BB_NO_NETWORK", network) - if m.verify_donestamp(ud, self.d) and not m.need_update(ud, self.d): - done = True - elif m.try_premirror(ud, self.d): - logger.debug("Trying PREMIRRORS") - mirrors = mirror_from_string(self.d.getVar('PREMIRRORS')) - done = m.try_mirrors(self, ud, self.d, mirrors) - if done: - try: - # early checksum verification so that if the checksum of the premirror - # contents mismatch the fetcher can still try upstream and mirrors - m.update_donestamp(ud, self.d) - except ChecksumError as e: - logger.warning("Checksum failure encountered with premirror download of %s - will attempt other sources." 
% u) - logger.debug(str(e)) - done = False - - d = self.d - if premirroronly: - # Only disable the network in a copy - d = bb.data.createCopy(self.d) - d.setVar("BB_NO_NETWORK", "1") - - firsterr = None - verified_stamp = False - if done: - verified_stamp = m.verify_donestamp(ud, d) - if not done and (not verified_stamp or m.need_update(ud, d)): - try: - if not trusted_network(d, ud.url): - raise UntrustedUrl(ud.url) - logger.debug("Trying Upstream") - m.download(ud, d) - if hasattr(m, "build_mirror_data"): - m.build_mirror_data(ud, d) - done = True - # early checksum verify, so that if checksum mismatched, - # fetcher still have chance to fetch from mirror - m.update_donestamp(ud, d) - - except bb.fetch2.NetworkAccess: - raise - - except BBFetchException as e: - if isinstance(e, ChecksumError): - logger.warning("Checksum failure encountered with download of %s - will attempt other sources if available" % u) - logger.debug(str(e)) - if os.path.exists(ud.localpath): - rename_bad_checksum(ud, e.checksum) - elif isinstance(e, NoChecksumError): - raise - else: - logger.warning('Failed to fetch URL %s, attempting MIRRORS if available' % u) - logger.debug(str(e)) - firsterr = e - # Remove any incomplete fetch - if not verified_stamp and m.cleanup_upon_failure(): - m.clean(ud, d) - logger.debug("Trying MIRRORS") - mirrors = mirror_from_string(d.getVar('MIRRORS')) - done = m.try_mirrors(self, ud, d, mirrors) - - if not done or not m.done(ud, d): - if firsterr: - logger.error(str(firsterr)) - raise FetchError("Unable to fetch URL from any source.", u) - - m.update_donestamp(ud, d) - - except IOError as e: - if e.errno in [errno.ESTALE]: - logger.error("Stale Error Observed %s." % u) - raise ChecksumError("Stale Error Detected") - - except BBFetchException as e: - if isinstance(e, NoChecksumError): - (message, _) = e.args - checksum_missing_messages.append(message) - continue - elif isinstance(e, ChecksumError): - logger.error("Checksum failure fetching %s" % u) - raise - - finally: - if ud.lockfile: - bb.utils.unlockfile(lf) - if checksum_missing_messages: - logger.error("Missing SRC_URI checksum, please add those to the recipe: \n%s", "\n".join(checksum_missing_messages)) - raise BBFetchException("There was some missing checksums in the recipe") - - def checkstatus(self, urls=None): - """ - Check all URLs exist upstream. - - Returns None if the URLs exist, raises FetchError if the check wasn't - successful but there wasn't an error (such as file not found), and - raises other exceptions in error cases. 
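-
-        Hypothetical usage:
-
-            fetcher = Fetch(["https://example.com/src-1.0.tar.gz"], d)
-            fetcher.checkstatus()   # tries PREMIRRORS, then upstream, then MIRRORS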
- """ - - if not urls: - urls = self.urls - - for u in urls: - ud = self.ud[u] - ud.setup_localpath(self.d) - m = ud.method - logger.debug("Testing URL %s", u) - # First try checking uri, u, from PREMIRRORS - mirrors = mirror_from_string(self.d.getVar('PREMIRRORS')) - ret = m.try_mirrors(self, ud, self.d, mirrors, True) - if not ret: - # Next try checking from the original uri, u - ret = m.checkstatus(self, ud, self.d) - if not ret: - # Finally, try checking uri, u, from MIRRORS - mirrors = mirror_from_string(self.d.getVar('MIRRORS')) - ret = m.try_mirrors(self, ud, self.d, mirrors, True) - - if not ret: - raise FetchError("URL doesn't work", u) - - def unpack(self, root, urls=None): - """ - Unpack urls to root - """ - - if not urls: - urls = self.urls - - unpack_tracer.start(root, self.ud, self.d) - - for u in urls: - ud = self.ud[u] - ud.setup_localpath(self.d) - - if ud.lockfile: - lf = bb.utils.lockfile(ud.lockfile) - - unpack_tracer.start_url(u) - ud.method.unpack(ud, root, self.d) - unpack_tracer.finish_url(u) - - if ud.lockfile: - bb.utils.unlockfile(lf) - - unpack_tracer.complete() - - def clean(self, urls=None): - """ - Clean files that the fetcher gets or places - """ - - if not urls: - urls = self.urls - - for url in urls: - if url not in self.ud: - self.ud[url] = FetchData(url, self.d) - ud = self.ud[url] - ud.setup_localpath(self.d) - - if not ud.localfile and ud.localpath is None: - continue - - if ud.lockfile: - lf = bb.utils.lockfile(ud.lockfile) - - ud.method.clean(ud, self.d) - if ud.donestamp: - bb.utils.remove(ud.donestamp) - - if ud.lockfile: - bb.utils.unlockfile(lf) - - def expanded_urldata(self, urls=None): - """ - Get an expanded list of FetchData objects covering both the given - URLS and any additional implicit URLs that are added automatically by - the appropriate FetchMethod. - """ - - if not urls: - urls = self.urls - - urldata = [] - for url in urls: - ud = self.ud[url] - urldata.append(ud) - urldata += ud.method.implicit_urldata(ud, self.d) - - return urldata - -class FetchConnectionCache(object): - """ - A class which represents an container for socket connections. - """ - def __init__(self): - self.cache = {} - - def get_connection_name(self, host, port): - return host + ':' + str(port) - - def add_connection(self, host, port, connection): - cn = self.get_connection_name(host, port) - - if cn not in self.cache: - self.cache[cn] = connection - - def get_connection(self, host, port): - connection = None - - cn = self.get_connection_name(host, port) - if cn in self.cache: - connection = self.cache[cn] - - return connection - - def remove_connection(self, host, port): - cn = self.get_connection_name(host, port) - if cn in self.cache: - self.cache[cn].close() - del self.cache[cn] - - def close_connections(self): - for cn in list(self.cache.keys()): - self.cache[cn].close() - del self.cache[cn] - -from . import cvs -from . import git -from . import gitsm -from . import gitannex -from . import local -from . import svn -from . import wget -from . import ssh -from . import sftp -from . import s3 -from . import perforce -from . import bzr -from . import hg -from . import osc -from . import repo -from . import clearcase -from . import npm -from . import npmsw -from . import az -from . import crate -from . import gcp -from . 
import gomod
-
-methods.append(local.Local())
-methods.append(wget.Wget())
-methods.append(svn.Svn())
-methods.append(git.Git())
-methods.append(gitsm.GitSM())
-methods.append(gitannex.GitANNEX())
-methods.append(cvs.Cvs())
-methods.append(ssh.SSH())
-methods.append(sftp.SFTP())
-methods.append(s3.S3())
-methods.append(perforce.Perforce())
-methods.append(bzr.Bzr())
-methods.append(hg.Hg())
-methods.append(osc.Osc())
-methods.append(repo.Repo())
-methods.append(clearcase.ClearCase())
-methods.append(npm.Npm())
-methods.append(npmsw.NpmShrinkWrap())
-methods.append(az.Az())
-methods.append(crate.Crate())
-methods.append(gcp.GCP())
-methods.append(gomod.GoMod())
-methods.append(gomod.GoModGit())
diff --git a/bitbake/lib/bb/fetch2/az.py b/bitbake/lib/bb/fetch2/az.py
deleted file mode 100644
index 1d3664f213..0000000000
--- a/bitbake/lib/bb/fetch2/az.py
+++ /dev/null
@@ -1,98 +0,0 @@
-"""
-BitBake 'Fetch' Azure Storage implementation
-
-"""
-
-# Copyright (C) 2021 Alejandro Hernandez Samaniego
-#
-# Based on bb.fetch2.wget:
-# Copyright (C) 2003, 2004 Chris Larson
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Based on functions from the base bb module, Copyright 2003 Holger Schurig
-
-import shlex
-import os
-import bb
-from bb.fetch2 import FetchError
-from bb.fetch2 import logger
-from bb.fetch2.wget import Wget
-
-
-class Az(Wget):
-
-    def supports(self, ud, d):
-        """
-        Check to see if a given url can be fetched from Azure Storage
-        """
-        return ud.type in ['az']
-
-
-    def checkstatus(self, fetch, ud, d, try_again=True):
-
-        # checkstatus discards parameters either way, we need to do this before adding the SAS
-        ud.url = ud.url.replace('az://','https://').split(';')[0]
-
-        az_sas = d.getVar('AZ_SAS')
-        if az_sas and az_sas not in ud.url:
-            if not az_sas.startswith('?'):
-                raise FetchError("When using AZ_SAS, it must start with a '?' character to mark the start of the query-parameters.")
-            ud.url += az_sas
-
-        return Wget.checkstatus(self, fetch, ud, d, try_again)
-
-    # Override download method, include retries
-    def download(self, ud, d, retries=3):
-        """Fetch urls"""
-
-        # If we're reaching the account transaction limit we might be refused a connection;
-        # retrying allows us to avoid false negatives since the limit changes over time
-        fetchcmd = self.basecmd + ' --retry-connrefused --waitretry=5'
-
-        # We need to provide a localpath to avoid wget using the SAS
-        # ud.localfile either has the downloadfilename or ud.path
-        localpath = os.path.join(d.getVar("DL_DIR"), ud.localfile)
-        bb.utils.mkdirhier(os.path.dirname(localpath))
-        fetchcmd += " -O %s" % shlex.quote(localpath)
-
-
-        if ud.user and ud.pswd:
-            fetchcmd += " --user=%s --password=%s --auth-no-challenge" % (ud.user, ud.pswd)
-
-        # Check if a Shared Access Signature was given and use it
-        az_sas = d.getVar('AZ_SAS')
-
-        if az_sas:
-            if not az_sas.startswith('?'):
-                raise FetchError("When using AZ_SAS, it must start with a '?' character to mark the start of the query-parameters.")
-            azuri = '%s%s%s%s' % ('https://', ud.host, ud.path, az_sas)
-        else:
-            azuri = '%s%s%s' % ('https://', ud.host, ud.path)
-
-        dldir = d.getVar("DL_DIR")
-        if os.path.exists(ud.localpath):
-            # The file exists but we didn't complete it, so try to resume the download.
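-            # (wget's -c flag resumes the partial file and -P sets the directory
-            # prefix, so the resumed download still lands in DL_DIR)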
-            fetchcmd += " -c -P %s '%s'" % (dldir, azuri)
-        else:
-            fetchcmd += " -P %s '%s'" % (dldir, azuri)
-
-        try:
-            self._runwget(ud, d, fetchcmd, False)
-        except FetchError as e:
-            # Azure sometimes fails the handshake when using wget after some stress,
-            # producing a FetchError from the fetcher; if the artifact exists, retrying should succeed
-            if 'Unable to establish SSL connection' in str(e):
-                logger.debug2('Unable to establish SSL connection: Retries remaining: %s, Retrying...' % retries)
-                self.download(ud, d, retries - 1)
-
-        # Sanity check since wget can pretend it succeeded when it didn't
-        # Also, this used to happen if sourceforge sent us to the mirror page
-        if not os.path.exists(ud.localpath):
-            raise FetchError("The fetch command returned success for url %s but %s doesn't exist?!" % (azuri, ud.localpath), azuri)
-
-        if os.path.getsize(ud.localpath) == 0:
-            os.remove(ud.localpath)
-            raise FetchError("The fetch of %s resulted in a zero size file?! Deleting and failing since this isn't right." % (azuri), azuri)
-
-        return True
diff --git a/bitbake/lib/bb/fetch2/bzr.py b/bitbake/lib/bb/fetch2/bzr.py
deleted file mode 100644
index fc558f50b0..0000000000
--- a/bitbake/lib/bb/fetch2/bzr.py
+++ /dev/null
@@ -1,128 +0,0 @@
-"""
-BitBake 'Fetch' implementation for bzr.
-
-"""
-
-# Copyright (C) 2007 Ross Burton
-# Copyright (C) 2007 Richard Purdie
-#
-# Classes for obtaining upstream sources for the
-# BitBake build tools.
-# Copyright (C) 2003, 2004 Chris Larson
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-
-import os
-import bb
-from bb.fetch2 import FetchMethod
-from bb.fetch2 import FetchError
-from bb.fetch2 import runfetchcmd
-from bb.fetch2 import logger
-
-class Bzr(FetchMethod):
-    def supports(self, ud, d):
-        return ud.type in ['bzr']
-
-    def urldata_init(self, ud, d):
-        """
-        init bzr specific variables within url data
-        """
-        # Create paths to bzr checkouts
-        bzrdir = d.getVar("BZRDIR") or (d.getVar("DL_DIR") + "/bzr")
-        relpath = self._strip_leading_slashes(ud.path)
-        ud.pkgdir = os.path.join(bzrdir, ud.host, relpath)
-
-        ud.setup_revisions(d)
-
-        if not ud.revision:
-            ud.revision = self.latest_revision(ud, d)
-
-        ud.localfile = d.expand('bzr_%s_%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.revision))
-
-    def _buildbzrcommand(self, ud, d, command):
-        """
-        Build up a bzr command line based on ud
-        command is "fetch", "update", "revno"
-        """
-
-        basecmd = d.getVar("FETCHCMD_bzr") or "/usr/bin/env bzr"
-
-        proto = ud.parm.get('protocol', 'http')
-
-        bzrroot = ud.host + ud.path
-
-        options = []
-
-        if command == "revno":
-            bzrcmd = "%s revno %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot)
-        else:
-            if ud.revision:
-                options.append("-r %s" % ud.revision)
-
-            if command == "fetch":
-                bzrcmd = "%s branch %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot)
-            elif command == "update":
-                bzrcmd = "%s pull %s --overwrite" % (basecmd, " ".join(options))
-            else:
-                raise FetchError("Invalid bzr command %s" % command, ud.url)
-
-        return bzrcmd
-
-    def download(self, ud, d):
-        """Fetch url"""
-
-        if os.access(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir), '.bzr'), os.R_OK):
-            bzrcmd = self._buildbzrcommand(ud, d, "update")
-            logger.debug("BZR Update %s", ud.url)
-            bb.fetch2.check_network_access(d, bzrcmd, ud.url)
-            runfetchcmd(bzrcmd, d, workdir=os.path.join(ud.pkgdir, os.path.basename(ud.path)))
-        else:
-            bb.utils.remove(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir)), True)
-            bzrcmd = self._buildbzrcommand(ud, d, "fetch")
-
bb.fetch2.check_network_access(d, bzrcmd, ud.url) - logger.debug("BZR Checkout %s", ud.url) - bb.utils.mkdirhier(ud.pkgdir) - logger.debug("Running %s", bzrcmd) - runfetchcmd(bzrcmd, d, workdir=ud.pkgdir) - - scmdata = ud.parm.get("scmdata", "") - if scmdata == "keep": - tar_flags = "" - else: - tar_flags = "--exclude='.bzr' --exclude='.bzrtags'" - - # tar them up to a defined filename - runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.basename(ud.pkgdir)), - d, cleanup=[ud.localpath], workdir=ud.pkgdir) - - def supports_srcrev(self): - return True - - def _revision_key(self, ud, d, name): - """ - Return a unique key for the url - """ - return "bzr:" + ud.pkgdir - - def _latest_revision(self, ud, d, name): - """ - Return the latest upstream revision number - """ - logger.debug2("BZR fetcher hitting network for %s", ud.url) - - bb.fetch2.check_network_access(d, self._buildbzrcommand(ud, d, "revno"), ud.url) - - output = runfetchcmd(self._buildbzrcommand(ud, d, "revno"), d, True) - - return output.strip() - - def sortable_revision(self, ud, d, name): - """ - Return a sortable revision number which in our case is the revision number - """ - - return False, self._build_revision(ud, d) - - def _build_revision(self, ud, d): - return ud.revision diff --git a/bitbake/lib/bb/fetch2/clearcase.py b/bitbake/lib/bb/fetch2/clearcase.py deleted file mode 100644 index 17500daf95..0000000000 --- a/bitbake/lib/bb/fetch2/clearcase.py +++ /dev/null @@ -1,245 +0,0 @@ -""" -BitBake 'Fetch' clearcase implementation - -The clearcase fetcher is used to retrieve files from a ClearCase repository. - -Usage in the recipe: - - SRC_URI = "ccrc://cc.example.org/ccrc;vob=/example_vob;module=/example_module" - SRCREV = "EXAMPLE_CLEARCASE_TAG" - PV = "${@d.getVar("SRCREV", False).replace("/", "+")}" - -The fetcher uses the rcleartool or cleartool remote client, depending on which one is available. - -Supported SRC_URI options are: - -- vob - (required) The name of the clearcase VOB (with prepending "/") - -- module - The module in the selected VOB (with prepending "/") - - The module and vob parameters are combined to create - the following load rule in the view config spec: - load - -- proto - http or https - -Related variables: - - CCASE_CUSTOM_CONFIG_SPEC - Write a config spec to this variable in your recipe to use it instead - of the default config spec generated by this fetcher. - Please note that the SRCREV loses its functionality if you specify - this variable. SRCREV is still used to label the archive after a fetch, - but it doesn't define what's fetched. - -User credentials: - cleartool: - The login of cleartool is handled by the system. No special steps needed. - - rcleartool: - In order to use rcleartool with authenticated users an `rcleartool login` is - necessary before using the fetcher. -""" -# Copyright (C) 2014 Siemens AG -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import os -import shutil -import bb -from bb.fetch2 import FetchMethod -from bb.fetch2 import FetchError -from bb.fetch2 import MissingParameterError -from bb.fetch2 import ParameterError -from bb.fetch2 import runfetchcmd -from bb.fetch2 import logger - -class ClearCase(FetchMethod): - """Class to fetch urls via 'clearcase'""" - def init(self, d): - pass - - def supports(self, ud, d): - """ - Check to see if a given url can be fetched with Clearcase. 
- """ - return ud.type in ['ccrc'] - - def debug(self, msg): - logger.debug("ClearCase: %s", msg) - - def urldata_init(self, ud, d): - """ - init ClearCase specific variable within url data - """ - ud.proto = "https" - if 'protocol' in ud.parm: - ud.proto = ud.parm['protocol'] - if not ud.proto in ('http', 'https'): - raise ParameterError("Invalid protocol type", ud.url) - - ud.vob = '' - if 'vob' in ud.parm: - ud.vob = ud.parm['vob'] - else: - msg = ud.url+": vob must be defined so the fetcher knows what to get." - raise MissingParameterError('vob', msg) - - if 'module' in ud.parm: - ud.module = ud.parm['module'] - else: - ud.module = "" - - ud.basecmd = d.getVar("FETCHCMD_ccrc") or "/usr/bin/env cleartool || rcleartool" - - if d.getVar("SRCREV") == "INVALID": - raise FetchError("Set a valid SRCREV for the clearcase fetcher in your recipe, e.g. SRCREV = \"/main/LATEST\" or any other label of your choice.") - - ud.label = d.getVar("SRCREV", False) - ud.customspec = d.getVar("CCASE_CUSTOM_CONFIG_SPEC") - - ud.server = "%s://%s%s" % (ud.proto, ud.host, ud.path) - - ud.identifier = "clearcase-%s%s-%s" % ( ud.vob.replace("/", ""), - ud.module.replace("/", "."), - ud.label.replace("/", ".")) - - ud.viewname = "%s-view%s" % (ud.identifier, d.getVar("DATETIME")) - ud.csname = "%s-config-spec" % (ud.identifier) - ud.ccasedir = os.path.join(d.getVar("DL_DIR"), ud.type) - ud.viewdir = os.path.join(ud.ccasedir, ud.viewname) - ud.configspecfile = os.path.join(ud.ccasedir, ud.csname) - ud.localfile = "%s.tar.gz" % (ud.identifier) - - self.debug("host = %s" % ud.host) - self.debug("path = %s" % ud.path) - self.debug("server = %s" % ud.server) - self.debug("proto = %s" % ud.proto) - self.debug("type = %s" % ud.type) - self.debug("vob = %s" % ud.vob) - self.debug("module = %s" % ud.module) - self.debug("basecmd = %s" % ud.basecmd) - self.debug("label = %s" % ud.label) - self.debug("ccasedir = %s" % ud.ccasedir) - self.debug("viewdir = %s" % ud.viewdir) - self.debug("viewname = %s" % ud.viewname) - self.debug("configspecfile = %s" % ud.configspecfile) - self.debug("localfile = %s" % ud.localfile) - - def _build_ccase_command(self, ud, command): - """ - Build up a commandline based on ud - command is: mkview, setcs, rmview - """ - options = [] - - if "rcleartool" in ud.basecmd: - options.append("-server %s" % ud.server) - - basecmd = "%s %s" % (ud.basecmd, command) - - if command == 'mkview': - if not "rcleartool" in ud.basecmd: - # Cleartool needs a -snapshot view - options.append("-snapshot") - options.append("-tag %s" % ud.viewname) - options.append(ud.viewdir) - - elif command == 'rmview': - options.append("-force") - options.append("%s" % ud.viewdir) - - elif command == 'setcs': - options.append("-overwrite") - options.append(ud.configspecfile) - - else: - raise FetchError("Invalid ccase command %s" % command) - - ccasecmd = "%s %s" % (basecmd, " ".join(options)) - self.debug("ccasecmd = %s" % ccasecmd) - return ccasecmd - - def _write_configspec(self, ud, d): - """ - Create config spec file (ud.configspecfile) for ccase view - """ - config_spec = "" - custom_config_spec = d.getVar("CCASE_CUSTOM_CONFIG_SPEC", d) - if custom_config_spec is not None: - for line in custom_config_spec.split("\\n"): - config_spec += line+"\n" - bb.warn("A custom config spec has been set, SRCREV is only relevant for the tarball name.") - else: - config_spec += "element * CHECKEDOUT\n" - config_spec += "element * %s\n" % ud.label - config_spec += "load %s%s\n" % (ud.vob, ud.module) - - logger.info("Using config spec: \n%s" % 
config_spec) - - with open(ud.configspecfile, 'w') as f: - f.write(config_spec) - - def _remove_view(self, ud, d): - if os.path.exists(ud.viewdir): - cmd = self._build_ccase_command(ud, 'rmview'); - logger.info("cleaning up [VOB=%s label=%s view=%s]", ud.vob, ud.label, ud.viewname) - bb.fetch2.check_network_access(d, cmd, ud.url) - output = runfetchcmd(cmd, d, workdir=ud.ccasedir) - logger.info("rmview output: %s", output) - - def need_update(self, ud, d): - if ("LATEST" in ud.label) or (ud.customspec and "LATEST" in ud.customspec): - ud.identifier += "-%s" % d.getVar("DATETIME") - return True - if os.path.exists(ud.localpath): - return False - return True - - def supports_srcrev(self): - return True - - def sortable_revision(self, ud, d, name): - return False, ud.identifier - - def download(self, ud, d): - """Fetch url""" - - # Make a fresh view - bb.utils.mkdirhier(ud.ccasedir) - self._write_configspec(ud, d) - cmd = self._build_ccase_command(ud, 'mkview') - logger.info("creating view [VOB=%s label=%s view=%s]", ud.vob, ud.label, ud.viewname) - bb.fetch2.check_network_access(d, cmd, ud.url) - try: - runfetchcmd(cmd, d) - except FetchError as e: - if "CRCLI2008E" in e.msg: - raise FetchError("%s\n%s\n" % (e.msg, "Call `rcleartool login` in your console to authenticate to the clearcase server before running bitbake.")) - else: - raise e - - # Set configspec: Setting the configspec effectively fetches the files as defined in the configspec - cmd = self._build_ccase_command(ud, 'setcs'); - logger.info("fetching data [VOB=%s label=%s view=%s]", ud.vob, ud.label, ud.viewname) - bb.fetch2.check_network_access(d, cmd, ud.url) - output = runfetchcmd(cmd, d, workdir=ud.viewdir) - logger.info("%s", output) - - # Copy the configspec to the viewdir so we have it in our source tarball later - shutil.copyfile(ud.configspecfile, os.path.join(ud.viewdir, ud.csname)) - - # Clean clearcase meta-data before tar - - runfetchcmd('tar -czf "%s" .' 
% (ud.localpath), d, cleanup = [ud.localpath], workdir = ud.viewdir) - - # Clean up so we can create a new view next time - self.clean(ud, d); - - def clean(self, ud, d): - self._remove_view(ud, d) - bb.utils.remove(ud.configspecfile) diff --git a/bitbake/lib/bb/fetch2/crate.py b/bitbake/lib/bb/fetch2/crate.py deleted file mode 100644 index e611736f06..0000000000 --- a/bitbake/lib/bb/fetch2/crate.py +++ /dev/null @@ -1,150 +0,0 @@ -# ex:ts=4:sw=4:sts=4:et -# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- -""" -BitBake 'Fetch' implementation for crates.io -""" - -# Copyright (C) 2016 Doug Goldstein -# -# SPDX-License-Identifier: GPL-2.0-only -# -# Based on functions from the base bb module, Copyright 2003 Holger Schurig - -import hashlib -import json -import os -import subprocess -import bb -from bb.fetch2 import logger, subprocess_setup, UnpackError -from bb.fetch2.wget import Wget - - -class Crate(Wget): - - """Class to fetch crates via wget""" - - def _cargo_bitbake_path(self, rootdir): - return os.path.join(rootdir, "cargo_home", "bitbake") - - def supports(self, ud, d): - """ - Check to see if a given url is for this fetcher - """ - return ud.type in ['crate'] - - def recommends_checksum(self, urldata): - return True - - def urldata_init(self, ud, d): - """ - Sets up to download the respective crate from crates.io - """ - - if ud.type == 'crate': - self._crate_urldata_init(ud, d) - - super(Crate, self).urldata_init(ud, d) - - def _crate_urldata_init(self, ud, d): - """ - Sets up the download for a crate - """ - - # URL syntax is: crate://NAME/VERSION - # break the URL apart by / - parts = ud.url.split('/') - if len(parts) < 5: - raise bb.fetch2.ParameterError("Invalid URL: Must be crate://HOST/NAME/VERSION", ud.url) - - # version is expected to be the last token - # but ignore possible url parameters which will be used - # by the top fetcher class - version = parts[-1].split(";")[0] - # second to last field is name - name = parts[-2] - # host (this is to allow custom crate registries to be specified - host = '/'.join(parts[2:-2]) - - # if using upstream just fix it up nicely - if host == 'crates.io': - host = 'crates.io/api/v1/crates' - - ud.url = "https://%s/%s/%s/download" % (host, name, version) - ud.versionsurl = "https://%s/%s/versions" % (host, name) - ud.parm['downloadfilename'] = "%s-%s.crate" % (name, version) - if 'name' not in ud.parm: - ud.parm['name'] = '%s-%s' % (name, version) - - logger.debug2("Fetching %s to %s" % (ud.url, ud.parm['downloadfilename'])) - - def unpack(self, ud, rootdir, d): - """ - Uses the crate to build the necessary paths for cargo to utilize it - """ - if ud.type == 'crate': - return self._crate_unpack(ud, rootdir, d) - else: - super(Crate, self).unpack(ud, rootdir, d) - - def _crate_unpack(self, ud, rootdir, d): - """ - Unpacks a crate - """ - thefile = ud.localpath - - # possible metadata we need to write out - metadata = {} - - # change to the rootdir to unpack but save the old working dir - save_cwd = os.getcwd() - os.chdir(rootdir) - - bp = d.getVar('BP') - if bp == ud.parm.get('name'): - cmd = "tar -xz --no-same-owner -f %s" % thefile - ud.unpack_tracer.unpack("crate-extract", rootdir) - else: - cargo_bitbake = self._cargo_bitbake_path(rootdir) - ud.unpack_tracer.unpack("cargo-extract", cargo_bitbake) - - cmd = "tar -xz --no-same-owner -f %s -C %s" % (thefile, cargo_bitbake) - - # ensure we've got these paths made - bb.utils.mkdirhier(cargo_bitbake) - - # generate metadata necessary - with open(thefile, 'rb') as f: - # get the 
SHA256 of the original tarball - tarhash = hashlib.sha256(f.read()).hexdigest() - - metadata['files'] = {} - metadata['package'] = tarhash - - path = d.getVar('PATH') - if path: - cmd = "PATH=\"%s\" %s" % (path, cmd) - bb.note("Unpacking %s to %s/" % (thefile, os.getcwd())) - - ret = subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True) - - os.chdir(save_cwd) - - if ret != 0: - raise UnpackError("Unpack command %s failed with return value %s" % (cmd, ret), ud.url) - - # if we have metadata to write out.. - if len(metadata) > 0: - cratepath = os.path.splitext(os.path.basename(thefile))[0] - bbpath = self._cargo_bitbake_path(rootdir) - mdfile = '.cargo-checksum.json' - mdpath = os.path.join(bbpath, cratepath, mdfile) - with open(mdpath, "w") as f: - json.dump(metadata, f) - - def latest_versionstring(self, ud, d): - from functools import cmp_to_key - json_data = json.loads(self._fetch_index(ud.versionsurl, ud, d)) - versions = [(0, i["num"], "") for i in json_data["versions"]] - versions = sorted(versions, key=cmp_to_key(bb.utils.vercmp)) - - return (versions[-1][1], "") diff --git a/bitbake/lib/bb/fetch2/cvs.py b/bitbake/lib/bb/fetch2/cvs.py deleted file mode 100644 index 01de5ff4ca..0000000000 --- a/bitbake/lib/bb/fetch2/cvs.py +++ /dev/null @@ -1,157 +0,0 @@ -""" -BitBake 'Fetch' implementations - -Classes for obtaining upstream sources for the -BitBake build tools. - -""" - -# Copyright (C) 2003, 2004 Chris Larson -# -# SPDX-License-Identifier: GPL-2.0-only -# -# Based on functions from the base bb module, Copyright 2003 Holger Schurig -# - -import os -import bb -from bb.fetch2 import FetchMethod, FetchError, MissingParameterError, logger -from bb.fetch2 import runfetchcmd - -class Cvs(FetchMethod): - """ - Class to fetch a module or modules from cvs repositories - """ - def supports(self, ud, d): - """ - Check to see if a given url can be fetched with cvs. 
- """ - return ud.type in ['cvs'] - - def urldata_init(self, ud, d): - if not "module" in ud.parm: - raise MissingParameterError("module", ud.url) - ud.module = ud.parm["module"] - - ud.tag = ud.parm.get('tag', "") - - # Override the default date in certain cases - if 'date' in ud.parm: - ud.date = ud.parm['date'] - elif ud.tag: - ud.date = "" - - norecurse = '' - if 'norecurse' in ud.parm: - norecurse = '_norecurse' - - fullpath = '' - if 'fullpath' in ud.parm: - fullpath = '_fullpath' - - ud.localfile = d.expand('%s_%s_%s_%s%s%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.tag, ud.date, norecurse, fullpath)) - - pkg = d.getVar('PN') - cvsdir = d.getVar("CVSDIR") or (d.getVar("DL_DIR") + "/cvs") - ud.pkgdir = os.path.join(cvsdir, pkg) - - def need_update(self, ud, d): - if (ud.date == "now"): - return True - if not os.path.exists(ud.localpath): - return True - return False - - def download(self, ud, d): - - method = ud.parm.get('method', 'pserver') - localdir = ud.parm.get('localdir', ud.module) - cvs_port = ud.parm.get('port', '') - - cvs_rsh = None - if method == "ext": - if "rsh" in ud.parm: - cvs_rsh = ud.parm["rsh"] - - if method == "dir": - cvsroot = ud.path - else: - cvsroot = ":" + method - cvsproxyhost = d.getVar('CVS_PROXY_HOST') - if cvsproxyhost: - cvsroot += ";proxy=" + cvsproxyhost - cvsproxyport = d.getVar('CVS_PROXY_PORT') - if cvsproxyport: - cvsroot += ";proxyport=" + cvsproxyport - cvsroot += ":" + ud.user - if ud.pswd: - cvsroot += ":" + ud.pswd - cvsroot += "@" + ud.host + ":" + cvs_port + ud.path - - options = [] - if 'norecurse' in ud.parm: - options.append("-l") - if ud.date: - # treat YYYYMMDDHHMM specially for CVS - if len(ud.date) == 12: - options.append("-D \"%s %s:%s UTC\"" % (ud.date[0:8], ud.date[8:10], ud.date[10:12])) - else: - options.append("-D \"%s UTC\"" % ud.date) - if ud.tag: - options.append("-r %s" % ud.tag) - - cvsbasecmd = d.getVar("FETCHCMD_cvs") or "/usr/bin/env cvs" - cvscmd = cvsbasecmd + " '-d" + cvsroot + "' co " + " ".join(options) + " " + ud.module - cvsupdatecmd = cvsbasecmd + " '-d" + cvsroot + "' update -d -P " + " ".join(options) - - if cvs_rsh: - cvscmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvscmd) - cvsupdatecmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvsupdatecmd) - - # create module directory - logger.debug2("Fetch: checking for module directory") - moddir = os.path.join(ud.pkgdir, localdir) - workdir = None - if os.access(os.path.join(moddir, 'CVS'), os.R_OK): - logger.info("Update " + ud.url) - bb.fetch2.check_network_access(d, cvsupdatecmd, ud.url) - # update sources there - workdir = moddir - cmd = cvsupdatecmd - else: - logger.info("Fetch " + ud.url) - # check out sources there - bb.utils.mkdirhier(ud.pkgdir) - workdir = ud.pkgdir - logger.debug("Running %s", cvscmd) - bb.fetch2.check_network_access(d, cvscmd, ud.url) - cmd = cvscmd - - runfetchcmd(cmd, d, cleanup=[moddir], workdir=workdir) - - if not os.access(moddir, os.R_OK): - raise FetchError("Directory %s was not readable despite sucessful fetch?!" 
% moddir, ud.url) - - scmdata = ud.parm.get("scmdata", "") - if scmdata == "keep": - tar_flags = "" - else: - tar_flags = "--exclude='CVS'" - - # tar them up to a defined filename - workdir = None - if 'fullpath' in ud.parm: - workdir = ud.pkgdir - cmd = "tar %s -czf %s %s" % (tar_flags, ud.localpath, localdir) - else: - workdir = os.path.dirname(os.path.realpath(moddir)) - cmd = "tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.basename(moddir)) - - runfetchcmd(cmd, d, cleanup=[ud.localpath], workdir=workdir) - - def clean(self, ud, d): - """ Clean CVS Files and tarballs """ - - bb.utils.remove(ud.pkgdir, True) - bb.utils.remove(ud.localpath) - diff --git a/bitbake/lib/bb/fetch2/gcp.py b/bitbake/lib/bb/fetch2/gcp.py deleted file mode 100644 index 86546d40bf..0000000000 --- a/bitbake/lib/bb/fetch2/gcp.py +++ /dev/null @@ -1,102 +0,0 @@ -""" -BitBake 'Fetch' implementation for Google Cloup Platform Storage. - -Class for fetching files from Google Cloud Storage using the -Google Cloud Storage Python Client. The GCS Python Client must -be correctly installed, configured and authenticated prior to use. -Additionally, gsutil must also be installed. - -""" - -# Copyright (C) 2023, Snap Inc. -# -# Based in part on bb.fetch2.s3: -# Copyright (C) 2017 Andre McCurdy -# -# SPDX-License-Identifier: GPL-2.0-only -# -# Based on functions from the base bb module, Copyright 2003 Holger Schurig - -import os -import bb -import urllib.parse, urllib.error -from bb.fetch2 import FetchMethod -from bb.fetch2 import FetchError -from bb.fetch2 import logger - -class GCP(FetchMethod): - """ - Class to fetch urls via GCP's Python API. - """ - def __init__(self): - self.gcp_client = None - - def supports(self, ud, d): - """ - Check to see if a given url can be fetched with GCP. - """ - return ud.type in ['gs'] - - def recommends_checksum(self, urldata): - return True - - def urldata_init(self, ud, d): - if 'downloadfilename' in ud.parm: - ud.basename = ud.parm['downloadfilename'] - else: - ud.basename = os.path.basename(ud.path) - - ud.localfile = ud.basename - - def get_gcp_client(self): - from google.cloud import storage - self.gcp_client = storage.Client(project=None) - - def download(self, ud, d): - """ - Fetch urls using the GCP API. - Assumes localpath was called first. - """ - from google.api_core.exceptions import NotFound - logger.debug2(f"Trying to download gs://{ud.host}{ud.path} to {ud.localpath}") - if self.gcp_client is None: - self.get_gcp_client() - - bb.fetch2.check_network_access(d, "blob.download_to_filename", f"gs://{ud.host}{ud.path}") - - # Path sometimes has leading slash, so strip it - path = ud.path.lstrip("/") - blob = self.gcp_client.bucket(ud.host).blob(path) - try: - blob.download_to_filename(ud.localpath) - except NotFound: - raise FetchError("The GCP API threw a NotFound exception") - - # Additional sanity checks copied from the wget class (although there - # are no known issues which mean these are required, treat the GCP API - # tool with a little healthy suspicion). - if not os.path.exists(ud.localpath): - raise FetchError(f"The GCP API returned success for gs://{ud.host}{ud.path} but {ud.localpath} doesn't exist?!") - - if os.path.getsize(ud.localpath) == 0: - os.remove(ud.localpath) - raise FetchError(f"The downloaded file for gs://{ud.host}{ud.path} resulted in a zero size file?! Deleting and failing since this isn't right.") - - return True - - def checkstatus(self, fetch, ud, d): - """ - Check the status of a URL. 
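-        Returns True if the object exists, and raises FetchError if it does not.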
- """ - logger.debug2(f"Checking status of gs://{ud.host}{ud.path}") - if self.gcp_client is None: - self.get_gcp_client() - - bb.fetch2.check_network_access(d, "gcp_client.bucket(ud.host).blob(path).exists()", f"gs://{ud.host}{ud.path}") - - # Path sometimes has leading slash, so strip it - path = ud.path.lstrip("/") - if self.gcp_client.bucket(ud.host).blob(path).exists() == False: - raise FetchError(f"The GCP API reported that gs://{ud.host}{ud.path} does not exist") - else: - return True diff --git a/bitbake/lib/bb/fetch2/git.py b/bitbake/lib/bb/fetch2/git.py deleted file mode 100644 index 0fcdb19df1..0000000000 --- a/bitbake/lib/bb/fetch2/git.py +++ /dev/null @@ -1,1010 +0,0 @@ -""" -BitBake 'Fetch' git implementation - -git fetcher support the SRC_URI with format of: -SRC_URI = "git://some.host/somepath;OptionA=xxx;OptionB=xxx;..." - -Supported SRC_URI options are: - -- branch - The git branch to retrieve from. The default is "master" - -- tag - The git tag to retrieve. The default is "master" - -- protocol - The method to use to access the repository. Common options are "git", - "http", "https", "file", "ssh" and "rsync". The default is "git". - -- rebaseable - rebaseable indicates that the upstream git repo may rebase in the future, - and current revision may disappear from upstream repo. This option will - remind fetcher to preserve local cache carefully for future use. - The default value is "0", set rebaseable=1 for rebaseable git repo. - -- nocheckout - Don't checkout source code when unpacking. set this option for the recipe - who has its own routine to checkout code. - The default is "0", set nocheckout=1 if needed. - -- bareclone - Create a bare clone of the source code and don't checkout the source code - when unpacking. Set this option for the recipe who has its own routine to - checkout code and tracking branch requirements. - The default is "0", set bareclone=1 if needed. - -- nobranch - Don't check the SHA validation for branch. set this option for the recipe - referring to commit which is valid in any namespace (branch, tag, ...) - instead of branch. - The default is "0", set nobranch=1 if needed. - -- subpath - Limit the checkout to a specific subpath of the tree. - By default, checkout the whole tree, set subpath= if needed - -- destsuffix - The name of the path in which to place the checkout. - By default, the path is git/, set destsuffix= if needed - -- usehead - For local git:// urls to use the current branch HEAD as the revision for use with - AUTOREV. Implies nobranch. - -- lfs - Enable the checkout to use LFS for large files. This will download all LFS files - in the download step, as the unpack step does not have network access. - The default is "1", set lfs=0 to skip. 
- -""" - -# Copyright (C) 2005 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import collections -import errno -import fnmatch -import os -import re -import shlex -import shutil -import subprocess -import tempfile -import urllib -import bb -import bb.progress -from contextlib import contextmanager -from bb.fetch2 import FetchMethod -from bb.fetch2 import runfetchcmd -from bb.fetch2 import logger -from bb.fetch2 import trusted_network - - -sha1_re = re.compile(r'^[0-9a-f]{40}$') -slash_re = re.compile(r"/+") - -class GitProgressHandler(bb.progress.LineFilterProgressHandler): - """Extract progress information from git output""" - def __init__(self, d): - self._buffer = '' - self._count = 0 - super(GitProgressHandler, self).__init__(d) - # Send an initial progress event so the bar gets shown - self._fire_progress(-1) - - def write(self, string): - self._buffer += string - stages = ['Counting objects', 'Compressing objects', 'Receiving objects', 'Resolving deltas'] - stage_weights = [0.2, 0.05, 0.5, 0.25] - stagenum = 0 - for i, stage in reversed(list(enumerate(stages))): - if stage in self._buffer: - stagenum = i - self._buffer = '' - break - self._status = stages[stagenum] - percs = re.findall(r'(\d+)%', string) - if percs: - progress = int(round((int(percs[-1]) * stage_weights[stagenum]) + (sum(stage_weights[:stagenum]) * 100))) - rates = re.findall(r'([\d.]+ [a-zA-Z]*/s+)', string) - if rates: - rate = rates[-1] - else: - rate = None - self.update(progress, rate) - else: - if stagenum == 0: - percs = re.findall(r': (\d+)', string) - if percs: - count = int(percs[-1]) - if count > self._count: - self._count = count - self._fire_progress(-count) - super(GitProgressHandler, self).write(string) - - -class Git(FetchMethod): - bitbake_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.join(os.path.abspath(__file__))), '..', '..', '..')) - make_shallow_path = os.path.join(bitbake_dir, 'bin', 'git-make-shallow') - - """Class to fetch a module or modules from git repositories""" - def init(self, d): - pass - - def supports(self, ud, d): - """ - Check to see if a given url can be fetched with git. - """ - return ud.type in ['git'] - - def supports_checksum(self, urldata): - return False - - def cleanup_upon_failure(self): - return False - - def urldata_init(self, ud, d): - """ - init git specific variable within url data - so that the git method like latest_revision() can work - """ - if 'protocol' in ud.parm: - ud.proto = ud.parm['protocol'] - elif not ud.host: - ud.proto = 'file' - else: - ud.proto = "git" - if ud.host == "github.com" and ud.proto == "git": - # github stopped supporting git protocol - # https://github.blog/2021-09-01-improving-git-protocol-security-github/#no-more-unauthenticated-git - ud.proto = "https" - bb.warn("URL: %s uses git protocol which is no longer supported by github. Please change to ;protocol=https in the url." 
% ud.url) - - if not ud.proto in ('git', 'file', 'ssh', 'http', 'https', 'rsync'): - raise bb.fetch2.ParameterError(f"Invalid protocol type: '{ud.proto}'", ud.url) - - ud.nocheckout = ud.parm.get("nocheckout","0") == "1" - - ud.rebaseable = ud.parm.get("rebaseable","0") == "1" - - ud.nobranch = ud.parm.get("nobranch","0") == "1" - - # usehead implies nobranch - ud.usehead = ud.parm.get("usehead","0") == "1" - if ud.usehead: - if ud.proto != "file": - raise bb.fetch2.ParameterError("The usehead option is only for use with local ('protocol=file') git repositories", ud.url) - ud.nobranch = 1 - - # bareclone implies nocheckout - ud.bareclone = ud.parm.get("bareclone","0") == "1" - if ud.bareclone: - ud.nocheckout = 1 - - ud.unresolvedrev = "" - ud.branch = ud.parm.get("branch", "") - if not ud.branch and not ud.nobranch: - raise bb.fetch2.ParameterError("The url does not set any branch parameter or set nobranch=1.", ud.url) - - ud.noshared = d.getVar("BB_GIT_NOSHARED") == "1" - - ud.cloneflags = "-n" - if not ud.noshared: - ud.cloneflags += " -s" - if ud.bareclone: - ud.cloneflags += " --mirror" - - ud.shallow_skip_fast = False - ud.shallow = d.getVar("BB_GIT_SHALLOW") == "1" - ud.shallow_extra_refs = (d.getVar("BB_GIT_SHALLOW_EXTRA_REFS") or "").split() - if 'tag' in ud.parm: - ud.shallow_extra_refs.append("refs/tags/" + ud.parm['tag']) - - depth_default = d.getVar("BB_GIT_SHALLOW_DEPTH") - if depth_default is not None: - try: - depth_default = int(depth_default or 0) - except ValueError: - raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH: %s" % depth_default) - else: - if depth_default < 0: - raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH: %s" % depth_default) - else: - depth_default = 1 - ud.shallow_depths = collections.defaultdict(lambda: depth_default) - - revs_default = d.getVar("BB_GIT_SHALLOW_REVS") - ud.shallow_revs = [] - - ud.unresolvedrev = ud.branch - - shallow_depth = d.getVar("BB_GIT_SHALLOW_DEPTH_%s" % ud.name) - if shallow_depth is not None: - try: - shallow_depth = int(shallow_depth or 0) - except ValueError: - raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (ud.name, shallow_depth)) - else: - if shallow_depth < 0: - raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (ud.name, shallow_depth)) - ud.shallow_depths[ud.name] = shallow_depth - - revs = d.getVar("BB_GIT_SHALLOW_REVS_%s" % ud.name) - if revs is not None: - ud.shallow_revs.extend(revs.split()) - elif revs_default is not None: - ud.shallow_revs.extend(revs_default.split()) - - if ud.shallow and not ud.shallow_revs and ud.shallow_depths[ud.name] == 0: - # Shallow disabled for this URL - ud.shallow = False - - if ud.usehead: - # When usehead is set let's associate 'HEAD' with the unresolved - # rev of this repository. This will get resolved into a revision - # later. If an actual revision happens to have also been provided - # then this setting will be overridden. 
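-            # (Illustrative example, not from the original source: a recipe with
-            # SRC_URI = "git:///srv/myrepo;protocol=file;usehead=1" and
-            # SRCREV = "${AUTOREV}" would have 'HEAD' resolved at fetch time.)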
- ud.unresolvedrev = 'HEAD' - - ud.basecmd = d.getVar("FETCHCMD_git") or "git -c gc.autoDetach=false -c core.pager=cat -c safe.bareRepository=all -c clone.defaultRemoteName=origin" - - write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0" - ud.write_tarballs = write_tarballs != "0" or ud.rebaseable - ud.write_shallow_tarballs = (d.getVar("BB_GENERATE_SHALLOW_TARBALLS") or write_tarballs) != "0" - - ud.setup_revisions(d) - - # Ensure any revision that doesn't look like a SHA-1 is translated into one - if not sha1_re.match(ud.revision or ''): - if ud.revision: - ud.unresolvedrev = ud.revision - ud.revision = self.latest_revision(ud, d, ud.name) - - gitsrcname = '%s%s' % (ud.host.replace(':', '.'), ud.path.replace('/', '.').replace('*', '.').replace(' ','_').replace('(', '_').replace(')', '_')) - if gitsrcname.startswith('.'): - gitsrcname = gitsrcname[1:] - - # For a rebaseable git repo, it is necessary to keep a mirror tar ball - # per revision, so that even if the revision disappears from the - # upstream repo in the future, the mirror will remain intact and still - # contain the revision - if ud.rebaseable: - gitsrcname = gitsrcname + '_' + ud.revision - - dl_dir = d.getVar("DL_DIR") - gitdir = d.getVar("GITDIR") or (dl_dir + "/git2") - ud.clonedir = os.path.join(gitdir, gitsrcname) - ud.localfile = ud.clonedir - - mirrortarball = 'git2_%s.tar.gz' % gitsrcname - ud.fullmirror = os.path.join(dl_dir, mirrortarball) - ud.mirrortarballs = [mirrortarball] - if ud.shallow: - tarballname = gitsrcname - if ud.bareclone: - tarballname = "%s_bare" % tarballname - - if ud.shallow_revs: - tarballname = "%s_%s" % (tarballname, "_".join(sorted(ud.shallow_revs))) - - tarballname = "%s_%s" % (tarballname, ud.revision[:7]) - depth = ud.shallow_depths[ud.name] - if depth: - tarballname = "%s-%s" % (tarballname, depth) - - shallow_refs = [] - if not ud.nobranch: - shallow_refs.append(ud.branch) - if ud.shallow_extra_refs: - shallow_refs.extend(r.replace('refs/heads/', '').replace('*', 'ALL') for r in ud.shallow_extra_refs) - if shallow_refs: - tarballname = "%s_%s" % (tarballname, "_".join(sorted(shallow_refs)).replace('/', '.')) - - fetcher = self.__class__.__name__.lower() - ud.shallowtarball = '%sshallow_%s.tar.gz' % (fetcher, tarballname) - ud.fullshallow = os.path.join(dl_dir, ud.shallowtarball) - ud.mirrortarballs.insert(0, ud.shallowtarball) - - def localpath(self, ud, d): - return ud.clonedir - - def need_update(self, ud, d): - return self.clonedir_need_update(ud, d) \ - or self.shallow_tarball_need_update(ud) \ - or self.tarball_need_update(ud) \ - or self.lfs_need_update(ud, d) - - def clonedir_need_update(self, ud, d): - if not os.path.exists(ud.clonedir): - return True - if ud.shallow and ud.write_shallow_tarballs and self.clonedir_need_shallow_revs(ud, d): - return True - if not self._contains_ref(ud, d, ud.name, ud.clonedir): - return True - if 'tag' in ud.parm and not self._contains_ref(ud, d, ud.name, ud.clonedir, tag=True): - return True - return False - - def lfs_need_update(self, ud, d): - if not self._need_lfs(ud): - return False - - if self.clonedir_need_update(ud, d): - return True - - if not self._lfs_objects_downloaded(ud, d, ud.clonedir): - return True - return False - - def clonedir_need_shallow_revs(self, ud, d): - for rev in ud.shallow_revs: - try: - runfetchcmd('%s rev-parse -q --verify %s' % (ud.basecmd, rev), d, quiet=True, workdir=ud.clonedir) - except bb.fetch2.FetchError: - return rev - return None - - def shallow_tarball_need_update(self, ud): - return ud.shallow 
and ud.write_shallow_tarballs and not os.path.exists(ud.fullshallow) - - def tarball_need_update(self, ud): - return ud.write_tarballs and not os.path.exists(ud.fullmirror) - - def update_mirror_links(self, ud, origud): - super().update_mirror_links(ud, origud) - # When using shallow mode, add a symlink to the original fullshallow - # path to ensure a valid symlink even in the `PREMIRRORS` case - if ud.shallow and not os.path.exists(origud.fullshallow): - self.ensure_symlink(ud.localpath, origud.fullshallow) - - def try_premirror(self, ud, d): - # If we don't do this, updating an existing checkout with only premirrors - # is not possible - if bb.utils.to_boolean(d.getVar("BB_FETCH_PREMIRRORONLY")): - return True - # If the url is not in trusted network, that is, BB_NO_NETWORK is set to 0 - # and BB_ALLOWED_NETWORKS does not contain the host that ud.url uses, then - # we need to try premirrors first as using upstream is destined to fail. - if not trusted_network(d, ud.url): - return True - # the following check is to ensure incremental fetch in downloads, this is - # because the premirror might be old and does not contain the new rev required, - # and this will cause a total removal and new clone. So if we can reach to - # network, we prefer upstream over premirror, though the premirror might contain - # the new rev. - if os.path.exists(ud.clonedir): - return False - return True - - def download(self, ud, d): - """Fetch url""" - - # A current clone is preferred to either tarball, a shallow tarball is - # preferred to an out of date clone, and a missing clone will use - # either tarball. - if ud.shallow and os.path.exists(ud.fullshallow) and self.need_update(ud, d): - ud.localpath = ud.fullshallow - return - elif os.path.exists(ud.fullmirror) and self.need_update(ud, d): - if not os.path.exists(ud.clonedir): - bb.utils.mkdirhier(ud.clonedir) - runfetchcmd("tar -xzf %s" % ud.fullmirror, d, workdir=ud.clonedir) - else: - tmpdir = tempfile.mkdtemp(dir=d.getVar('DL_DIR')) - runfetchcmd("tar -xzf %s" % ud.fullmirror, d, workdir=tmpdir) - output = runfetchcmd("%s remote" % ud.basecmd, d, quiet=True, workdir=ud.clonedir) - if 'mirror' in output: - runfetchcmd("%s remote rm mirror" % ud.basecmd, d, workdir=ud.clonedir) - runfetchcmd("%s remote add --mirror=fetch mirror %s" % (ud.basecmd, tmpdir), d, workdir=ud.clonedir) - fetch_cmd = "LANG=C %s fetch -f --update-head-ok --progress mirror " % (ud.basecmd) - runfetchcmd(fetch_cmd, d, workdir=ud.clonedir) - repourl = self._get_repo_url(ud) - - needs_clone = False - if os.path.exists(ud.clonedir): - # The directory may exist, but not be the top level of a bare git - # repository in which case it needs to be deleted and re-cloned. - try: - # Since clones can be bare, use --absolute-git-dir instead of --show-toplevel - output = runfetchcmd("LANG=C %s rev-parse --absolute-git-dir" % ud.basecmd, d, workdir=ud.clonedir) - toplevel = output.rstrip() - - if not bb.utils.path_is_descendant(toplevel, ud.clonedir): - logger.warning("Top level directory '%s' is not a descendant of '%s'. 
Re-cloning", toplevel, ud.clonedir) - needs_clone = True - except bb.fetch2.FetchError as e: - logger.warning("Unable to get top level for %s (not a git directory?): %s", ud.clonedir, e) - needs_clone = True - except FileNotFoundError as e: - logger.warning("%s", e) - needs_clone = True - - if needs_clone: - shutil.rmtree(ud.clonedir) - else: - needs_clone = True - - # If the repo still doesn't exist, fallback to cloning it - if needs_clone: - # We do this since git will use a "-l" option automatically for local urls where possible, - # but it doesn't work when git/objects is a symlink, only works when it is a directory. - if repourl.startswith("file://"): - repourl_path = repourl[7:] - objects = os.path.join(repourl_path, 'objects') - if os.path.isdir(objects) and not os.path.islink(objects): - repourl = repourl_path - clone_cmd = "LANG=C %s clone --bare --mirror %s %s --progress" % (ud.basecmd, shlex.quote(repourl), ud.clonedir) - if ud.proto.lower() != 'file': - bb.fetch2.check_network_access(d, clone_cmd, ud.url) - progresshandler = GitProgressHandler(d) - - # Try creating a fast initial shallow clone - # Enabling ud.shallow_skip_fast will skip this - # If the Git error "Server does not allow request for unadvertised object" - # occurs, shallow_skip_fast is enabled automatically. - # This may happen if the Git server does not allow the request - # or if the Git client has issues with this functionality. - if ud.shallow and not ud.shallow_skip_fast: - try: - self.clone_shallow_with_tarball(ud, d) - # When the shallow clone has succeeded, use the shallow tarball - ud.localpath = ud.fullshallow - return - except: - logger.warning("Creating fast initial shallow clone failed, try initial regular clone now.") - - # When skipping fast initial shallow or the fast inital shallow clone failed: - # Try again with an initial regular clone - runfetchcmd(clone_cmd, d, log=progresshandler) - - # Update the checkout if needed - if self.clonedir_need_update(ud, d): - output = runfetchcmd("%s remote" % ud.basecmd, d, quiet=True, workdir=ud.clonedir) - if "origin" in output: - runfetchcmd("%s remote rm origin" % ud.basecmd, d, workdir=ud.clonedir) - - runfetchcmd("%s remote add --mirror=fetch origin %s" % (ud.basecmd, shlex.quote(repourl)), d, workdir=ud.clonedir) - - if ud.nobranch: - fetch_cmd = "LANG=C %s fetch -f --progress %s refs/*:refs/*" % (ud.basecmd, shlex.quote(repourl)) - else: - fetch_cmd = "LANG=C %s fetch -f --progress %s refs/heads/*:refs/heads/* refs/tags/*:refs/tags/*" % (ud.basecmd, shlex.quote(repourl)) - if ud.proto.lower() != 'file': - bb.fetch2.check_network_access(d, fetch_cmd, ud.url) - progresshandler = GitProgressHandler(d) - runfetchcmd(fetch_cmd, d, log=progresshandler, workdir=ud.clonedir) - runfetchcmd("%s prune-packed" % ud.basecmd, d, workdir=ud.clonedir) - runfetchcmd("%s pack-refs --all" % ud.basecmd, d, workdir=ud.clonedir) - runfetchcmd("%s pack-redundant --all | xargs -r rm" % ud.basecmd, d, workdir=ud.clonedir) - try: - os.unlink(ud.fullmirror) - except OSError as exc: - if exc.errno != errno.ENOENT: - raise - - if not self._contains_ref(ud, d, ud.name, ud.clonedir): - raise bb.fetch2.FetchError("Unable to find revision %s in branch %s even from upstream" % (ud.revision, ud.branch)) - - if ud.shallow and ud.write_shallow_tarballs: - missing_rev = self.clonedir_need_shallow_revs(ud, d) - if missing_rev: - raise bb.fetch2.FetchError("Unable to find revision %s even from upstream" % missing_rev) - - if self.lfs_need_update(ud, d): - self.lfs_fetch(ud, d, ud.clonedir, 
ud.revision) - - def lfs_fetch(self, ud, d, clonedir, revision, fetchall=False, progresshandler=None): - """Helper method for fetching Git LFS data""" - try: - if self._need_lfs(ud) and self._contains_lfs(ud, d, clonedir) and len(revision): - self._ensure_git_lfs(d, ud) - - # Using worktree with the revision because .lfsconfig may exists - worktree_add_cmd = "%s worktree add wt %s" % (ud.basecmd, revision) - runfetchcmd(worktree_add_cmd, d, log=progresshandler, workdir=clonedir) - lfs_fetch_cmd = "%s lfs fetch %s" % (ud.basecmd, "--all" if fetchall else "") - runfetchcmd(lfs_fetch_cmd, d, log=progresshandler, workdir=(clonedir + "/wt")) - worktree_rem_cmd = "%s worktree remove -f wt" % ud.basecmd - runfetchcmd(worktree_rem_cmd, d, log=progresshandler, workdir=clonedir) - except: - logger.warning("Fetching LFS did not succeed.") - - @contextmanager - def create_atomic(self, filename): - """Create as a temp file and move atomically into position to avoid races""" - fd, tfile = tempfile.mkstemp(dir=os.path.dirname(filename)) - try: - yield tfile - umask = os.umask(0o666) - os.umask(umask) - os.chmod(tfile, (0o666 & ~umask)) - os.rename(tfile, filename) - finally: - os.close(fd) - - def build_mirror_data(self, ud, d): - if ud.shallow and ud.write_shallow_tarballs: - if not os.path.exists(ud.fullshallow): - if os.path.islink(ud.fullshallow): - os.unlink(ud.fullshallow) - self.clone_shallow_with_tarball(ud, d) - elif ud.write_tarballs and not os.path.exists(ud.fullmirror): - if os.path.islink(ud.fullmirror): - os.unlink(ud.fullmirror) - - logger.info("Creating tarball of git repository") - with self.create_atomic(ud.fullmirror) as tfile: - mtime = runfetchcmd("{} log --all -1 --format=%cD".format(ud.basecmd), d, - quiet=True, workdir=ud.clonedir) - runfetchcmd("tar -czf %s --owner oe:0 --group oe:0 --mtime \"%s\" ." - % (tfile, mtime), d, workdir=ud.clonedir) - runfetchcmd("touch %s.done" % ud.fullmirror, d) - - def clone_shallow_with_tarball(self, ud, d): - ret = False - tempdir = tempfile.mkdtemp(dir=d.getVar('DL_DIR')) - shallowclone = os.path.join(tempdir, 'git') - try: - try: - self.clone_shallow_local(ud, shallowclone, d) - except: - logger.warning("Fast shallow clone failed, try to skip fast mode now.") - bb.utils.remove(tempdir, recurse=True) - os.mkdir(tempdir) - ud.shallow_skip_fast = True - self.clone_shallow_local(ud, shallowclone, d) - logger.info("Creating tarball of git repository") - with self.create_atomic(ud.fullshallow) as tfile: - runfetchcmd("tar -czf %s ." 
% tfile, d, workdir=shallowclone) - runfetchcmd("touch %s.done" % ud.fullshallow, d) - ret = True - finally: - bb.utils.remove(tempdir, recurse=True) - - return ret - - def clone_shallow_local(self, ud, dest, d): - """ - Shallow fetch from ud.clonedir (${DL_DIR}/git2/ by default): - - For BB_GIT_SHALLOW_DEPTH: git fetch --depth rev - - For BB_GIT_SHALLOW_REVS: git fetch --shallow-exclude= rev - """ - - progresshandler = GitProgressHandler(d) - repourl = self._get_repo_url(ud) - bb.utils.mkdirhier(dest) - init_cmd = "%s init -q" % ud.basecmd - if ud.bareclone: - init_cmd += " --bare" - runfetchcmd(init_cmd, d, workdir=dest) - # Use repourl when creating a fast initial shallow clone - # Prefer already existing full bare clones if available - if not ud.shallow_skip_fast and not os.path.exists(ud.clonedir): - remote = shlex.quote(repourl) - else: - remote = ud.clonedir - runfetchcmd("%s remote add origin %s" % (ud.basecmd, remote), d, workdir=dest) - - # Check the histories which should be excluded - shallow_exclude = '' - for revision in ud.shallow_revs: - shallow_exclude += " --shallow-exclude=%s" % revision - - revision = ud.revision - depth = ud.shallow_depths[ud.name] - - # The --depth and --shallow-exclude can't be used together - if depth and shallow_exclude: - raise bb.fetch2.FetchError("BB_GIT_SHALLOW_REVS is set, but BB_GIT_SHALLOW_DEPTH is not 0.") - - # For nobranch, we need a ref, otherwise the commits will be - # removed, and for non-nobranch, we truncate the branch to our - # srcrev, to avoid keeping unnecessary history beyond that. - branch = ud.branch - if ud.nobranch: - ref = "refs/shallow/%s" % ud.name - elif ud.bareclone: - ref = "refs/heads/%s" % branch - else: - ref = "refs/remotes/origin/%s" % branch - - fetch_cmd = "%s fetch origin %s" % (ud.basecmd, revision) - if depth: - fetch_cmd += " --depth %s" % depth - - if shallow_exclude: - fetch_cmd += shallow_exclude - - # Advertise the revision for lower version git such as 2.25.1: - # error: Server does not allow request for unadvertised object. - # The ud.clonedir is a local temporary dir, will be removed when - # fetch is done, so we can do anything on it. 
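-        # Creating a temporary 'advertise-<rev>' branch in the local clone makes
-        # the commit reachable from an advertised ref, so the following
-        # 'git fetch origin <rev>' succeeds even on git versions/servers that
-        # refuse requests for unadvertised objects.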
- adv_cmd = 'git branch -f advertise-%s %s' % (revision, revision) - if ud.shallow_skip_fast: - runfetchcmd(adv_cmd, d, workdir=ud.clonedir) - - runfetchcmd(fetch_cmd, d, workdir=dest) - runfetchcmd("%s update-ref %s %s" % (ud.basecmd, ref, revision), d, workdir=dest) - # Fetch Git LFS data - self.lfs_fetch(ud, d, dest, ud.revision) - - # Apply extra ref wildcards - all_refs_remote = runfetchcmd("%s ls-remote origin 'refs/*'" % ud.basecmd, \ - d, workdir=dest).splitlines() - all_refs = [] - for line in all_refs_remote: - all_refs.append(line.split()[-1]) - extra_refs = [] - for r in ud.shallow_extra_refs: - if not ud.bareclone: - r = r.replace('refs/heads/', 'refs/remotes/origin/') - - if '*' in r: - matches = filter(lambda a: fnmatch.fnmatchcase(a, r), all_refs) - extra_refs.extend(matches) - else: - extra_refs.append(r) - - for ref in extra_refs: - ref_fetch = ref.replace('refs/heads/', '').replace('refs/remotes/origin/', '').replace('refs/tags/', '') - runfetchcmd("%s fetch origin --depth 1 %s" % (ud.basecmd, ref_fetch), d, workdir=dest) - revision = runfetchcmd("%s rev-parse FETCH_HEAD" % ud.basecmd, d, workdir=dest) - runfetchcmd("%s update-ref %s %s" % (ud.basecmd, ref, revision), d, workdir=dest) - - # The url is local ud.clonedir, set it to upstream one - runfetchcmd("%s remote set-url origin %s" % (ud.basecmd, shlex.quote(repourl)), d, workdir=dest) - - def unpack(self, ud, destdir, d): - """ unpack the downloaded src to destdir""" - - subdir = ud.parm.get("subdir") - subpath = ud.parm.get("subpath") - readpathspec = "" - def_destsuffix = (d.getVar("BB_GIT_DEFAULT_DESTSUFFIX") or "git") + "/" - - if subpath: - readpathspec = ":%s" % subpath - def_destsuffix = "%s/" % os.path.basename(subpath.rstrip('/')) - - if subdir: - # If 'subdir' param exists, create a dir and use it as destination for unpack cmd - if os.path.isabs(subdir): - if not os.path.realpath(subdir).startswith(os.path.realpath(destdir)): - raise bb.fetch2.UnpackError("subdir argument isn't a subdirectory of unpack root %s" % destdir, ud.url) - destdir = subdir - else: - destdir = os.path.join(destdir, subdir) - def_destsuffix = "" - - destsuffix = ud.parm.get("destsuffix", def_destsuffix) - destdir = ud.destdir = os.path.join(destdir, destsuffix) - if os.path.exists(destdir): - bb.utils.prunedir(destdir) - if not ud.bareclone: - ud.unpack_tracer.unpack("git", destdir) - - need_lfs = self._need_lfs(ud) - - if not need_lfs: - ud.basecmd = "GIT_LFS_SKIP_SMUDGE=1 " + ud.basecmd - - source_found = False - source_error = [] - - clonedir_is_up_to_date = not self.clonedir_need_update(ud, d) - if clonedir_is_up_to_date: - runfetchcmd("%s clone %s %s/ %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, destdir), d) - source_found = True - else: - source_error.append("clone directory not available or not up to date: " + ud.clonedir) - - if not source_found: - if ud.shallow: - if os.path.exists(ud.fullshallow): - bb.utils.mkdirhier(destdir) - runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=destdir) - source_found = True - else: - source_error.append("shallow clone not available: " + ud.fullshallow) - else: - source_error.append("shallow clone not enabled") - - if not source_found: - raise bb.fetch2.UnpackError("No up to date source found: " + "; ".join(source_error), ud.url) - - # If there is a tag parameter in the url and we also have a fixed srcrev, check the tag - # matches the revision - if 'tag' in ud.parm and sha1_re.match(ud.revision): - output = runfetchcmd("%s rev-list -n 1 %s" % (ud.basecmd, ud.parm['tag']), d, 
workdir=destdir) - output = output.strip() - if output != ud.revision: - # It is possible ud.revision is the revision on an annotated tag which won't match the output of rev-list - # If it resolves to the same thing there isn't a problem. - output2 = runfetchcmd("%s rev-list -n 1 %s" % (ud.basecmd, ud.revision), d, workdir=destdir) - output2 = output2.strip() - if output != output2: - raise bb.fetch2.FetchError("The revision the git tag '%s' resolved to didn't match the SRCREV in use (%s vs %s)" % (ud.parm['tag'], output, ud.revision), ud.url) - - repourl = self._get_repo_url(ud) - runfetchcmd("%s remote set-url origin %s" % (ud.basecmd, shlex.quote(repourl)), d, workdir=destdir) - - if self._contains_lfs(ud, d, destdir): - if not need_lfs: - bb.note("Repository %s has LFS content but it is not being fetched" % (repourl)) - else: - self._ensure_git_lfs(d, ud) - - runfetchcmd("%s lfs install --local" % ud.basecmd, d, workdir=destdir) - - if not ud.nocheckout: - if subpath: - runfetchcmd("%s read-tree %s%s" % (ud.basecmd, ud.revision, readpathspec), d, - workdir=destdir) - runfetchcmd("%s checkout-index -q -f -a" % ud.basecmd, d, workdir=destdir) - elif not ud.nobranch: - branchname = ud.branch - runfetchcmd("%s checkout -B %s %s" % (ud.basecmd, branchname, \ - ud.revision), d, workdir=destdir) - runfetchcmd("%s branch %s --set-upstream-to origin/%s" % (ud.basecmd, branchname, \ - branchname), d, workdir=destdir) - else: - runfetchcmd("%s checkout %s" % (ud.basecmd, ud.revision), d, workdir=destdir) - - return True - - def clean(self, ud, d): - """ clean the git directory """ - - to_remove = [ud.localpath, ud.fullmirror, ud.fullmirror + ".done"] - # The localpath is a symlink to clonedir when it is cloned from a - # mirror, so remove both of them. - if os.path.islink(ud.localpath): - clonedir = os.path.realpath(ud.localpath) - to_remove.append(clonedir) - - # Remove shallow mirror tarball - if ud.shallow: - to_remove.append(ud.fullshallow) - to_remove.append(ud.fullshallow + ".done") - - for r in to_remove: - if os.path.exists(r) or os.path.islink(r): - bb.note('Removing %s' % r) - bb.utils.remove(r, True) - - def supports_srcrev(self): - return True - - def _contains_ref(self, ud, d, name, wd, tag=False): - cmd = "" - git_ref_name = 'refs/tags/%s' % ud.parm['tag'] if tag else ud.revision - - if ud.nobranch: - cmd = "%s log --pretty=oneline -n 1 %s -- 2> /dev/null | wc -l" % ( - ud.basecmd, git_ref_name) - else: - cmd = "%s branch --contains %s --list %s 2> /dev/null | wc -l" % ( - ud.basecmd, git_ref_name, ud.branch) - try: - output = runfetchcmd(cmd, d, quiet=True, workdir=wd) - except bb.fetch2.FetchError: - return False - if len(output.split()) > 1: - raise bb.fetch2.FetchError("The command '%s' gave output with more then 1 line unexpectedly, output: '%s'" % (cmd, output)) - return output.split()[0] != "0" - - def _lfs_objects_downloaded(self, ud, d, wd): - """ - Verifies whether the LFS objects for requested revisions have already been downloaded - """ - # Bail out early if this repository doesn't use LFS - if not self._contains_lfs(ud, d, wd): - return True - - self._ensure_git_lfs(d, ud) - - # The Git LFS specification specifies ([1]) the LFS folder layout so it should be safe to check for file - # existence. 
- # [1] https://github.com/git-lfs/git-lfs/blob/main/docs/spec.md#intercepting-git - cmd = "%s lfs ls-files -l %s" \ - % (ud.basecmd, ud.revision) - output = runfetchcmd(cmd, d, quiet=True, workdir=wd).rstrip() - # Do not do any further matching if no objects are managed by LFS - if not output: - return True - - # Match all lines beginning with the hexadecimal OID - oid_regex = re.compile("^(([a-fA-F0-9]{2})([a-fA-F0-9]{2})[A-Fa-f0-9]+)") - for line in output.split("\n"): - oid = re.search(oid_regex, line) - if not oid: - bb.warn("git lfs ls-files output '%s' did not match expected format." % line) - if not os.path.exists(os.path.join(wd, "lfs", "objects", oid.group(2), oid.group(3), oid.group(1))): - return False - - return True - - def _need_lfs(self, ud): - return ud.parm.get("lfs", "1") == "1" - - def _contains_lfs(self, ud, d, wd): - """ - Check if the repository has 'lfs' (large file) content - """ - cmd = "%s grep '^[^#].*lfs' %s:.gitattributes | wc -l" % ( - ud.basecmd, ud.revision) - - try: - output = runfetchcmd(cmd, d, quiet=True, workdir=wd) - if int(output) > 0: - return True - except (bb.fetch2.FetchError,ValueError): - pass - return False - - def _ensure_git_lfs(self, d, ud): - """ - Ensures that git-lfs is available, raising a FetchError if it isn't. - """ - if shutil.which("git-lfs", path=d.getVar('PATH')) is None: - raise bb.fetch2.FetchError( - "Repository %s has LFS content, install git-lfs on host to download (or set lfs=0 " - "to ignore it)" % self._get_repo_url(ud)) - - def _get_repo_url(self, ud): - """ - Return the repository URL - """ - # Note that we do not support passwords directly in the git urls. There are several - # reasons. SRC_URI can be written out to things like buildhistory and people don't - # want to leak passwords like that. Its also all too easy to share metadata without - # removing the password. ssh keys, ~/.netrc and ~/.ssh/config files can be used as - # alternatives so we will not take patches adding password support here. - if ud.user: - username = ud.user + '@' - else: - username = "" - return "%s://%s%s%s" % (ud.proto, username, ud.host, urllib.parse.quote(ud.path)) - - def _revision_key(self, ud, d, name): - """ - Return a unique key for the url - """ - # Collapse adjacent slashes - return "git:" + ud.host + slash_re.sub(".", ud.path) + ud.unresolvedrev - - def _lsremote(self, ud, d, search): - """ - Run git ls-remote with the specified search string - """ - # Prevent recursion e.g. in OE if SRCPV is in PV, PV is in WORKDIR, - # and WORKDIR is in PATH (as a result of RSS), our call to - # runfetchcmd() exports PATH so this function will get called again (!) - # In this scenario the return call of the function isn't actually - # important - WORKDIR isn't needed in PATH to call git ls-remote - # anyway. 
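The re-entrancy guard that follows can be read as a general pattern; a condensed sketch, assuming a BitBake-style datastore with getVar/setVar/delVar:

    # Sketch: run fn() once, short-circuiting recursive re-entry via a
    # datastore flag; the flag is always cleared, even on error.
    def run_without_reentry(d, flag, fn):
        if d.getVar(flag, False):
            return ''
        d.setVar(flag, '1')
        try:
            return fn()
        finally:
            d.delVar(flag)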
- if d.getVar('_BB_GIT_IN_LSREMOTE', False): - return '' - d.setVar('_BB_GIT_IN_LSREMOTE', '1') - try: - repourl = self._get_repo_url(ud) - cmd = "%s ls-remote %s %s" % \ - (ud.basecmd, shlex.quote(repourl), search) - if ud.proto.lower() != 'file': - bb.fetch2.check_network_access(d, cmd, repourl) - output = runfetchcmd(cmd, d, True) - if not output: - raise bb.fetch2.FetchError("The command %s gave empty output unexpectedly" % cmd, ud.url) - finally: - d.delVar('_BB_GIT_IN_LSREMOTE') - return output - - def _latest_revision(self, ud, d, name): - """ - Compute the HEAD revision for the url - """ - if not d.getVar("__BBSRCREV_SEEN"): - raise bb.fetch2.FetchError("Recipe uses a floating tag/branch '%s' for repo '%s' without a fixed SRCREV yet doesn't call bb.fetch2.get_srcrev() (use SRCPV in PV for OE)." % (ud.unresolvedrev, ud.host+ud.path)) - - # Ensure we mark as not cached - bb.fetch2.mark_recipe_nocache(d) - - output = self._lsremote(ud, d, "") - # Tags of the form ^{} may not work, so fall back to the other forms - if ud.unresolvedrev[:5] == "refs/" or ud.usehead: - head = ud.unresolvedrev - tag = ud.unresolvedrev - else: - head = "refs/heads/%s" % ud.unresolvedrev - tag = "refs/tags/%s" % ud.unresolvedrev - for s in [head, tag + "^{}", tag]: - for l in output.strip().split('\n'): - sha1, ref = l.split() - if s == ref: - return sha1 - raise bb.fetch2.FetchError("Unable to resolve '%s' in upstream git repository in git ls-remote output for %s" % \ - (ud.unresolvedrev, ud.host+ud.path)) - - def latest_versionstring(self, ud, d): - """ - Compute the latest release name like "x.y.z" in "x.y.z+gitHASH" - by searching through the tags output of ls-remote, comparing - versions and returning the highest match. - """ - pupver = ('', '') - - try: - output = self._lsremote(ud, d, "refs/tags/*") - except (bb.fetch2.FetchError, bb.fetch2.NetworkAccess) as e: - bb.note("Could not list remote: %s" % str(e)) - return pupver - - rev_tag_re = re.compile(r"([0-9a-f]{40})\s+refs/tags/(.*)") - pver_re = re.compile(d.getVar('UPSTREAM_CHECK_GITTAGREGEX') or r"(?P<pver>([0-9][\.|_]?)+)") - nonrel_re = re.compile(r"(alpha|beta|rc|final)+") - - verstring = "" - for line in output.split("\n"): - if not line: - break - - m = rev_tag_re.match(line) - if not m: - continue - - (revision, tag) = m.groups() - - # Ignore non-release tags - if nonrel_re.search(tag): - continue - - # Search for a version string in the tag - m = pver_re.search(tag) - if not m: - continue - - pver = m.group('pver').replace("_", ".") - - if verstring and bb.utils.vercmp(("0", pver, ""), ("0", verstring, "")) < 0: - continue - - verstring = pver - pupver = (verstring, revision) - - return pupver - - def _build_revision(self, ud, d, name): - return ud.revision - - def gitpkgv_revision(self, ud, d, name): - """ - Return a sortable revision number by counting commits in the history - Based on gitpkgv.bbclass in meta-openembedded - """ - rev = ud.revision - localpath = ud.localpath - rev_file = os.path.join(localpath, "oe-gitpkgv_" + rev) - if not os.path.exists(localpath): - commits = None - else: - if not os.path.exists(rev_file) or not os.path.getsize(rev_file): - commits = bb.fetch2.runfetchcmd( - "git rev-list %s -- | wc -l" % shlex.quote(rev), - d, quiet=True).strip().lstrip('0') - if commits: - open(rev_file, "w").write("%d\n" % int(commits)) - else: - commits = open(rev_file, "r").readline(128).strip() - if commits: - return False, "%s+%s" % (commits, rev[:7]) - else: - return True, str(rev) - - def checkstatus(self, fetch, ud, d): - try: -
self._lsremote(ud, d, "") - return True - except bb.fetch2.FetchError: - return False diff --git a/bitbake/lib/bb/fetch2/gitannex.py b/bitbake/lib/bb/fetch2/gitannex.py deleted file mode 100644 index 80a808d88f..0000000000 --- a/bitbake/lib/bb/fetch2/gitannex.py +++ /dev/null @@ -1,77 +0,0 @@ -""" -BitBake 'Fetch' git annex implementation -""" - -# Copyright (C) 2014 Otavio Salvador -# Copyright (C) 2014 O.S. Systems Software LTDA. -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import bb -from bb.fetch2.git import Git -from bb.fetch2 import runfetchcmd - -class GitANNEX(Git): - def supports(self, ud, d): - """ - Check to see if a given url can be fetched with git. - """ - return ud.type in ['gitannex'] - - def urldata_init(self, ud, d): - super(GitANNEX, self).urldata_init(ud, d) - if ud.shallow: - ud.shallow_extra_refs += ['refs/heads/git-annex', 'refs/heads/synced/*'] - - def uses_annex(self, ud, d, wd): - for name in ud.names: - try: - runfetchcmd("%s rev-list git-annex" % (ud.basecmd), d, quiet=True, workdir=wd) - return True - except bb.fetch.FetchError: - pass - - return False - - def update_annex(self, ud, d, wd): - try: - runfetchcmd("%s annex get --all" % (ud.basecmd), d, quiet=True, workdir=wd) - except bb.fetch.FetchError: - return False - runfetchcmd("chmod u+w -R %s/annex" % (ud.clonedir), d, quiet=True, workdir=wd) - - return True - - def download(self, ud, d): - Git.download(self, ud, d) - - if not ud.shallow or ud.localpath != ud.fullshallow: - if self.uses_annex(ud, d, ud.clonedir): - self.update_annex(ud, d, ud.clonedir) - - def clone_shallow_local(self, ud, dest, d): - super(GitANNEX, self).clone_shallow_local(ud, dest, d) - - try: - runfetchcmd("%s annex init" % ud.basecmd, d, workdir=dest) - except bb.fetch.FetchError: - pass - - if self.uses_annex(ud, d, dest): - runfetchcmd("%s annex get" % ud.basecmd, d, workdir=dest) - runfetchcmd("chmod u+w -R %s/.git/annex" % (dest), d, quiet=True, workdir=dest) - - def unpack(self, ud, destdir, d): - Git.unpack(self, ud, destdir, d) - - try: - runfetchcmd("%s annex init" % (ud.basecmd), d, workdir=ud.destdir) - except bb.fetch.FetchError: - pass - - annex = self.uses_annex(ud, d, ud.destdir) - if annex: - runfetchcmd("%s annex get" % (ud.basecmd), d, workdir=ud.destdir) - runfetchcmd("chmod u+w -R %s/.git/annex" % (ud.destdir), d, quiet=True, workdir=ud.destdir) - diff --git a/bitbake/lib/bb/fetch2/gitsm.py b/bitbake/lib/bb/fetch2/gitsm.py deleted file mode 100644 index 5869e1b99b..0000000000 --- a/bitbake/lib/bb/fetch2/gitsm.py +++ /dev/null @@ -1,278 +0,0 @@ -""" -BitBake 'Fetch' git submodules implementation - -Inherits from and extends the Git fetcher to retrieve submodules of a git repository -after cloning. - -SRC_URI = "gitsm://" - -See the Git fetcher, git://, for usage documentation. - -NOTE: Switching a SRC_URI from "git://" to "gitsm://" requires a clean of your recipe. - -""" - -# Copyright (C) 2013 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import os -import bb -import copy -import shutil -import tempfile -from bb.fetch2.git import Git -from bb.fetch2 import runfetchcmd -from bb.fetch2 import logger -from bb.fetch2 import Fetch - -class GitSM(Git): - def supports(self, ud, d): - """ - Check to see if a given url can be fetched with git. - """ - return ud.type in ['gitsm'] - - def process_submodules(self, ud, workdir, function, d): - """ - Iterate over all of the submodules in this repository and execute - the 'function' for each of them. 
- """ - - submodules = [] - paths = {} - revision = {} - uris = {} - subrevision = {} - - def parse_gitmodules(gitmodules): - modules = {} - module = "" - for line in gitmodules.splitlines(): - if line.startswith('[submodule'): - module = line.split('"')[1] - modules[module] = {} - elif module and line.strip().startswith('path'): - path = line.split('=')[1].strip() - modules[module]['path'] = path - elif module and line.strip().startswith('url'): - url = line.split('=')[1].strip() - modules[module]['url'] = url - return modules - - # Collect the defined submodules, and their attributes - try: - gitmodules = runfetchcmd("%s show %s:.gitmodules" % (ud.basecmd, ud.revision), d, quiet=True, workdir=workdir) - except: - # No submodules to update - gitmodules = "" - - for m, md in parse_gitmodules(gitmodules).items(): - try: - module_hash = runfetchcmd("%s ls-tree -z -d %s %s" % (ud.basecmd, ud.revision, md['path']), d, quiet=True, workdir=workdir) - except: - # If the command fails, we don't have a valid file to check. If it doesn't - # fail -- it still might be a failure, see next check... - module_hash = "" - - if not module_hash: - logger.debug("submodule %s is defined, but is not initialized in the repository. Skipping", m) - continue - - submodules.append(m) - paths[m] = md['path'] - revision[m] = ud.revision - uris[m] = md['url'] - subrevision[m] = module_hash.split()[2] - - # Convert relative to absolute uri based on parent uri - if uris[m].startswith('..') or uris[m].startswith('./'): - newud = copy.copy(ud) - newud.path = os.path.normpath(os.path.join(newud.path, uris[m])) - uris[m] = Git._get_repo_url(self, newud) - - for module in submodules: - # Translate the module url into a SRC_URI - - if "://" in uris[module]: - # Properly formated URL already - proto = uris[module].split(':', 1)[0] - url = uris[module].replace('%s:' % proto, 'gitsm:', 1) - else: - if ":" in uris[module]: - # Most likely an SSH style reference - proto = "ssh" - if ":/" in uris[module]: - # Absolute reference, easy to convert.. - url = "gitsm://" + uris[module].replace(':/', '/', 1) - else: - # Relative reference, no way to know if this is right! - logger.warning("Submodule included by %s refers to relative ssh reference %s. References may fail if not absolute." % (ud.url, uris[module])) - url = "gitsm://" + uris[module].replace(':', '/', 1) - else: - # This has to be a file reference - proto = "file" - url = "gitsm://" + uris[module] - if url.endswith("{}{}".format(ud.host, ud.path)): - raise bb.fetch2.FetchError("Submodule refers to the parent repository. This will cause deadlock situation in current version of Bitbake." \ - "Consider using git fetcher instead.") - - url += ';protocol=%s' % proto - url += ";name=%s" % module - url += ";subpath=%s" % module - url += ";nobranch=1" - url += ";lfs=%s" % ("1" if self._need_lfs(ud) else "0") - # Note that adding "user=" here to give credentials to the - # submodule is not supported. Since using SRC_URI to give git:// - # URL a password is not supported, one have to use one of the - # recommended way (eg. ~/.netrc or SSH config) which does specify - # the user (See comment in git.py). - # So, we will not take patches adding "user=" support here. - - ld = d.createCopy() - # Not necessary to set SRC_URI, since we're passing the URI to - # Fetch. - #ld.setVar('SRC_URI', url) - ld.setVar('SRCREV_%s' % module, subrevision[module]) - - # Workaround for issues with SRCPV/SRCREV_FORMAT errors - # error refer to 'multiple' repositories. 
Only the repository - # in the original SRC_URI actually matters... - ld.setVar('SRCPV', d.getVar('SRCPV')) - ld.setVar('SRCREV_FORMAT', module) - - function(ud, url, module, paths[module], workdir, ld) - - return submodules != [] - - def call_process_submodules(self, ud, d, extra_check, subfunc): - # If we're using a shallow mirror tarball it needs to be - # unpacked temporarily so that we can examine the .gitmodules file - # Unpack even when ud.clonedir is not available, - # which may occur during a fast shallow clone - unpack = extra_check or not os.path.exists(ud.clonedir) - if ud.shallow and os.path.exists(ud.fullshallow) and unpack: - tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR")) - try: - runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=tmpdir) - self.process_submodules(ud, tmpdir, subfunc, d) - finally: - shutil.rmtree(tmpdir) - else: - self.process_submodules(ud, ud.clonedir, subfunc, d) - - def need_update(self, ud, d): - if Git.need_update(self, ud, d): - return True - - need_update_list = [] - def need_update_submodule(ud, url, module, modpath, workdir, d): - url += ";bareclone=1;nobranch=1" - - try: - newfetch = Fetch([url], d, cache=False) - new_ud = newfetch.ud[url] - if new_ud.method.need_update(new_ud, d): - need_update_list.append(modpath) - except Exception as e: - logger.error('gitsm: submodule update check failed: %s %s' % (type(e).__name__, str(e))) - need_update_result = True - - self.call_process_submodules(ud, d, not os.path.exists(ud.clonedir), need_update_submodule) - - if need_update_list: - logger.debug('gitsm: Submodules requiring update: %s' % (' '.join(need_update_list))) - return True - - return False - - def download(self, ud, d): - def download_submodule(ud, url, module, modpath, workdir, d): - url += ";bareclone=1;nobranch=1" - - # Is the following still needed? - #url += ";nocheckout=1" - - try: - newfetch = Fetch([url], d, cache=False) - newfetch.download() - except Exception as e: - logger.error('gitsm: submodule download failed: %s %s' % (type(e).__name__, str(e))) - raise - - Git.download(self, ud, d) - self.call_process_submodules(ud, d, self.need_update(ud, d), download_submodule) - - def unpack(self, ud, destdir, d): - def unpack_submodules(ud, url, module, modpath, workdir, d): - url += ";bareclone=1;nobranch=1" - - # Figure out where we clone over the bare submodules... - if ud.bareclone: - repo_conf = ud.destdir - else: - repo_conf = os.path.join(ud.destdir, '.git') - - try: - newfetch = Fetch([url], d, cache=False) - # modpath is needed by unpack tracer to calculate submodule - # checkout dir - new_ud = newfetch.ud[url] - new_ud.modpath = modpath - newfetch.unpack(root=os.path.dirname(os.path.join(repo_conf, 'modules', module))) - except Exception as e: - logger.error('gitsm: submodule unpack failed: %s %s' % (type(e).__name__, str(e))) - raise - - local_path = newfetch.localpath(url) - - # Correct the submodule references to the local download version... - runfetchcmd("%(basecmd)s config submodule.%(module)s.url %(url)s" % {'basecmd': ud.basecmd, 'module': module, 'url' : local_path}, d, workdir=ud.destdir) - - if ud.shallow: - runfetchcmd("%(basecmd)s config submodule.%(module)s.shallow true" % {'basecmd': ud.basecmd, 'module': module}, d, workdir=ud.destdir) - - # Ensure the submodule repository is NOT set to bare, since we're checking it out... 
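As context for the path handling around this step, a sketch (with hypothetical names) of where a submodule's git directory lands after a bare-clone unpack:

    # Sketch: the submodule's git dir sits under <repo_conf>/modules/<module>,
    # where repo_conf is the checkout's .git directory (or the checkout root
    # for bare clones), matching the repo_conf logic above.
    import os

    def submodule_gitdir(destdir, module, bareclone=False):
        repo_conf = destdir if bareclone else os.path.join(destdir, '.git')
        return os.path.join(repo_conf, 'modules', module)

    # e.g. submodule_gitdir('/work/repo', 'libfoo')
    #      -> '/work/repo/.git/modules/libfoo'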
- try: - runfetchcmd("%s config core.bare false" % (ud.basecmd), d, quiet=True, workdir=os.path.join(repo_conf, 'modules', module)) - except: - logger.error("Unable to set git config core.bare to false for %s" % os.path.join(repo_conf, 'modules', module)) - raise - - Git.unpack(self, ud, destdir, d) - - ret = self.process_submodules(ud, ud.destdir, unpack_submodules, d) - - if not ud.bareclone and ret: - cmdprefix = "" - # Avoid LFS smudging (replacing the LFS pointers with the actual content) when LFS shouldn't be used but git-lfs is installed. - if not self._need_lfs(ud): - cmdprefix = "GIT_LFS_SKIP_SMUDGE=1 " - runfetchcmd("%s%s submodule update --recursive --no-fetch" % (cmdprefix, ud.basecmd), d, quiet=True, workdir=ud.destdir) - def clean(self, ud, d): - def clean_submodule(ud, url, module, modpath, workdir, d): - url += ";bareclone=1;nobranch=1" - try: - newfetch = Fetch([url], d, cache=False) - newfetch.clean() - except Exception as e: - logger.warning('gitsm: submodule clean failed: %s %s' % (type(e).__name__, str(e))) - - self.call_process_submodules(ud, d, True, clean_submodule) - - # Clean top git dir - Git.clean(self, ud, d) - - def implicit_urldata(self, ud, d): - import subprocess - - urldata = [] - def add_submodule(ud, url, module, modpath, workdir, d): - url += ";bareclone=1;nobranch=1" - newfetch = Fetch([url], d, cache=False) - urldata.extend(newfetch.expanded_urldata()) - - self.call_process_submodules(ud, d, ud.method.need_update(ud, d), add_submodule) - - return urldata diff --git a/bitbake/lib/bb/fetch2/gomod.py b/bitbake/lib/bb/fetch2/gomod.py deleted file mode 100644 index 53c1d8d115..0000000000 --- a/bitbake/lib/bb/fetch2/gomod.py +++ /dev/null @@ -1,273 +0,0 @@ -""" -BitBake 'Fetch' implementation for Go modules - -The gomod/gomodgit fetchers are used to download Go modules to the module cache -from a module proxy or directly from a version control repository. - -Example SRC_URI: - -SRC_URI += "gomod://golang.org/x/net;version=v0.9.0;sha256sum=..." -SRC_URI += "gomodgit://golang.org/x/net;version=v0.9.0;repo=go.googlesource.com/net;srcrev=..." - -Required SRC_URI parameters: - -- version - The version of the module. - -Optional SRC_URI parameters: - -- mod - Fetch and unpack the go.mod file only instead of the complete module. - The go command may need to download go.mod files for many different modules - when computing the build list, and go.mod files are much smaller than - module zip files. - The default is "0", set mod=1 for the go.mod file only. - -- sha256sum - The checksum of the module zip file, or the go.mod file in case of fetching - only the go.mod file. Alternatively, set the SRC_URI varible flag for - "module@version.sha256sum". - -- protocol - The method used when fetching directly from a version control repository. - The default is "https" for git. - -- repo - The URL when fetching directly from a version control repository. Required - when the URL is different from the module path. - -- srcrev - The revision identifier used when fetching directly from a version control - repository. Alternatively, set the SRCREV varible for "module@version". - -- subdir - The module subdirectory when fetching directly from a version control - repository. Required when the module is not located in the root of the - repository. - -Related variables: - -- GO_MOD_PROXY - The module proxy used by the fetcher. - -- GO_MOD_CACHE_DIR - The directory where the module cache is located. 
- This must match the exported GOMODCACHE variable for the go command to find - the downloaded modules. - -See the Go modules reference, https://go.dev/ref/mod, for more information -about the module cache, module proxies and version control systems. -""" - -import hashlib -import os -import re -import shutil -import subprocess -import zipfile - -import bb -from bb.fetch2 import FetchError -from bb.fetch2 import MissingParameterError -from bb.fetch2 import runfetchcmd -from bb.fetch2 import subprocess_setup -from bb.fetch2.git import Git -from bb.fetch2.wget import Wget - - -def escape(path): - """Escape capital letters using exclamation points.""" - return re.sub(r'([A-Z])', lambda m: '!' + m.group(1).lower(), path) - - -class GoMod(Wget): - """Class to fetch Go modules from a Go module proxy via wget""" - - def supports(self, ud, d): - """Check to see if a given URL is for this fetcher.""" - return ud.type == 'gomod' - - def urldata_init(self, ud, d): - """Set up to download the module from the module proxy. - - Set up to download the module zip file to the module cache directory - and unpack the go.mod file (unless downloading only the go.mod file): - - cache/download/<module>/@v/<version>.zip: The module zip file. - cache/download/<module>/@v/<version>.mod: The go.mod file. - """ - - proxy = d.getVar('GO_MOD_PROXY') or 'proxy.golang.org' - moddir = d.getVar('GO_MOD_CACHE_DIR') or 'pkg/mod' - - if 'version' not in ud.parm: - raise MissingParameterError('version', ud.url) - - module = ud.host - if ud.path != '/': - module += ud.path - ud.parm['module'] = module - version = ud.parm['version'] - - # Set URL and filename for wget download - if ud.parm.get('mod', '0') == '1': - ext = '.mod' - else: - ext = '.zip' - path = escape(f"{module}/@v/{version}{ext}") - ud.url = bb.fetch2.encodeurl( - ('https', proxy, '/' + path, None, None, None)) - ud.parm['downloadfilename'] = f"{module.replace('/', '.')}@{version}{ext}" - - # Set name for checksum verification - ud.parm['name'] = f"{module}@{version}" - - # Set path for unpack - ud.parm['unpackpath'] = os.path.join(moddir, 'cache/download', path) - - super().urldata_init(ud, d) - - def unpack(self, ud, rootdir, d): - """Unpack the module in the module cache.""" - - # Unpack the module zip file or go.mod file - unpackpath = os.path.join(rootdir, ud.parm['unpackpath']) - unpackdir = os.path.dirname(unpackpath) - bb.utils.mkdirhier(unpackdir) - ud.unpack_tracer.unpack("file-copy", unpackdir) - cmd = f"cp {ud.localpath} {unpackpath}" - path = d.getVar('PATH') - if path: - cmd = f"PATH={path} {cmd}" - name = os.path.basename(unpackpath) - bb.note(f"Unpacking {name} to {unpackdir}/") - subprocess.check_call(cmd, shell=True, preexec_fn=subprocess_setup) - - if name.endswith('.zip'): - # Unpack the go.mod file from the zip file - module = ud.parm['module'] - name = name.rsplit('.', 1)[0] + '.mod' - bb.note(f"Unpacking {name} to {unpackdir}/") - with zipfile.ZipFile(ud.localpath) as zf: - with open(os.path.join(unpackdir, name), mode='wb') as mf: - try: - f = module + '@' + ud.parm['version'] + '/go.mod' - shutil.copyfileobj(zf.open(f), mf) - except KeyError: - # If the module does not have a go.mod file, synthesize - # one containing only a module statement.
mf.write(f'module {module}\n'.encode()) - - -class GoModGit(Git): - """Class to fetch Go modules directly from a git repository""" - - def supports(self, ud, d): - """Check to see if a given URL is for this fetcher.""" - return ud.type == 'gomodgit' - - def urldata_init(self, ud, d): - """Set up to download the module from the git repository. - - Set up to download the git repository to the module cache directory and - unpack the module zip file and the go.mod file: - - cache/vcs/<hash>: The bare git repository. - cache/download/<module>/@v/<version>.zip: The module zip file. - cache/download/<module>/@v/<version>.mod: The go.mod file. - """ - - moddir = d.getVar('GO_MOD_CACHE_DIR') or 'pkg/mod' - - if 'version' not in ud.parm: - raise MissingParameterError('version', ud.url) - - module = ud.host - if ud.path != '/': - module += ud.path - ud.parm['module'] = module - - # Set host, path and srcrev for git download - if 'repo' in ud.parm: - repo = ud.parm['repo'] - idx = repo.find('/') - if idx != -1: - ud.host = repo[:idx] - ud.path = repo[idx:] - else: - ud.host = repo - ud.path = '' - if 'protocol' not in ud.parm: - ud.parm['protocol'] = 'https' - ud.name = f"{module}@{ud.parm['version']}" - srcrev = d.getVar('SRCREV_' + ud.name) - if srcrev: - if 'srcrev' not in ud.parm: - ud.parm['srcrev'] = srcrev - else: - if 'srcrev' in ud.parm: - d.setVar('SRCREV_' + ud.name, ud.parm['srcrev']) - if 'branch' not in ud.parm: - ud.parm['nobranch'] = '1' - - # Set subpath, subdir and bareclone for git unpack - if 'subdir' in ud.parm: - ud.parm['subpath'] = ud.parm['subdir'] - key = f"git3:{ud.parm['protocol']}://{ud.host}{ud.path}".encode() - ud.parm['key'] = key - ud.parm['subdir'] = os.path.join(moddir, 'cache/vcs', - hashlib.sha256(key).hexdigest()) - ud.parm['bareclone'] = '1' - - super().urldata_init(ud, d) - - def unpack(self, ud, rootdir, d): - """Unpack the module in the module cache.""" - - # Unpack the bare git repository - super().unpack(ud, rootdir, d) - - moddir = d.getVar('GO_MOD_CACHE_DIR') or 'pkg/mod' - - # Create the info file - module = ud.parm['module'] - repodir = os.path.join(rootdir, ud.parm['subdir']) - with open(repodir + '.info', 'wb') as f: - f.write(ud.parm['key']) - - # Unpack the go.mod file from the repository - unpackdir = os.path.join(rootdir, moddir, 'cache/download', - escape(module), '@v') - bb.utils.mkdirhier(unpackdir) - srcrev = ud.parm['srcrev'] - version = ud.parm['version'] - escaped_version = escape(version) - cmd = f"git ls-tree -r --name-only '{srcrev}'" - if 'subpath' in ud.parm: - cmd += f" '{ud.parm['subpath']}'" - files = runfetchcmd(cmd, d, workdir=repodir).split() - name = escaped_version + '.mod' - bb.note(f"Unpacking {name} to {unpackdir}/") - with open(os.path.join(unpackdir, name), mode='wb') as mf: - f = 'go.mod' - if 'subpath' in ud.parm: - f = os.path.join(ud.parm['subpath'], f) - if f in files: - cmd = ['git', 'cat-file', 'blob', srcrev + ':' + f] - subprocess.check_call(cmd, stdout=mf, cwd=repodir, - preexec_fn=subprocess_setup) - else: - # If the module does not have a go.mod file, synthesize one - # containing only a module statement.
- mf.write(f'module {module}\n'.encode()) - - # Synthesize the module zip file from the repository - name = escaped_version + '.zip' - bb.note(f"Unpacking {name} to {unpackdir}/") - with zipfile.ZipFile(os.path.join(unpackdir, name), mode='w') as zf: - prefix = module + '@' + version + '/' - for f in files: - cmd = ['git', 'cat-file', 'blob', srcrev + ':' + f] - data = subprocess.check_output(cmd, cwd=repodir, - preexec_fn=subprocess_setup) - zf.writestr(prefix + f, data) diff --git a/bitbake/lib/bb/fetch2/hg.py b/bitbake/lib/bb/fetch2/hg.py deleted file mode 100644 index cbff8c490c..0000000000 --- a/bitbake/lib/bb/fetch2/hg.py +++ /dev/null @@ -1,264 +0,0 @@ -""" -BitBake 'Fetch' implementation for mercurial DRCS (hg). - -""" - -# Copyright (C) 2003, 2004 Chris Larson -# Copyright (C) 2004 Marcin Juszkiewicz -# Copyright (C) 2007 Robert Schuster -# -# SPDX-License-Identifier: GPL-2.0-only -# -# Based on functions from the base bb module, Copyright 2003 Holger Schurig -# - -import os -import bb -import errno -from bb.fetch2 import FetchMethod -from bb.fetch2 import FetchError -from bb.fetch2 import MissingParameterError -from bb.fetch2 import runfetchcmd -from bb.fetch2 import logger - -class Hg(FetchMethod): - """Class to fetch from mercurial repositories""" - def supports(self, ud, d): - """ - Check to see if a given url can be fetched with mercurial. - """ - return ud.type in ['hg'] - - def supports_checksum(self, urldata): - """ - Don't require checksums for local archives created from - repository checkouts. - """ - return False - - def urldata_init(self, ud, d): - """ - init hg specific variable within url data - """ - if not "module" in ud.parm: - raise MissingParameterError('module', ud.url) - - ud.module = ud.parm["module"] - - if 'protocol' in ud.parm: - ud.proto = ud.parm['protocol'] - elif not ud.host: - ud.proto = 'file' - else: - ud.proto = "hg" - - # Create paths to mercurial checkouts - hgsrcname = '%s_%s_%s' % (ud.module.replace('/', '.'), \ - ud.host, ud.path.replace('/', '.')) - mirrortarball = 'hg_%s.tar.gz' % hgsrcname - ud.fullmirror = os.path.join(d.getVar("DL_DIR"), mirrortarball) - ud.mirrortarballs = [mirrortarball] - - hgdir = d.getVar("HGDIR") or (d.getVar("DL_DIR") + "/hg") - ud.pkgdir = os.path.join(hgdir, hgsrcname) - ud.moddir = os.path.join(ud.pkgdir, ud.module) - ud.localfile = ud.moddir - ud.basecmd = d.getVar("FETCHCMD_hg") or "/usr/bin/env hg" - - ud.setup_revisions(d) - - if 'rev' in ud.parm: - ud.revision = ud.parm['rev'] - elif not ud.revision: - ud.revision = self.latest_revision(ud, d) - - ud.write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS") - - def need_update(self, ud, d): - revTag = ud.parm.get('rev', 'tip') - if revTag == "tip": - return True - if not os.path.exists(ud.localpath): - return True - return False - - def try_premirror(self, ud, d): - # If we don't do this, updating an existing checkout with only premirrors - # is not possible - if bb.utils.to_boolean(d.getVar("BB_FETCH_PREMIRRORONLY")): - return True - if os.path.exists(ud.moddir): - return False - return True - - def _buildhgcommand(self, ud, d, command): - """ - Build up an hg commandline based on ud - command is "fetch", "update", "info" - """ - - proto = ud.parm.get('protocol', 'http') - - host = ud.host - if proto == "file": - host = "/" - ud.host = "localhost" - - if not ud.user: - hgroot = host + ud.path - else: - if ud.pswd: - hgroot = ud.user + ":" + ud.pswd + "@" + host + ud.path - else: - hgroot = ud.user + "@" + host + ud.path - - if command == "info": - return 
"%s identify -i %s://%s/%s" % (ud.basecmd, proto, hgroot, ud.module) - - options = []; - - # Don't specify revision for the fetch; clone the entire repo. - # This avoids an issue if the specified revision is a tag, because - # the tag actually exists in the specified revision + 1, so it won't - # be available when used in any successive commands. - if ud.revision and command != "fetch": - options.append("-r %s" % ud.revision) - - if command == "fetch": - if ud.user and ud.pswd: - cmd = "%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" clone %s %s://%s/%s %s" % (ud.basecmd, ud.user, ud.pswd, proto, " ".join(options), proto, hgroot, ud.module, ud.module) - else: - cmd = "%s clone %s %s://%s/%s %s" % (ud.basecmd, " ".join(options), proto, hgroot, ud.module, ud.module) - elif command == "pull": - # do not pass options list; limiting pull to rev causes the local - # repo not to contain it and immediately following "update" command - # will crash - if ud.user and ud.pswd: - cmd = "%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" pull" % (ud.basecmd, ud.user, ud.pswd, proto) - else: - cmd = "%s pull" % (ud.basecmd) - elif command == "update" or command == "up": - if ud.user and ud.pswd: - cmd = "%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" update -C %s" % (ud.basecmd, ud.user, ud.pswd, proto, " ".join(options)) - else: - cmd = "%s update -C %s" % (ud.basecmd, " ".join(options)) - else: - raise FetchError("Invalid hg command %s" % command, ud.url) - - return cmd - - def download(self, ud, d): - """Fetch url""" - - logger.debug2("Fetch: checking for module directory '" + ud.moddir + "'") - - # If the checkout doesn't exist and the mirror tarball does, extract it - if not os.path.exists(ud.pkgdir) and os.path.exists(ud.fullmirror): - bb.utils.mkdirhier(ud.pkgdir) - runfetchcmd("tar -xzf %s" % (ud.fullmirror), d, workdir=ud.pkgdir) - - if os.access(os.path.join(ud.moddir, '.hg'), os.R_OK): - # Found the source, check whether need pull - updatecmd = self._buildhgcommand(ud, d, "update") - logger.debug("Running %s", updatecmd) - try: - runfetchcmd(updatecmd, d, workdir=ud.moddir) - except bb.fetch2.FetchError: - # Runnning pull in the repo - pullcmd = self._buildhgcommand(ud, d, "pull") - logger.info("Pulling " + ud.url) - # update sources there - logger.debug("Running %s", pullcmd) - bb.fetch2.check_network_access(d, pullcmd, ud.url) - runfetchcmd(pullcmd, d, workdir=ud.moddir) - try: - os.unlink(ud.fullmirror) - except OSError as exc: - if exc.errno != errno.ENOENT: - raise - - # No source found, clone it. 
- if not os.path.exists(ud.moddir): - fetchcmd = self._buildhgcommand(ud, d, "fetch") - logger.info("Fetch " + ud.url) - # check out sources there - bb.utils.mkdirhier(ud.pkgdir) - logger.debug("Running %s", fetchcmd) - bb.fetch2.check_network_access(d, fetchcmd, ud.url) - runfetchcmd(fetchcmd, d, workdir=ud.pkgdir) - - # Even when we clone (fetch), we still need to update as hg's clone - # won't checkout the specified revision if its on a branch - updatecmd = self._buildhgcommand(ud, d, "update") - logger.debug("Running %s", updatecmd) - runfetchcmd(updatecmd, d, workdir=ud.moddir) - - def clean(self, ud, d): - """ Clean the hg dir """ - - bb.utils.remove(ud.localpath, True) - bb.utils.remove(ud.fullmirror) - bb.utils.remove(ud.fullmirror + ".done") - - def supports_srcrev(self): - return True - - def _latest_revision(self, ud, d, name): - """ - Compute tip revision for the url - """ - bb.fetch2.check_network_access(d, self._buildhgcommand(ud, d, "info"), ud.url) - output = runfetchcmd(self._buildhgcommand(ud, d, "info"), d) - return output.strip() - - def _build_revision(self, ud, d, name): - return ud.revision - - def _revision_key(self, ud, d, name): - """ - Return a unique key for the url - """ - return "hg:" + ud.moddir - - def build_mirror_data(self, ud, d): - # Generate a mirror tarball if needed - if ud.write_tarballs == "1" and not os.path.exists(ud.fullmirror): - # it's possible that this symlink points to read-only filesystem with PREMIRROR - if os.path.islink(ud.fullmirror): - os.unlink(ud.fullmirror) - - logger.info("Creating tarball of hg repository") - runfetchcmd("tar -czf %s %s" % (ud.fullmirror, ud.module), d, workdir=ud.pkgdir) - runfetchcmd("touch %s.done" % (ud.fullmirror), d, workdir=ud.pkgdir) - - def localpath(self, ud, d): - return ud.pkgdir - - def unpack(self, ud, destdir, d): - """ - Make a local clone or export for the url - """ - - revflag = "-r %s" % ud.revision - subdir = ud.parm.get("destsuffix", ud.module) - codir = "%s/%s" % (destdir, subdir) - ud.unpack_tracer.unpack("hg", codir) - - scmdata = ud.parm.get("scmdata", "") - if scmdata != "nokeep": - proto = ud.parm.get('protocol', 'http') - if not os.access(os.path.join(codir, '.hg'), os.R_OK): - logger.debug2("Unpack: creating new hg repository in '" + codir + "'") - runfetchcmd("%s init %s" % (ud.basecmd, codir), d) - logger.debug2("Unpack: updating source in '" + codir + "'") - if ud.user and ud.pswd: - runfetchcmd("%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" pull %s" % (ud.basecmd, ud.user, ud.pswd, proto, ud.moddir), d, workdir=codir) - else: - runfetchcmd("%s pull %s" % (ud.basecmd, ud.moddir), d, workdir=codir) - if ud.user and ud.pswd: - runfetchcmd("%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" up -C %s" % (ud.basecmd, ud.user, ud.pswd, proto, revflag), d, workdir=codir) - else: - runfetchcmd("%s up -C %s" % (ud.basecmd, revflag), d, workdir=codir) - else: - logger.debug2("Unpack: extracting source to '" + codir + "'") - runfetchcmd("%s archive -t files %s %s" % (ud.basecmd, revflag, codir), d, workdir=ud.moddir) diff --git a/bitbake/lib/bb/fetch2/local.py b/bitbake/lib/bb/fetch2/local.py deleted file mode 100644 index fda56a564e..0000000000 --- a/bitbake/lib/bb/fetch2/local.py +++ /dev/null @@ -1,91 +0,0 @@ -""" -BitBake 'Fetch' implementations - -Classes for obtaining upstream sources for the -BitBake build 
tools. - -""" - -# Copyright (C) 2003, 2004 Chris Larson -# -# SPDX-License-Identifier: GPL-2.0-only -# -# Based on functions from the base bb module, Copyright 2003 Holger Schurig -# - -import os -import urllib.request, urllib.parse, urllib.error -import bb -import bb.utils -from bb.fetch2 import FetchMethod, FetchError, ParameterError -from bb.fetch2 import logger - -class Local(FetchMethod): - def supports(self, urldata, d): - """ - Check to see if a given url represents a local fetch. - """ - return urldata.type in ['file'] - - def urldata_init(self, ud, d): - # We don't set localfile as for this fetcher the file is already local! - ud.basename = os.path.basename(ud.path) - ud.basepath = ud.path - ud.needdonestamp = False - if "*" in ud.path: - raise bb.fetch2.ParameterError("file:// urls using globbing are no longer supported. Please place the files in a directory and reference that instead.", ud.url) - return - - def localpath(self, urldata, d): - """ - Return the local filename of a given url assuming a successful fetch. - """ - return self.localfile_searchpaths(urldata, d)[-1] - - def localfile_searchpaths(self, urldata, d): - """ - Return the local filename of a given url assuming a successful fetch. - """ - searched = [] - path = urldata.path - newpath = path - if path[0] == "/": - logger.debug2("Using absolute %s" % (path)) - return [path] - filespath = d.getVar('FILESPATH') - if filespath: - logger.debug2("Searching for %s in paths:\n %s" % (path, "\n ".join(filespath.split(":")))) - newpath, hist = bb.utils.which(filespath, path, history=True) - logger.debug2("Using %s for %s" % (newpath, path)) - searched.extend(hist) - return searched - - def need_update(self, ud, d): - if os.path.exists(ud.localpath): - return False - return True - - def download(self, urldata, d): - """Fetch urls (no-op for Local method)""" - # no need to fetch local files, we'll deal with them in place. - if self.supports_checksum(urldata) and not os.path.exists(urldata.localpath): - locations = [] - filespath = d.getVar('FILESPATH') - if filespath: - locations = filespath.split(":") - msg = "Unable to find file " + urldata.url + " anywhere to download to " + urldata.localpath + ". The paths that were searched were:\n " + "\n ".join(locations) - raise FetchError(msg) - - return True - - def checkstatus(self, fetch, urldata, d): - """ - Check the status of the url - """ - if os.path.exists(urldata.localpath): - return True - return False - - def clean(self, urldata, d): - return - diff --git a/bitbake/lib/bb/fetch2/npm.py b/bitbake/lib/bb/fetch2/npm.py deleted file mode 100644 index e469d66768..0000000000 --- a/bitbake/lib/bb/fetch2/npm.py +++ /dev/null @@ -1,331 +0,0 @@ -# Copyright (C) 2020 Savoir-Faire Linux -# -# SPDX-License-Identifier: GPL-2.0-only -# -""" -BitBake 'Fetch' npm implementation - -npm fetcher support the SRC_URI with format of: -SRC_URI = "npm://some.registry.url;OptionA=xxx;OptionB=xxx;..." - -Supported SRC_URI options are: - -- package - The npm package name. This is a mandatory parameter. - -- version - The npm package version. This is a mandatory parameter. - -- downloadfilename - Specifies the filename used when storing the downloaded file. - -- destsuffix - Specifies the directory to use to unpack the package (default: npm). 
-""" - -import base64 -import json -import os -import re -import shlex -import tempfile -import bb -from bb.fetch2 import Fetch -from bb.fetch2 import FetchError -from bb.fetch2 import FetchMethod -from bb.fetch2 import MissingParameterError -from bb.fetch2 import ParameterError -from bb.fetch2 import URI -from bb.fetch2 import check_network_access -from bb.fetch2 import runfetchcmd -from bb.utils import is_semver - -def npm_package(package): - """Convert the npm package name to remove unsupported character""" - # For scoped package names ('@user/package') the '/' is replaced by a '-'. - # This is similar to what 'npm pack' does, but 'npm pack' also strips the - # leading '@', which can lead to ambiguous package names. - name = re.sub("/", "-", package) - name = name.lower() - name = re.sub(r"[^\-a-z0-9@]", "", name) - name = name.strip("-") - return name - - -def npm_filename(package, version): - """Get the filename of a npm package""" - return npm_package(package) + "-" + version + ".tgz" - -def npm_localfile(package, version=None): - """Get the local filename of a npm package""" - if version is not None: - filename = npm_filename(package, version) - else: - filename = package - return os.path.join("npm2", filename) - -def npm_integrity(integrity): - """ - Get the checksum name and expected value from the subresource integrity - https://www.w3.org/TR/SRI/ - """ - algo, value = integrity.split("-", maxsplit=1) - return "%ssum" % algo, base64.b64decode(value).hex() - -def npm_unpack(tarball, destdir, d): - """Unpack a npm tarball""" - bb.utils.mkdirhier(destdir) - cmd = "tar --extract --gzip --file=%s" % shlex.quote(tarball) - cmd += " --no-same-owner" - cmd += " --delay-directory-restore" - cmd += " --strip-components=1" - runfetchcmd(cmd, d, workdir=destdir) - runfetchcmd("chmod -R +X '%s'" % (destdir), d, quiet=True, workdir=destdir) - -class NpmEnvironment(object): - """ - Using a npm config file seems more reliable than using cli arguments. - This class allows to create a controlled environment for npm commands. 
- """ - def __init__(self, d, configs=[], npmrc=None): - self.d = d - - self.user_config = tempfile.NamedTemporaryFile(mode="w", buffering=1) - - hn = self._home_npmrc(d) - if hn is not None: - with open(hn, 'r') as hnf: - self.user_config.write(hnf.read()) - - for key, value in configs: - self.user_config.write("%s=%s\n" % (key, value)) - - if npmrc: - self.global_config_name = npmrc - else: - self.global_config_name = "/dev/null" - - def __del__(self): - if self.user_config: - self.user_config.close() - - def _home_npmrc(self, d): - """Function to return user's HOME .npmrc file (or None if it doesn't exist)""" - home_npmrc_file = os.path.join(os.environ.get("HOME"), ".npmrc") - if d.getVar("BB_USE_HOME_NPMRC") == "1" and os.path.exists(home_npmrc_file): - bb.warn(f"BB_USE_HOME_NPMRC flag set and valid .npmrc detected - "\ - f"npm fetcher will use {home_npmrc_file}") - return home_npmrc_file - return None - - def run(self, cmd, args=None, configs=None, workdir=None): - """Run npm command in a controlled environment""" - with tempfile.TemporaryDirectory() as tmpdir: - d = bb.data.createCopy(self.d) - d.setVar("PATH", d.getVar("PATH")) # PATH might contain $HOME - evaluate it before patching - d.setVar("HOME", tmpdir) - - if not workdir: - workdir = tmpdir - - def _run(cmd): - cmd = "NPM_CONFIG_USERCONFIG=%s " % (self.user_config.name) + cmd - cmd = "NPM_CONFIG_GLOBALCONFIG=%s " % (self.global_config_name) + cmd - return runfetchcmd(cmd, d, workdir=workdir) - - if configs: - bb.warn("Use of configs argument of NpmEnvironment.run() function" - " is deprecated. Please use args argument instead.") - for key, value in configs: - cmd += " --%s=%s" % (key, shlex.quote(value)) - - if args: - for key, value in args: - cmd += " --%s=%s" % (key, shlex.quote(value)) - - return _run(cmd) - -class Npm(FetchMethod): - """Class to fetch a package from a npm registry""" - - def supports(self, ud, d): - """Check if a given url can be fetched with npm""" - return ud.type in ["npm"] - - def urldata_init(self, ud, d): - """Init npm specific variables within url data""" - ud.package = None - ud.version = None - ud.registry = None - - # Get the 'package' parameter - if "package" in ud.parm: - ud.package = ud.parm.get("package") - - if not ud.package: - raise MissingParameterError("Parameter 'package' required", ud.url) - - # Get the 'version' parameter - if "version" in ud.parm: - ud.version = ud.parm.get("version") - - if not ud.version: - raise MissingParameterError("Parameter 'version' required", ud.url) - - if not is_semver(ud.version) and not ud.version == "latest": - raise ParameterError("Invalid 'version' parameter", ud.url) - - # Extract the 'registry' part of the url - ud.registry = re.sub(r"^npm://", "https://", ud.url.split(";")[0]) - - # Using the 'downloadfilename' parameter as local filename - # or the npm package name. - if "downloadfilename" in ud.parm: - ud.localfile = npm_localfile(ud.parm["downloadfilename"]) - else: - ud.localfile = npm_localfile(ud.package, ud.version) - - # Get the base 'npm' command - ud.basecmd = d.getVar("FETCHCMD_npm") or "npm" - - # This fetcher resolves a URI from a npm package name and version and - # then forwards it to a proxy fetcher. A resolve file containing the - # resolved URI is created to avoid unwanted network access (if the file - # already exists). The management of the donestamp file, the lockfile - # and the checksums are forwarded to the proxy fetcher. 
- ud.proxy = None - ud.needdonestamp = False - ud.resolvefile = self.localpath(ud, d) + ".resolved" - - def _resolve_proxy_url(self, ud, d): - def _npm_view(): - args = [] - args.append(("json", "true")) - args.append(("registry", ud.registry)) - pkgver = shlex.quote(ud.package + "@" + ud.version) - cmd = ud.basecmd + " view %s" % pkgver - env = NpmEnvironment(d) - check_network_access(d, cmd, ud.registry) - view_string = env.run(cmd, args=args) - - if not view_string: - raise FetchError("Unavailable package %s" % pkgver, ud.url) - - try: - view = json.loads(view_string) - - error = view.get("error") - if error is not None: - raise FetchError(error.get("summary"), ud.url) - - if ud.version == "latest": - bb.warn("The npm package %s is using the latest " \ - "version available. This could lead to " \ - "non-reproducible builds." % pkgver) - elif ud.version != view.get("version"): - raise ParameterError("Invalid 'version' parameter", ud.url) - - return view - - except Exception as e: - raise FetchError("Invalid view from npm: %s" % str(e), ud.url) - - def _get_url(view): - tarball_url = view.get("dist", {}).get("tarball") - - if tarball_url is None: - raise FetchError("Invalid 'dist.tarball' in view", ud.url) - - uri = URI(tarball_url) - uri.params["downloadfilename"] = ud.localfile - - integrity = view.get("dist", {}).get("integrity") - shasum = view.get("dist", {}).get("shasum") - - if integrity is not None: - checksum_name, checksum_expected = npm_integrity(integrity) - uri.params[checksum_name] = checksum_expected - elif shasum is not None: - uri.params["sha1sum"] = shasum - else: - raise FetchError("Invalid 'dist.integrity' in view", ud.url) - - return str(uri) - - url = _get_url(_npm_view()) - - bb.utils.mkdirhier(os.path.dirname(ud.resolvefile)) - with open(ud.resolvefile, "w") as f: - f.write(url) - - def _setup_proxy(self, ud, d): - if ud.proxy is None: - if not os.path.exists(ud.resolvefile): - self._resolve_proxy_url(ud, d) - - with open(ud.resolvefile, "r") as f: - url = f.read() - - # Avoid conflicts between the environment data and: - # - the proxy url checksum - data = bb.data.createCopy(d) - data.delVarFlags("SRC_URI") - ud.proxy = Fetch([url], data) - - def _get_proxy_method(self, ud, d): - self._setup_proxy(ud, d) - proxy_url = ud.proxy.urls[0] - proxy_ud = ud.proxy.ud[proxy_url] - proxy_d = ud.proxy.d - proxy_ud.setup_localpath(proxy_d) - return proxy_ud.method, proxy_ud, proxy_d - - def verify_donestamp(self, ud, d): - """Verify the donestamp file""" - proxy_m, proxy_ud, proxy_d = self._get_proxy_method(ud, d) - return proxy_m.verify_donestamp(proxy_ud, proxy_d) - - def update_donestamp(self, ud, d): - """Update the donestamp file""" - proxy_m, proxy_ud, proxy_d = self._get_proxy_method(ud, d) - proxy_m.update_donestamp(proxy_ud, proxy_d) - - def need_update(self, ud, d): - """Force a fetch, even if localpath exists ?""" - if not os.path.exists(ud.resolvefile): - return True - if ud.version == "latest": - return True - proxy_m, proxy_ud, proxy_d = self._get_proxy_method(ud, d) - return proxy_m.need_update(proxy_ud, proxy_d) - - def try_mirrors(self, fetch, ud, d, mirrors): - """Try to use a mirror""" - proxy_m, proxy_ud, proxy_d = self._get_proxy_method(ud, d) - return proxy_m.try_mirrors(fetch, proxy_ud, proxy_d, mirrors) - - def download(self, ud, d): - """Fetch url""" - self._setup_proxy(ud, d) - ud.proxy.download() - - def unpack(self, ud, rootdir, d): - """Unpack the downloaded archive""" - destsuffix = ud.parm.get("destsuffix", "npm") - destdir = 
os.path.join(rootdir, destsuffix) - npm_unpack(ud.localpath, destdir, d) - ud.unpack_tracer.unpack("npm", destdir) - - def clean(self, ud, d): - """Clean any existing full or partial download""" - if os.path.exists(ud.resolvefile): - self._setup_proxy(ud, d) - ud.proxy.clean() - bb.utils.remove(ud.resolvefile) - - def done(self, ud, d): - """Is the download done ?""" - if not os.path.exists(ud.resolvefile): - return False - proxy_m, proxy_ud, proxy_d = self._get_proxy_method(ud, d) - return proxy_m.done(proxy_ud, proxy_d) diff --git a/bitbake/lib/bb/fetch2/npmsw.py b/bitbake/lib/bb/fetch2/npmsw.py deleted file mode 100644 index 2f9599ee9e..0000000000 --- a/bitbake/lib/bb/fetch2/npmsw.py +++ /dev/null @@ -1,291 +0,0 @@ -# Copyright (C) 2020 Savoir-Faire Linux -# -# SPDX-License-Identifier: GPL-2.0-only -# -""" -BitBake 'Fetch' npm shrinkwrap implementation - -npm fetcher support the SRC_URI with format of: -SRC_URI = "npmsw://some.registry.url;OptionA=xxx;OptionB=xxx;..." - -Supported SRC_URI options are: - -- dev - Set to 1 to also install devDependencies. - -- destsuffix - Specifies the directory to use to unpack the dependencies (default: ${S}). -""" - -import json -import os -import re -import bb -from bb.fetch2 import Fetch -from bb.fetch2 import FetchMethod -from bb.fetch2 import ParameterError -from bb.fetch2 import runfetchcmd -from bb.fetch2 import URI -from bb.fetch2.npm import npm_integrity -from bb.fetch2.npm import npm_localfile -from bb.fetch2.npm import npm_unpack -from bb.utils import is_semver -from bb.utils import lockfile -from bb.utils import unlockfile - -def foreach_dependencies(shrinkwrap, callback=None, dev=False): - """ - Run a callback for each dependencies of a shrinkwrap file. - The callback is using the format: - callback(name, data, location) - with: - name = the package name (string) - data = the package data (dictionary) - location = the location of the package (string) - """ - packages = shrinkwrap.get("packages") - if not packages: - raise FetchError("Invalid shrinkwrap file format") - - for location, data in packages.items(): - # Skip empty main and local link target packages - if not location.startswith('node_modules/'): - continue - elif not dev and data.get("dev", False): - continue - elif data.get("inBundle", False): - continue - name = location.split('node_modules/')[-1] - callback(name, data, location) - -class NpmShrinkWrap(FetchMethod): - """Class to fetch all package from a shrinkwrap file""" - - def supports(self, ud, d): - """Check if a given url can be fetched with npmsw""" - return ud.type in ["npmsw"] - - def urldata_init(self, ud, d): - """Init npmsw specific variables within url data""" - - # Get the 'shrinkwrap' parameter - ud.shrinkwrap_file = re.sub(r"^npmsw://", "", ud.url.split(";")[0]) - - # Get the 'dev' parameter - ud.dev = bb.utils.to_boolean(ud.parm.get("dev"), False) - - # Resolve the dependencies - ud.deps = [] - - def _resolve_dependency(name, params, destsuffix): - url = None - localpath = None - extrapaths = [] - unpack = True - - integrity = params.get("integrity") - resolved = params.get("resolved") - version = params.get("version") - link = params.get("link", False) - - # Handle link sources - if link: - localpath = resolved - unpack = False - - # Handle registry sources - elif version and is_semver(version) and integrity: - # Handle duplicate dependencies without url - if not resolved: - return - - localfile = npm_localfile(name, version) - - uri = URI(resolved) - uri.params["downloadfilename"] = localfile - - 
checksum_name, checksum_expected = npm_integrity(integrity) - uri.params[checksum_name] = checksum_expected - - url = str(uri) - - localpath = os.path.join(d.getVar("DL_DIR"), localfile) - - # Create a resolve file to mimic the npm fetcher and allow - # re-usability of the downloaded file. - resolvefile = localpath + ".resolved" - - bb.utils.mkdirhier(os.path.dirname(resolvefile)) - with open(resolvefile, "w") as f: - f.write(url) - - extrapaths.append(resolvefile) - - # Handle http tarball sources - elif resolved.startswith("http") and integrity: - localfile = npm_localfile(os.path.basename(resolved)) - - uri = URI(resolved) - uri.params["downloadfilename"] = localfile - - checksum_name, checksum_expected = npm_integrity(integrity) - uri.params[checksum_name] = checksum_expected - - url = str(uri) - - localpath = os.path.join(d.getVar("DL_DIR"), localfile) - - # Handle local tarball sources - elif resolved.startswith("file"): - localpath = resolved[5:] - - # Handle git sources - elif resolved.startswith("git"): - regex = re.compile(r""" - ^ - git\+ - (?P<protocol>[a-z]+) - :// - (?P<url>[^#]+) - \# - (?P<rev>[0-9a-f]+) - $ - """, re.VERBOSE) - - match = regex.match(resolved) - if not match: - raise ParameterError("Invalid git url: %s" % resolved, ud.url) - - groups = match.groupdict() - - uri = URI("git://" + str(groups["url"])) - uri.params["protocol"] = str(groups["protocol"]) - uri.params["rev"] = str(groups["rev"]) - uri.params["nobranch"] = "1" - uri.params["destsuffix"] = destsuffix - - url = str(uri) - - else: - raise ParameterError("Unsupported dependency: %s" % name, ud.url) - - # name is needed by unpack tracer for module mapping - ud.deps.append({ - "name": name, - "url": url, - "localpath": localpath, - "extrapaths": extrapaths, - "destsuffix": destsuffix, - "unpack": unpack, - }) - - try: - with open(ud.shrinkwrap_file, "r") as f: - shrinkwrap = json.load(f) - except Exception as e: - raise ParameterError("Invalid shrinkwrap file: %s" % str(e), ud.url) - - foreach_dependencies(shrinkwrap, _resolve_dependency, ud.dev) - - # Avoid conflicts between the environment data and: - # - the proxy url revision - # - the proxy url checksum - data = bb.data.createCopy(d) - data.delVar("SRCREV") - data.delVarFlags("SRC_URI") - - # This fetcher resolves multiple URIs from a shrinkwrap file and then - # forwards them to a proxy fetcher. The management of the donestamp file, - # the lockfile and the checksums are forwarded to the proxy fetcher.
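A worked example of the git-reference translation above (the input string is hypothetical, and this is a plain-string sketch rather than the URI class):

    # Sketch: "git+https://host/user/repo.git#<sha1>" becomes an ordinary
    # git fetcher URI with protocol, rev and nobranch parameters set.
    def translate_git_dep(resolved, destsuffix):
        assert resolved.startswith("git+")
        protocol, rest = resolved[4:].split("://", 1)
        url, rev = rest.rsplit("#", 1)
        return ("git://%s;protocol=%s;rev=%s;nobranch=1;destsuffix=%s"
                % (url, protocol, rev, destsuffix))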
-        shrinkwrap_urls = [dep["url"] for dep in ud.deps if dep["url"]]
-        if shrinkwrap_urls:
-            ud.proxy = Fetch(shrinkwrap_urls, data)
-        ud.needdonestamp = False
-
-    @staticmethod
-    def _foreach_proxy_method(ud, handle):
-        returns = []
-        # Check if there are dependencies before trying to fetch them
-        if len(ud.deps) > 0:
-            for proxy_url in ud.proxy.urls:
-                proxy_ud = ud.proxy.ud[proxy_url]
-                proxy_d = ud.proxy.d
-                proxy_ud.setup_localpath(proxy_d)
-                lf = lockfile(proxy_ud.lockfile)
-                returns.append(handle(proxy_ud.method, proxy_ud, proxy_d))
-                unlockfile(lf)
-        return returns
-
-    def verify_donestamp(self, ud, d):
-        """Verify the donestamp file"""
-        def _handle(m, ud, d):
-            return m.verify_donestamp(ud, d)
-        return all(self._foreach_proxy_method(ud, _handle))
-
-    def update_donestamp(self, ud, d):
-        """Update the donestamp file"""
-        def _handle(m, ud, d):
-            m.update_donestamp(ud, d)
-        self._foreach_proxy_method(ud, _handle)
-
-    def need_update(self, ud, d):
-        """Force a fetch, even if localpath exists?"""
-        def _handle(m, ud, d):
-            return m.need_update(ud, d)
-        return all(self._foreach_proxy_method(ud, _handle))
-
-    def try_mirrors(self, fetch, ud, d, mirrors):
-        """Try to use a mirror"""
-        def _handle(m, ud, d):
-            return m.try_mirrors(fetch, ud, d, mirrors)
-        return all(self._foreach_proxy_method(ud, _handle))
-
-    def download(self, ud, d):
-        """Fetch url"""
-        ud.proxy.download()
-
-    def unpack(self, ud, rootdir, d):
-        """Unpack the downloaded dependencies"""
-        destdir = rootdir
-        destsuffix = ud.parm.get("destsuffix")
-        if destsuffix:
-            destdir = os.path.join(rootdir, destsuffix)
-        ud.unpack_tracer.unpack("npm-shrinkwrap", destdir)
-
-        bb.utils.mkdirhier(destdir)
-        bb.utils.copyfile(ud.shrinkwrap_file,
-                          os.path.join(destdir, "npm-shrinkwrap.json"))
-
-        auto = [dep["url"] for dep in ud.deps if not dep["localpath"]]
-        manual = [dep for dep in ud.deps if dep["localpath"]]
-
-        if auto:
-            ud.proxy.unpack(destdir, auto)
-
-        for dep in manual:
-            depdestdir = os.path.join(destdir, dep["destsuffix"])
-            if dep["url"]:
-                npm_unpack(dep["localpath"], depdestdir, d)
-            else:
-                depsrcdir = os.path.join(destdir, dep["localpath"])
-                if dep["unpack"]:
-                    npm_unpack(depsrcdir, depdestdir, d)
-                else:
-                    bb.utils.mkdirhier(depdestdir)
-                    cmd = 'cp -fpPRH "%s/." .' % (depsrcdir)
-                    runfetchcmd(cmd, d, workdir=depdestdir)
-
-    def clean(self, ud, d):
-        """Clean any existing full or partial download"""
-        ud.proxy.clean()
-
-        # Clean extra files
-        for dep in ud.deps:
-            for path in dep["extrapaths"]:
-                bb.utils.remove(path)
-
-    def done(self, ud, d):
-        """Is the download done?"""
-        def _handle(m, ud, d):
-            return m.done(ud, d)
-        return all(self._foreach_proxy_method(ud, _handle))
diff --git a/bitbake/lib/bb/fetch2/osc.py b/bitbake/lib/bb/fetch2/osc.py
deleted file mode 100644
index 495ac8a30a..0000000000
--- a/bitbake/lib/bb/fetch2/osc.py
+++ /dev/null
@@ -1,165 +0,0 @@
-#
-# Copyright BitBake Contributors
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-"""
-Bitbake "Fetch" implementation for osc (Opensuse build service client).
-Based on the svn "Fetch" implementation.
-
-"""
-
-import logging
-import os
-import re
-import bb
-from bb.fetch2 import FetchMethod
-from bb.fetch2 import FetchError
-from bb.fetch2 import MissingParameterError
-from bb.fetch2 import runfetchcmd
-
-logger = logging.getLogger(__name__)
-
-class Osc(FetchMethod):
-    """Class to fetch a module or modules from Opensuse build server
-       repositories."""
-
-    def supports(self, ud, d):
-        """
-        Check to see if a given url can be fetched with osc.
- """ - return ud.type in ['osc'] - - def urldata_init(self, ud, d): - if not "module" in ud.parm: - raise MissingParameterError('module', ud.url) - - ud.module = ud.parm["module"] - - # Create paths to osc checkouts - oscdir = d.getVar("OSCDIR") or (d.getVar("DL_DIR") + "/osc") - relpath = self._strip_leading_slashes(ud.path) - ud.oscdir = oscdir - ud.pkgdir = os.path.join(oscdir, ud.host) - ud.moddir = os.path.join(ud.pkgdir, relpath, ud.module) - - if 'rev' in ud.parm: - ud.revision = ud.parm['rev'] - else: - pv = d.getVar("PV", False) - rev = bb.fetch2.srcrev_internal_helper(ud, d, '') - if rev: - ud.revision = rev - else: - ud.revision = "" - - ud.localfile = d.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), relpath.replace('/', '.'), ud.revision)) - - def _buildosccommand(self, ud, d, command): - """ - Build up an ocs commandline based on ud - command is "fetch", "update", "info" - """ - - basecmd = d.getVar("FETCHCMD_osc") or "/usr/bin/env osc" - - proto = ud.parm.get('protocol', 'https') - - options = [] - - config = "-c %s" % self.generate_config(ud, d) - - if getattr(ud, 'revision', ''): - options.append("-r %s" % ud.revision) - - coroot = self._strip_leading_slashes(ud.path) - - if command == "fetch": - osccmd = "%s %s -A %s://%s co %s/%s %s" % (basecmd, config, proto, ud.host, coroot, ud.module, " ".join(options)) - elif command == "update": - osccmd = "%s %s -A %s://%s up %s" % (basecmd, config, proto, ud.host, " ".join(options)) - elif command == "api_source": - osccmd = "%s %s -A %s://%s api source/%s/%s" % (basecmd, config, proto, ud.host, coroot, ud.module) - else: - raise FetchError("Invalid osc command %s" % command, ud.url) - - return osccmd - - def _latest_revision(self, ud, d, name): - """ - Fetch latest revision for the given package - """ - api_source_cmd = self._buildosccommand(ud, d, "api_source") - - output = runfetchcmd(api_source_cmd, d) - match = re.match(r'', output) - if match is None: - raise FetchError("Unable to parse osc response", ud.url) - return match.groups()[0] - - def _revision_key(self, ud, d, name): - """ - Return a unique key for the url - """ - # Collapse adjacent slashes - slash_re = re.compile(r"/+") - rev = getattr(ud, 'revision', "latest") - return "osc:%s%s.%s.%s" % (ud.host, slash_re.sub(".", ud.path), name, rev) - - def download(self, ud, d): - """ - Fetch url - """ - - logger.debug2("Fetch: checking for module directory '" + ud.moddir + "'") - - if os.access(ud.moddir, os.R_OK): - oscupdatecmd = self._buildosccommand(ud, d, "update") - logger.info("Update "+ ud.url) - # update sources there - logger.debug("Running %s", oscupdatecmd) - bb.fetch2.check_network_access(d, oscupdatecmd, ud.url) - runfetchcmd(oscupdatecmd, d, workdir=ud.moddir) - else: - oscfetchcmd = self._buildosccommand(ud, d, "fetch") - logger.info("Fetch " + ud.url) - # check out sources there - bb.utils.mkdirhier(ud.pkgdir) - logger.debug("Running %s", oscfetchcmd) - bb.fetch2.check_network_access(d, oscfetchcmd, ud.url) - runfetchcmd(oscfetchcmd, d, workdir=ud.pkgdir) - - # tar them up to a defined filename - runfetchcmd("tar -czf %s %s" % (ud.localpath, ud.module), d, - cleanup=[ud.localpath], workdir=os.path.join(ud.pkgdir + ud.path)) - - def supports_srcrev(self): - return False - - def generate_config(self, ud, d): - """ - Generate a .oscrc to be used for this run. 
- """ - - config_path = os.path.join(ud.oscdir, "oscrc") - if not os.path.exists(ud.oscdir): - bb.utils.mkdirhier(ud.oscdir) - - if (os.path.exists(config_path)): - os.remove(config_path) - - f = open(config_path, 'w') - proto = ud.parm.get('protocol', 'https') - f.write("[general]\n") - f.write("apiurl = %s://%s\n" % (proto, ud.host)) - f.write("su-wrapper = su -c\n") - f.write("build-root = %s\n" % d.getVar('WORKDIR')) - f.write("urllist = %s\n" % d.getVar("OSCURLLIST")) - f.write("extra-pkgs = gzip\n") - f.write("\n") - f.write("[%s://%s]\n" % (proto, ud.host)) - f.write("user = %s\n" % ud.parm["user"]) - f.write("pass = %s\n" % ud.parm["pswd"]) - f.close() - - return config_path diff --git a/bitbake/lib/bb/fetch2/perforce.py b/bitbake/lib/bb/fetch2/perforce.py deleted file mode 100644 index 3b6fa4b1ec..0000000000 --- a/bitbake/lib/bb/fetch2/perforce.py +++ /dev/null @@ -1,267 +0,0 @@ -""" -BitBake 'Fetch' implementation for perforce - -Supported SRC_URI options are: - -- module - The top-level location to fetch while preserving the remote paths - - The value of module can point to either a directory or a file. The result, - in both cases, is that the fetcher will preserve all file paths starting - from the module path. That is, the top-level directory in the module value - will also be the top-level directory in P4DIR. - -- remotepath - If the value "keep" is given, the full depot location of each file is - preserved in P4DIR. This option overrides the effect of the module option. - -""" - -# Copyright (C) 2003, 2004 Chris Larson -# Copyright (C) 2016 Kodak Alaris, Inc. -# -# SPDX-License-Identifier: GPL-2.0-only -# -# Based on functions from the base bb module, Copyright 2003 Holger Schurig - -import os -import bb -from bb.fetch2 import FetchMethod -from bb.fetch2 import FetchError -from bb.fetch2 import logger -from bb.fetch2 import runfetchcmd - -class PerforceProgressHandler (bb.progress.BasicProgressHandler): - """ - Implements basic progress information for perforce, based on the number of - files to be downloaded. - - The p4 print command will print one line per file, therefore it can be used - to "count" the number of files already completed and give an indication of - the progress. - """ - def __init__(self, d, num_files): - self._num_files = num_files - self._count = 0 - super(PerforceProgressHandler, self).__init__(d) - - # Send an initial progress event so the bar gets shown - self._fire_progress(-1) - - def write(self, string): - self._count = self._count + 1 - - percent = int(100.0 * float(self._count) / float(self._num_files)) - - # In case something goes wrong, we try to preserve our sanity - if percent > 100: - percent = 100 - - self.update(percent) - - super(PerforceProgressHandler, self).write(string) - -class Perforce(FetchMethod): - """ Class to fetch from perforce repositories """ - def supports(self, ud, d): - """ Check to see if a given url can be fetched with perforce. """ - return ud.type in ['p4'] - - def urldata_init(self, ud, d): - """ - Initialize perforce specific variables within url data. If P4CONFIG is - provided by the env, use it. If P4PORT is specified by the recipe, use - its values, which may override the settings in P4CONFIG. 
- """ - ud.basecmd = d.getVar("FETCHCMD_p4") or "/usr/bin/env p4" - - ud.dldir = d.getVar("P4DIR") or (d.getVar("DL_DIR") + "/p4") - - path = ud.url.split('://')[1] - path = path.split(';')[0] - delim = path.find('@'); - if delim != -1: - (ud.user, ud.pswd) = path.split('@')[0].split(':') - ud.path = path.split('@')[1] - else: - ud.path = path - - ud.usingp4config = False - p4port = d.getVar('P4PORT') - - if p4port: - logger.debug('Using recipe provided P4PORT: %s' % p4port) - ud.host = p4port - else: - logger.debug('Trying to use P4CONFIG to automatically set P4PORT...') - ud.usingp4config = True - p4cmd = '%s info | grep "Server address"' % ud.basecmd - bb.fetch2.check_network_access(d, p4cmd, ud.url) - ud.host = runfetchcmd(p4cmd, d, True) - ud.host = ud.host.split(': ')[1].strip() - logger.debug('Determined P4PORT to be: %s' % ud.host) - if not ud.host: - raise FetchError('Could not determine P4PORT from P4CONFIG') - - # Fetcher options - ud.module = ud.parm.get('module') - ud.keepremotepath = (ud.parm.get('remotepath', '') == 'keep') - - if ud.path.find('/...') >= 0: - ud.pathisdir = True - else: - ud.pathisdir = False - - # Avoid using the "/..." syntax in SRC_URI when a module value is given - if ud.pathisdir and ud.module: - raise FetchError('SRC_URI depot path cannot not end in /... when a module value is given') - - cleanedpath = ud.path.replace('/...', '').replace('/', '.') - cleanedhost = ud.host.replace(':', '.') - - cleanedmodule = "" - # Merge the path and module into the final depot location - if ud.module: - if ud.module.find('/') == 0: - raise FetchError('module cannot begin with /') - ud.path = os.path.join(ud.path, ud.module) - - # Append the module path to the local pkg name - cleanedmodule = ud.module.replace('/...', '').replace('/', '.') - cleanedpath += '--%s' % cleanedmodule - - ud.pkgdir = os.path.join(ud.dldir, cleanedhost, cleanedpath) - - ud.setup_revisions(d) - - ud.localfile = d.expand('%s_%s_%s_%s.tar.gz' % (cleanedhost, cleanedpath, cleanedmodule, ud.revision)) - - def _buildp4command(self, ud, d, command, depot_filename=None): - """ - Build a p4 commandline. Valid commands are "changes", "print", and - "files". depot_filename is the full path to the file in the depot - including the trailing '#rev' value. 
- """ - p4opt = "" - - if ud.user: - p4opt += ' -u "%s"' % (ud.user) - - if ud.pswd: - p4opt += ' -P "%s"' % (ud.pswd) - - if ud.host and not ud.usingp4config: - p4opt += ' -p %s' % (ud.host) - - if hasattr(ud, 'revision') and ud.revision: - pathnrev = '%s@%s' % (ud.path, ud.revision) - else: - pathnrev = '%s' % (ud.path) - - if depot_filename: - if ud.keepremotepath: - # preserve everything, remove the leading // - filename = depot_filename.lstrip('/') - elif ud.module: - # remove everything up to the module path - modulepath = ud.module.rstrip('/...') - filename = depot_filename[depot_filename.rfind(modulepath):] - elif ud.pathisdir: - # Remove leading (visible) path to obtain the filepath - filename = depot_filename[len(ud.path)-1:] - else: - # Remove everything, except the filename - filename = depot_filename[depot_filename.rfind('/'):] - - filename = filename[:filename.find('#')] # Remove trailing '#rev' - - if command == 'changes': - p4cmd = '%s%s changes -m 1 //%s' % (ud.basecmd, p4opt, pathnrev) - elif command == 'print': - if depot_filename is not None: - p4cmd = '%s%s print -o "p4/%s" "%s"' % (ud.basecmd, p4opt, filename, depot_filename) - else: - raise FetchError('No depot file name provided to p4 %s' % command, ud.url) - elif command == 'files': - p4cmd = '%s%s files //%s' % (ud.basecmd, p4opt, pathnrev) - else: - raise FetchError('Invalid p4 command %s' % command, ud.url) - - return p4cmd - - def _p4listfiles(self, ud, d): - """ - Return a list of the file names which are present in the depot using the - 'p4 files' command, including trailing '#rev' file revision indicator - """ - p4cmd = self._buildp4command(ud, d, 'files') - bb.fetch2.check_network_access(d, p4cmd, ud.url) - p4fileslist = runfetchcmd(p4cmd, d, True) - p4fileslist = [f.rstrip() for f in p4fileslist.splitlines()] - - if not p4fileslist: - raise FetchError('Unable to fetch listing of p4 files from %s@%s' % (ud.host, ud.path)) - - count = 0 - filelist = [] - - for filename in p4fileslist: - item = filename.split(' - ') - lastaction = item[1].split() - logger.debug('File: %s Last Action: %s' % (item[0], lastaction[0])) - if lastaction[0] == 'delete': - continue - filelist.append(item[0]) - - return filelist - - def download(self, ud, d): - """ Get the list of files, fetch each one """ - filelist = self._p4listfiles(ud, d) - if not filelist: - raise FetchError('No files found in depot %s@%s' % (ud.host, ud.path)) - - bb.utils.remove(ud.pkgdir, True) - bb.utils.mkdirhier(ud.pkgdir) - - progresshandler = PerforceProgressHandler(d, len(filelist)) - - for afile in filelist: - p4fetchcmd = self._buildp4command(ud, d, 'print', afile) - bb.fetch2.check_network_access(d, p4fetchcmd, ud.url) - runfetchcmd(p4fetchcmd, d, workdir=ud.pkgdir, log=progresshandler) - - runfetchcmd('tar -czf %s p4' % (ud.localpath), d, cleanup=[ud.localpath], workdir=ud.pkgdir) - - def clean(self, ud, d): - """ Cleanup p4 specific files and dirs""" - bb.utils.remove(ud.localpath) - bb.utils.remove(ud.pkgdir, True) - - def supports_srcrev(self): - return True - - def _revision_key(self, ud, d, name): - """ Return a unique key for the url """ - return 'p4:%s' % ud.pkgdir - - def _latest_revision(self, ud, d, name): - """ Return the latest upstream scm revision number """ - p4cmd = self._buildp4command(ud, d, "changes") - bb.fetch2.check_network_access(d, p4cmd, ud.url) - tip = runfetchcmd(p4cmd, d, True) - - if not tip: - raise FetchError('Could not determine the latest perforce changelist') - - tipcset = tip.split(' ')[1] - logger.debug('p4 tip 
found to be changelist %s' % tipcset) - return tipcset - - def sortable_revision(self, ud, d, name): - """ Return a sortable revision number """ - return False, self._build_revision(ud, d) - - def _build_revision(self, ud, d): - return ud.revision - diff --git a/bitbake/lib/bb/fetch2/repo.py b/bitbake/lib/bb/fetch2/repo.py deleted file mode 100644 index fa4cb8149b..0000000000 --- a/bitbake/lib/bb/fetch2/repo.py +++ /dev/null @@ -1,87 +0,0 @@ -""" -BitBake "Fetch" repo (git) implementation - -""" - -# Copyright (C) 2009 Tom Rini -# -# Based on git.py which is: -# Copyright (C) 2005 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import os -import bb -from bb.fetch2 import FetchMethod -from bb.fetch2 import runfetchcmd -from bb.fetch2 import logger - -class Repo(FetchMethod): - """Class to fetch a module or modules from repo (git) repositories""" - def supports(self, ud, d): - """ - Check to see if a given url can be fetched with repo. - """ - return ud.type in ["repo"] - - def urldata_init(self, ud, d): - """ - We don"t care about the git rev of the manifests repository, but - we do care about the manifest to use. The default is "default". - We also care about the branch or tag to be used. The default is - "master". - """ - - ud.basecmd = d.getVar("FETCHCMD_repo") or "/usr/bin/env repo" - - ud.proto = ud.parm.get('protocol', 'git') - ud.branch = ud.parm.get('branch', 'master') - ud.manifest = ud.parm.get('manifest', 'default.xml') - if not ud.manifest.endswith('.xml'): - ud.manifest += '.xml' - - ud.localfile = d.expand("repo_%s%s_%s_%s.tar.gz" % (ud.host, ud.path.replace("/", "."), ud.manifest, ud.branch)) - - def download(self, ud, d): - """Fetch url""" - - if os.access(os.path.join(d.getVar("DL_DIR"), ud.localfile), os.R_OK): - logger.debug("%s already exists (or was stashed). Skipping repo init / sync.", ud.localpath) - return - - repodir = d.getVar("REPODIR") or (d.getVar("DL_DIR") + "/repo") - gitsrcname = "%s%s" % (ud.host, ud.path.replace("/", ".")) - codir = os.path.join(repodir, gitsrcname, ud.manifest) - - if ud.user: - username = ud.user + "@" - else: - username = "" - - repodir = os.path.join(codir, "repo") - bb.utils.mkdirhier(repodir) - if not os.path.exists(os.path.join(repodir, ".repo")): - bb.fetch2.check_network_access(d, "%s init -m %s -b %s -u %s://%s%s%s" % (ud.basecmd, ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), ud.url) - runfetchcmd("%s init -m %s -b %s -u %s://%s%s%s" % (ud.basecmd, ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), d, workdir=repodir) - - bb.fetch2.check_network_access(d, "%s sync %s" % (ud.basecmd, ud.url), ud.url) - runfetchcmd("%s sync" % ud.basecmd, d, workdir=repodir) - - scmdata = ud.parm.get("scmdata", "") - if scmdata == "keep": - tar_flags = "" - else: - tar_flags = "--exclude='.repo' --exclude='.git'" - - # Create a cache - runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.join(".", "*") ), d, workdir=codir) - - def supports_srcrev(self): - return False - - def _build_revision(self, ud, d): - return ud.manifest - - def _want_sortable_revision(self, ud, d): - return False diff --git a/bitbake/lib/bb/fetch2/s3.py b/bitbake/lib/bb/fetch2/s3.py deleted file mode 100644 index 22c0538139..0000000000 --- a/bitbake/lib/bb/fetch2/s3.py +++ /dev/null @@ -1,124 +0,0 @@ -""" -BitBake 'Fetch' implementation for Amazon AWS S3. - -Class for fetching files from Amazon S3 using the AWS Command Line Interface. -The aws tool must be correctly installed and configured prior to use. 
- -""" - -# Copyright (C) 2017, Andre McCurdy -# -# Based in part on bb.fetch2.wget: -# Copyright (C) 2003, 2004 Chris Larson -# -# SPDX-License-Identifier: GPL-2.0-only -# -# Based on functions from the base bb module, Copyright 2003 Holger Schurig - -import os -import bb -import urllib.request, urllib.parse, urllib.error -import re -from bb.fetch2 import FetchMethod -from bb.fetch2 import FetchError -from bb.fetch2 import runfetchcmd - -def convertToBytes(value, unit): - value = float(value) - if (unit == "KiB"): - value = value*1024.0; - elif (unit == "MiB"): - value = value*1024.0*1024.0; - elif (unit == "GiB"): - value = value*1024.0*1024.0*1024.0; - return value - -class S3ProgressHandler(bb.progress.LineFilterProgressHandler): - """ - Extract progress information from s3 cp output, e.g.: - Completed 5.1 KiB/8.8 GiB (12.0 MiB/s) with 1 file(s) remaining - """ - def __init__(self, d): - super(S3ProgressHandler, self).__init__(d) - # Send an initial progress event so the bar gets shown - self._fire_progress(0) - - def writeline(self, line): - percs = re.findall(r'^Completed (\d+.{0,1}\d*) (\w+)\/(\d+.{0,1}\d*) (\w+) (\(.+\)) with\s+', line) - if percs: - completed = (percs[-1][0]) - completedUnit = (percs[-1][1]) - total = (percs[-1][2]) - totalUnit = (percs[-1][3]) - completed = convertToBytes(completed, completedUnit) - total = convertToBytes(total, totalUnit) - progress = (completed/total)*100.0 - rate = percs[-1][4] - self.update(progress, rate) - return False - return True - - -class S3(FetchMethod): - """Class to fetch urls via 'aws s3'""" - - def supports(self, ud, d): - """ - Check to see if a given url can be fetched with s3. - """ - return ud.type in ['s3'] - - def recommends_checksum(self, urldata): - return True - - def urldata_init(self, ud, d): - if 'downloadfilename' in ud.parm: - ud.basename = ud.parm['downloadfilename'] - else: - ud.basename = os.path.basename(ud.path) - - ud.localfile = ud.basename - - ud.basecmd = d.getVar("FETCHCMD_s3") or "/usr/bin/env aws s3" - - def download(self, ud, d): - """ - Fetch urls - Assumes localpath was called first - """ - - cmd = '%s cp s3://%s%s %s' % (ud.basecmd, ud.host, ud.path, ud.localpath) - bb.fetch2.check_network_access(d, cmd, ud.url) - - progresshandler = S3ProgressHandler(d) - runfetchcmd(cmd, d, False, log=progresshandler) - - # Additional sanity checks copied from the wget class (although there - # are no known issues which mean these are required, treat the aws cli - # tool with a little healthy suspicion). - - if not os.path.exists(ud.localpath): - raise FetchError("The aws cp command returned success for s3://%s%s but %s doesn't exist?!" % (ud.host, ud.path, ud.localpath)) - - if os.path.getsize(ud.localpath) == 0: - os.remove(ud.localpath) - raise FetchError("The aws cp command for s3://%s%s resulted in a zero size file?! Deleting and failing since this isn't right." % (ud.host, ud.path)) - - return True - - def checkstatus(self, fetch, ud, d): - """ - Check the status of a URL - """ - - cmd = '%s ls s3://%s%s' % (ud.basecmd, ud.host, ud.path) - bb.fetch2.check_network_access(d, cmd, ud.url) - output = runfetchcmd(cmd, d) - - # "aws s3 ls s3://mybucket/foo" will exit with success even if the file - # is not found, so check output of the command to confirm success. 
-
-        if not output:
-            raise FetchError("The aws ls command for s3://%s%s gave empty output" % (ud.host, ud.path))
-
-        return True
diff --git a/bitbake/lib/bb/fetch2/sftp.py b/bitbake/lib/bb/fetch2/sftp.py
deleted file mode 100644
index bee71a0d0d..0000000000
--- a/bitbake/lib/bb/fetch2/sftp.py
+++ /dev/null
@@ -1,112 +0,0 @@
-"""
-BitBake SFTP Fetch implementation
-
-Class for fetching files via SFTP. It tries to adhere to the (now
-expired) IETF Internet Draft for "Uniform Resource Identifier (URI)
-Scheme for Secure File Transfer Protocol (SFTP) and Secure Shell
-(SSH)" (SECSH URI).
-
-It uses SFTP (so as to adhere to the SECSH URI specification). It only
-supports key based authentication, not password. This class, unlike
-the SSH fetcher, does not support fetching a directory tree from the
-remote.
-
-  http://tools.ietf.org/html/draft-ietf-secsh-scp-sftp-ssh-uri-04
-  https://www.iana.org/assignments/uri-schemes/prov/sftp
-  https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13
-
-Please note that '/' is used as the host path separator, and not ':'
-as you may be used to from the scp/sftp commands. You can use a
-~ (tilde) to specify a path relative to your home directory.
-(The /~user/ syntax, for specifying a path relative to another
-user's home directory, is not supported.) Note that the tilde must
-still follow the host path separator ("/"). See examples below.
-
-Example SRC_URIs:
-
-SRC_URI = "sftp://host.example.com/dir/path.file.txt"
-
-A path relative to your home directory.
-
-SRC_URI = "sftp://host.example.com/~/dir/path.file.txt"
-
-You can also specify a username (specifying a password in the
-URI is not supported, use SSH keys to authenticate):
-
-SRC_URI = "sftp://user@host.example.com/dir/path.file.txt"
-
-"""
-
-# Copyright (C) 2013, Olof Johansson
-#
-# Based in part on bb.fetch2.wget:
-#    Copyright (C) 2003, 2004  Chris Larson
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Based on functions from the base bb module, Copyright 2003 Holger Schurig
-
-import os
-import bb
-import urllib.request, urllib.parse, urllib.error
-from bb.fetch2 import URI
-from bb.fetch2 import FetchMethod
-from bb.fetch2 import runfetchcmd
-
-class SFTP(FetchMethod):
-    """Class to fetch urls via 'sftp'"""
-
-    def supports(self, ud, d):
-        """
-        Check to see if a given url can be fetched with sftp.
-        """
-        return ud.type in ['sftp']
-
-    def recommends_checksum(self, urldata):
-        return True
-
-    def urldata_init(self, ud, d):
-        if 'protocol' in ud.parm and ud.parm['protocol'] == 'git':
-            raise bb.fetch2.ParameterError(
-                "Invalid protocol - if you wish to fetch from a " +
-                "git repository using ssh, you need to use the " +
-                "git:// prefix with protocol=ssh", ud.url)
-
-        if 'downloadfilename' in ud.parm:
-            ud.basename = ud.parm['downloadfilename']
-        else:
-            ud.basename = os.path.basename(ud.path)
-
-        ud.localfile = ud.basename
-
-    def download(self, ud, d):
-        """Fetch urls"""
-
-        urlo = URI(ud.url)
-        basecmd = 'sftp -oBatchMode=yes'
-        port = ''
-        if urlo.port:
-            port = '-P %d' % urlo.port
-            urlo.port = None
-
-        dldir = d.getVar('DL_DIR')
-        lpath = os.path.join(dldir, ud.localfile)
-
-        user = ''
-        if urlo.userinfo:
-            user = urlo.userinfo + '@'
-
-        path = urlo.path
-
-        # Support URIs relative to the user's home directory, with
-        # the tilde syntax. (E.g. ).
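-        # Illustrative example (editor's addition, not part of the original
-        # file): for SRC_URI = "sftp://user@host.example.com/~/dir/file.txt"
-        # the URI path is "/~/dir/file.txt"; stripping the leading "/~/" below
-        # leaves "dir/file.txt", which sftp then resolves against the remote
-        # user's home directory.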
-        if path[:3] == '/~/':
-            path = path[3:]
-
-        remote = '"%s%s:%s"' % (user, urlo.hostname, path)
-
-        cmd = '%s %s %s %s' % (basecmd, port, remote, lpath)
-
-        bb.fetch2.check_network_access(d, cmd, ud.url)
-        runfetchcmd(cmd, d)
-        return True
diff --git a/bitbake/lib/bb/fetch2/ssh.py b/bitbake/lib/bb/fetch2/ssh.py
deleted file mode 100644
index 2a0f2cb44b..0000000000
--- a/bitbake/lib/bb/fetch2/ssh.py
+++ /dev/null
@@ -1,154 +0,0 @@
-'''
-BitBake 'Fetch' implementations
-
-This implementation is for Secure Shell (SSH), and attempts to comply with the
-IETF secsh internet draft:
-    http://tools.ietf.org/wg/secsh/draft-ietf-secsh-scp-sftp-ssh-uri/
-
-    Currently does not support the sftp parameters, as this uses scp
-    Also does not support the 'fingerprint' connection parameter.
-
-    Please note that '/' is used as the host/path separator, not ':' as you
-    may be used to; also '~' can be used to specify user HOME, but again
-    only after '/'
-
-    Example SRC_URI:
-    SRC_URI = "ssh://user@host.example.com/dir/path/file.txt"
-    SRC_URI = "ssh://user@host.example.com/~/file.txt"
-'''
-
-# Copyright (C) 2006  OpenedHand Ltd.
-#
-#
-# Based in part on svk.py:
-#    Copyright (C) 2006 Holger Hans Peter Freyther
-#    Based on svn.py:
-#        Copyright (C) 2003, 2004 Chris Larson
-#    Based on functions from the base bb module:
-#        Copyright 2003 Holger Schurig
-#
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-
-import re, os
-from bb.fetch2 import check_network_access, FetchMethod, ParameterError, runfetchcmd
-import urllib
-
-
-__pattern__ = re.compile(r'''
-    \s*                      # Skip leading whitespace
-    ssh://                   # scheme
-    (                        # Optional username/password block
-        (?P<user>\S+)        # username
-        (:(?P<pass>\S+))?    # colon followed by the password (optional)
-        (?P<cparam>(;[^;]+)*)?  # connection parameters block (optional)
-    @
-    )?
-    (?P<host>\S+?)           # non-greedy match of the host
-    (:(?P<port>[0-9]+))?     # colon followed by the port (optional)
-    /
-    (?P<path>[^;]+)          # path on the remote system, may be absolute or relative,
-                             # and may include the use of '~' to reference the remote home
-                             # directory
-    (?P<sparam>(;[^;]+)*)?
# parameters block (optional) - $ -''', re.VERBOSE) - -class SSH(FetchMethod): - '''Class to fetch a module or modules via Secure Shell''' - - def supports(self, urldata, d): - return __pattern__.match(urldata.url) is not None - - def supports_checksum(self, urldata): - return False - - def urldata_init(self, urldata, d): - if 'protocol' in urldata.parm and urldata.parm['protocol'] == 'git': - raise ParameterError( - "Invalid protocol - if you wish to fetch from a git " + - "repository using ssh, you need to use " + - "git:// prefix with protocol=ssh", urldata.url) - m = __pattern__.match(urldata.url) - path = m.group('path') - path = urllib.parse.unquote(path) - host = m.group('host') - urldata.localfile = os.path.basename(os.path.normpath(path)) - - def download(self, urldata, d): - dldir = d.getVar('DL_DIR') - - m = __pattern__.match(urldata.url) - path = m.group('path') - host = m.group('host') - port = m.group('port') - user = m.group('user') - password = m.group('pass') - - if port: - portarg = '-P %s' % port - else: - portarg = '' - - if user: - fr = user - if password: - fr += ':%s' % password - fr += '@%s' % host - else: - fr = host - - if path[0] != '~': - path = '/%s' % path - path = urllib.parse.unquote(path) - - fr += ':%s' % path - - cmd = 'scp -B -r %s %s %s/' % ( - portarg, - fr, - dldir - ) - - check_network_access(d, cmd, urldata.url) - - runfetchcmd(cmd, d) - - def checkstatus(self, fetch, urldata, d): - """ - Check the status of the url - """ - m = __pattern__.match(urldata.url) - path = m.group('path') - host = m.group('host') - port = m.group('port') - user = m.group('user') - password = m.group('pass') - - if port: - portarg = '-P %s' % port - else: - portarg = '' - - if user: - fr = user - if password: - fr += ':%s' % password - fr += '@%s' % host - else: - fr = host - - if path[0] != '~': - path = '/%s' % path - path = urllib.parse.unquote(path) - - cmd = 'ssh -o BatchMode=true %s %s [ -f %s ]' % ( - portarg, - fr, - path - ) - - check_network_access(d, cmd, urldata.url) - runfetchcmd(cmd, d) - - return True diff --git a/bitbake/lib/bb/fetch2/svn.py b/bitbake/lib/bb/fetch2/svn.py deleted file mode 100644 index 0852108e7d..0000000000 --- a/bitbake/lib/bb/fetch2/svn.py +++ /dev/null @@ -1,215 +0,0 @@ -""" -BitBake 'Fetch' implementation for svn. - -""" - -# Copyright (C) 2003, 2004 Chris Larson -# Copyright (C) 2004 Marcin Juszkiewicz -# -# SPDX-License-Identifier: GPL-2.0-only -# -# Based on functions from the base bb module, Copyright 2003 Holger Schurig - -import os -import bb -import re -from bb.fetch2 import FetchMethod -from bb.fetch2 import FetchError -from bb.fetch2 import MissingParameterError -from bb.fetch2 import runfetchcmd -from bb.fetch2 import logger - -class Svn(FetchMethod): - """Class to fetch a module or modules from svn repositories""" - def supports(self, ud, d): - """ - Check to see if a given url can be fetched with svn. 
- """ - return ud.type in ['svn'] - - def urldata_init(self, ud, d): - """ - init svn specific variable within url data - """ - if not "module" in ud.parm: - raise MissingParameterError('module', ud.url) - - ud.basecmd = d.getVar("FETCHCMD_svn") or "/usr/bin/env svn --non-interactive --trust-server-cert" - - ud.module = ud.parm["module"] - - if not "path_spec" in ud.parm: - ud.path_spec = ud.module - else: - ud.path_spec = ud.parm["path_spec"] - - # Create paths to svn checkouts - svndir = d.getVar("SVNDIR") or (d.getVar("DL_DIR") + "/svn") - relpath = self._strip_leading_slashes(ud.path) - ud.pkgdir = os.path.join(svndir, ud.host, relpath) - ud.moddir = os.path.join(ud.pkgdir, ud.path_spec) - # Protects the repository from concurrent updates, e.g. from two - # recipes fetching different revisions at the same time - ud.svnlock = os.path.join(ud.pkgdir, "svn.lock") - - ud.setup_revisions(d) - - if 'rev' in ud.parm: - ud.revision = ud.parm['rev'] - - # Whether to use the @REV peg-revision syntax in the svn command or not - ud.pegrevision = True - if 'nopegrevision' in ud.parm: - ud.pegrevision = False - - ud.localfile = d.expand('%s_%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision, ["0", "1"][ud.pegrevision])) - - def _buildsvncommand(self, ud, d, command): - """ - Build up an svn commandline based on ud - command is "fetch", "update", "info" - """ - - proto = ud.parm.get('protocol', 'svn') - - svn_ssh = None - if proto == "svn+ssh" and "ssh" in ud.parm: - svn_ssh = ud.parm["ssh"] - - svnroot = ud.host + ud.path - - options = [] - - options.append("--no-auth-cache") - - if ud.user: - options.append("--username %s" % ud.user) - - if ud.pswd: - options.append("--password %s" % ud.pswd) - - if command == "info": - svncmd = "%s info %s %s://%s/%s/" % (ud.basecmd, " ".join(options), proto, svnroot, ud.module) - elif command == "log1": - svncmd = "%s log --limit 1 --quiet %s %s://%s/%s/" % (ud.basecmd, " ".join(options), proto, svnroot, ud.module) - else: - suffix = "" - - # externals may be either 'allowed' or 'nowarn', but not both. Allowed - # will not issue a warning, but will log to the debug buffer what has likely - # been downloaded by SVN. 
- if not ("externals" in ud.parm and ud.parm["externals"] == "allowed"): - options.append("--ignore-externals") - - if ud.revision: - options.append("-r %s" % ud.revision) - if ud.pegrevision: - suffix = "@%s" % (ud.revision) - - if command == "fetch": - transportuser = ud.parm.get("transportuser", "") - svncmd = "%s co %s %s://%s%s/%s%s %s" % (ud.basecmd, " ".join(options), proto, transportuser, svnroot, ud.module, suffix, ud.path_spec) - elif command == "update": - svncmd = "%s update %s" % (ud.basecmd, " ".join(options)) - else: - raise FetchError("Invalid svn command %s" % command, ud.url) - - if svn_ssh: - svncmd = "SVN_SSH=\"%s\" %s" % (svn_ssh, svncmd) - - return svncmd - - def download(self, ud, d): - """Fetch url""" - - logger.debug2("Fetch: checking for module directory '" + ud.moddir + "'") - - lf = bb.utils.lockfile(ud.svnlock) - - try: - if os.access(os.path.join(ud.moddir, '.svn'), os.R_OK): - svncmd = self._buildsvncommand(ud, d, "update") - logger.info("Update " + ud.url) - # We need to attempt to run svn upgrade first in case its an older working format - try: - runfetchcmd(ud.basecmd + " upgrade", d, workdir=ud.moddir) - except FetchError: - pass - logger.debug("Running %s", svncmd) - bb.fetch2.check_network_access(d, svncmd, ud.url) - runfetchcmd(svncmd, d, workdir=ud.moddir) - else: - svncmd = self._buildsvncommand(ud, d, "fetch") - logger.info("Fetch " + ud.url) - # check out sources there - bb.utils.mkdirhier(ud.pkgdir) - logger.debug("Running %s", svncmd) - bb.fetch2.check_network_access(d, svncmd, ud.url) - runfetchcmd(svncmd, d, workdir=ud.pkgdir) - - if not ("externals" in ud.parm and ud.parm["externals"] == "nowarn"): - # Warn the user if this had externals (won't catch them all) - output = runfetchcmd("svn propget svn:externals || true", d, workdir=ud.moddir) - if output: - if "--ignore-externals" in svncmd.split(): - bb.warn("%s contains svn:externals." 
% ud.url) - bb.warn("These should be added to the recipe SRC_URI as necessary.") - bb.warn("svn fetch has ignored externals:\n%s" % output) - bb.warn("To disable this warning add ';externals=nowarn' to the url.") - else: - bb.debug(1, "svn repository has externals:\n%s" % output) - - scmdata = ud.parm.get("scmdata", "") - if scmdata == "keep": - tar_flags = "" - else: - tar_flags = "--exclude='.svn'" - - # tar them up to a defined filename - runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, ud.path_spec), d, - cleanup=[ud.localpath], workdir=ud.pkgdir) - finally: - bb.utils.unlockfile(lf) - - def clean(self, ud, d): - """ Clean SVN specific files and dirs """ - - bb.utils.remove(ud.localpath) - bb.utils.remove(ud.moddir, True) - - - def supports_srcrev(self): - return True - - def _revision_key(self, ud, d, name): - """ - Return a unique key for the url - """ - return "svn:" + ud.moddir - - def _latest_revision(self, ud, d, name): - """ - Return the latest upstream revision number - """ - bb.fetch2.check_network_access(d, self._buildsvncommand(ud, d, "log1"), ud.url) - - output = runfetchcmd("LANG=C LC_ALL=C " + self._buildsvncommand(ud, d, "log1"), d, True) - - # skip the first line, as per output of svn log - # then we expect the revision on the 2nd line - revision = re.search('^r([0-9]*)', output.splitlines()[1]).group(1) - - return revision - - def sortable_revision(self, ud, d, name): - """ - Return a sortable revision number which in our case is the revision number - """ - - return False, self._build_revision(ud, d) - - def _build_revision(self, ud, d): - return ud.revision - - def supports_checksum(self, urldata): - return False diff --git a/bitbake/lib/bb/fetch2/wget.py b/bitbake/lib/bb/fetch2/wget.py deleted file mode 100644 index 4d19e2134b..0000000000 --- a/bitbake/lib/bb/fetch2/wget.py +++ /dev/null @@ -1,693 +0,0 @@ -""" -BitBake 'Fetch' implementations - -Classes for obtaining upstream sources for the -BitBake build tools. - -""" - -# Copyright (C) 2003, 2004 Chris Larson -# -# SPDX-License-Identifier: GPL-2.0-only -# -# Based on functions from the base bb module, Copyright 2003 Holger Schurig - -import shlex -import re -import tempfile -import os -import errno -import bb -import bb.progress -import socket -import http.client -import urllib.request, urllib.parse, urllib.error -from bb.fetch2 import FetchMethod -from bb.fetch2 import FetchError -from bb.fetch2 import logger -from bb.fetch2 import runfetchcmd -from bs4 import BeautifulSoup -from bs4 import SoupStrainer - -class WgetProgressHandler(bb.progress.LineFilterProgressHandler): - """ - Extract progress information from wget output. - Note: relies on --progress=dot (with -v or without -q/-nv) being - specified on the wget command line. - """ - def __init__(self, d): - super(WgetProgressHandler, self).__init__(d) - # Send an initial progress event so the bar gets shown - self._fire_progress(0) - - def writeline(self, line): - percs = re.findall(r'(\d+)%\s+([\d.]+[A-Z])', line) - if percs: - progress = int(percs[-1][0]) - rate = percs[-1][1] + '/s' - self.update(progress, rate) - return False - return True - - -class Wget(FetchMethod): - """Class to fetch urls via 'wget'""" - - def check_certs(self, d): - """ - Should certificates be checked? - """ - return (d.getVar("BB_CHECK_SSL_CERTS") or "1") != "0" - - def supports(self, ud, d): - """ - Check to see if a given url can be fetched with wget. 
- """ - return ud.type in ['http', 'https', 'ftp', 'ftps'] - - def recommends_checksum(self, urldata): - return True - - def urldata_init(self, ud, d): - if 'protocol' in ud.parm: - if ud.parm['protocol'] == 'git': - raise bb.fetch2.ParameterError("Invalid protocol - if you wish to fetch from a git repository using http, you need to instead use the git:// prefix with protocol=http", ud.url) - - if 'downloadfilename' in ud.parm: - ud.basename = ud.parm['downloadfilename'] - else: - ud.basename = os.path.basename(ud.path) - - ud.localfile = ud.basename - if not ud.localfile: - ud.localfile = ud.host + ud.path.replace("/", ".") - - self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget --tries=2 --timeout=100" - - if ud.type == 'ftp' or ud.type == 'ftps': - self.basecmd += " --passive-ftp" - - if not self.check_certs(d): - self.basecmd += " --no-check-certificate" - - def _runwget(self, ud, d, command, quiet, workdir=None): - - progresshandler = WgetProgressHandler(d) - - logger.debug2("Fetching %s using command '%s'" % (ud.url, command)) - bb.fetch2.check_network_access(d, command, ud.url) - runfetchcmd(command + ' --progress=dot --verbose', d, quiet, log=progresshandler, workdir=workdir) - - def download(self, ud, d): - """Fetch urls""" - - fetchcmd = self.basecmd - - dldir = os.path.realpath(d.getVar("DL_DIR")) - localpath = os.path.join(dldir, ud.localfile) + ".tmp" - bb.utils.mkdirhier(os.path.dirname(localpath)) - fetchcmd += " --output-document=%s" % shlex.quote(localpath) - - if ud.user and ud.pswd: - fetchcmd += " --auth-no-challenge" - if ud.parm.get("redirectauth", "1") == "1": - # An undocumented feature of wget is that if the - # username/password are specified on the URI, wget will only - # send the Authorization header to the first host and not to - # any hosts that it is redirected to. With the increasing - # usage of temporary AWS URLs, this difference now matters as - # AWS will reject any request that has authentication both in - # the query parameters (from the redirect) and in the - # Authorization header. - fetchcmd += " --user=%s --password=%s" % (ud.user, ud.pswd) - - uri = ud.url.split(";")[0] - fetchcmd += " --continue --directory-prefix=%s '%s'" % (dldir, uri) - self._runwget(ud, d, fetchcmd, False) - - # Sanity check since wget can pretend it succeed when it didn't - # Also, this used to happen if sourceforge sent us to the mirror page - if not os.path.exists(localpath): - raise FetchError("The fetch command returned success for url %s but %s doesn't exist?!" % (uri, localpath), uri) - - if os.path.getsize(localpath) == 0: - os.remove(localpath) - raise FetchError("The fetch of %s resulted in a zero size file?! Deleting and failing since this isn't right." 
% (uri), uri) - - # Try and verify any checksum now, meaning if it isn't correct, we don't remove the - # original file, which might be a race (imagine two recipes referencing the same - # source, one with an incorrect checksum) - bb.fetch2.verify_checksum(ud, d, localpath=localpath, fatal_nochecksum=False) - - # Remove the ".tmp" and move the file into position atomically - # Our lock prevents multiple writers but mirroring code may grab incomplete files - os.rename(localpath, localpath[:-4]) - - return True - - def checkstatus(self, fetch, ud, d, try_again=True): - class HTTPConnectionCache(http.client.HTTPConnection): - if fetch.connection_cache: - def connect(self): - """Connect to the host and port specified in __init__.""" - - sock = fetch.connection_cache.get_connection(self.host, self.port) - if sock: - self.sock = sock - else: - self.sock = socket.create_connection((self.host, self.port), - self.timeout, self.source_address) - fetch.connection_cache.add_connection(self.host, self.port, self.sock) - - if self._tunnel_host: - self._tunnel() - - class CacheHTTPHandler(urllib.request.HTTPHandler): - def http_open(self, req): - return self.do_open(HTTPConnectionCache, req) - - def do_open(self, http_class, req): - """Return an addinfourl object for the request, using http_class. - - http_class must implement the HTTPConnection API from httplib. - The addinfourl return value is a file-like object. It also - has methods and attributes including: - - info(): return a mimetools.Message object for the headers - - geturl(): return the original request URL - - code: HTTP status code - """ - host = req.host - if not host: - raise urllib.error.URLError('no host given') - - h = http_class(host, timeout=req.timeout) # will parse host:port - h.set_debuglevel(self._debuglevel) - - headers = dict(req.unredirected_hdrs) - headers.update(dict((k, v) for k, v in list(req.headers.items()) - if k not in headers)) - - # We want to make an HTTP/1.1 request, but the addinfourl - # class isn't prepared to deal with a persistent connection. - # It will try to read all remaining data from the socket, - # which will block while the server waits for the next request. - # So make sure the connection gets closed after the (only) - # request. - - # Don't close connection when connection_cache is enabled, - if fetch.connection_cache is None: - headers["Connection"] = "close" - else: - headers["Connection"] = "Keep-Alive" # Works for HTTP/1.0 - - headers = dict( - (name.title(), val) for name, val in list(headers.items())) - - if req._tunnel_host: - tunnel_headers = {} - proxy_auth_hdr = "Proxy-Authorization" - if proxy_auth_hdr in headers: - tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr] - # Proxy-Authorization should not be sent to origin - # server. - del headers[proxy_auth_hdr] - h.set_tunnel(req._tunnel_host, headers=tunnel_headers) - - try: - h.request(req.get_method(), req.selector, req.data, headers) - except socket.error as err: # XXX what error? - # Don't close connection when cache is enabled. - # Instead, try to detect connections that are no longer - # usable (for example, closed unexpectedly) and remove - # them from the cache. - if fetch.connection_cache is None: - h.close() - elif isinstance(err, OSError) and err.errno == errno.EBADF: - # This happens when the server closes the connection despite the Keep-Alive. - # Apparently urllib then uses the file descriptor, expecting it to be - # connected, when in reality the connection is already gone. 
- # We let the request fail and expect it to be - # tried once more ("try_again" in check_status()), - # with the dead connection removed from the cache. - # If it still fails, we give up, which can happen for bad - # HTTP proxy settings. - fetch.connection_cache.remove_connection(h.host, h.port) - raise urllib.error.URLError(err) - else: - try: - r = h.getresponse() - except TimeoutError as e: - if fetch.connection_cache: - fetch.connection_cache.remove_connection(h.host, h.port) - raise TimeoutError(e) - - # Pick apart the HTTPResponse object to get the addinfourl - # object initialized properly. - - # Wrap the HTTPResponse object in socket's file object adapter - # for Windows. That adapter calls recv(), so delegate recv() - # to read(). This weird wrapping allows the returned object to - # have readline() and readlines() methods. - - # XXX It might be better to extract the read buffering code - # out of socket._fileobject() and into a base class. - r.recv = r.read - - # no data, just have to read - r.read() - class fp_dummy(object): - def read(self): - return "" - def readline(self): - return "" - def close(self): - pass - closed = False - - resp = urllib.response.addinfourl(fp_dummy(), r.msg, req.get_full_url()) - resp.code = r.status - resp.msg = r.reason - - # Close connection when server request it. - if fetch.connection_cache is not None: - if 'Connection' in r.msg and r.msg['Connection'] == 'close': - fetch.connection_cache.remove_connection(h.host, h.port) - - return resp - - class HTTPMethodFallback(urllib.request.BaseHandler): - """ - Fallback to GET if HEAD is not allowed (405 HTTP error) - """ - def http_error_405(self, req, fp, code, msg, headers): - fp.read() - fp.close() - - if req.get_method() != 'GET': - newheaders = dict((k, v) for k, v in list(req.headers.items()) - if k.lower() not in ("content-length", "content-type")) - return self.parent.open(urllib.request.Request(req.get_full_url(), - headers=newheaders, - origin_req_host=req.origin_req_host, - unverifiable=True)) - - raise urllib.request.HTTPError(req, code, msg, headers, None) - - # Some servers (e.g. GitHub archives, hosted on Amazon S3) return 403 - # Forbidden when they actually mean 405 Method Not Allowed. - http_error_403 = http_error_405 - - - class FixedHTTPRedirectHandler(urllib.request.HTTPRedirectHandler): - """ - urllib2.HTTPRedirectHandler before 3.13 has two flaws: - - It resets the method to GET on redirect when we want to follow - redirects using the original method (typically HEAD). This was fixed - in 759e8e7. - - It also doesn't handle 308 (Permanent Redirect). This was fixed in - c379bc5. - - Until we depend on Python 3.13 onwards, copy the redirect_request - method to fix these issues. - """ - def redirect_request(self, req, fp, code, msg, headers, newurl): - m = req.get_method() - if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD") - or code in (301, 302, 303) and m == "POST")): - raise urllib.HTTPError(req.full_url, code, msg, headers, fp) - - # Strictly (according to RFC 2616), 301 or 302 in response to - # a POST MUST NOT cause a redirection without confirmation - # from the user (of urllib.request, in this case). In practice, - # essentially all clients do redirect in this case, so we do - # the same. - - # Be conciliant with URIs containing a space. This is mainly - # redundant with the more complete encoding done in http_error_302(), - # but it is kept for compatibility with other callers. 
- newurl = newurl.replace(' ', '%20') - - CONTENT_HEADERS = ("content-length", "content-type") - newheaders = {k: v for k, v in req.headers.items() - if k.lower() not in CONTENT_HEADERS} - return urllib.request.Request(newurl, - method="HEAD" if m == "HEAD" else "GET", - headers=newheaders, - origin_req_host=req.origin_req_host, - unverifiable=True) - - http_error_308 = urllib.request.HTTPRedirectHandler.http_error_302 - - # We need to update the environment here as both the proxy and HTTPS - # handlers need variables set. The proxy needs http_proxy and friends to - # be set, and HTTPSHandler ends up calling into openssl to load the - # certificates. In buildtools configurations this will be looking at the - # wrong place for certificates by default: we set SSL_CERT_FILE to the - # right location in the buildtools environment script but as BitBake - # prunes prunes the environment this is lost. When binaries are executed - # runfetchcmd ensures these values are in the environment, but this is - # pure Python so we need to update the environment. - # - # Avoid tramping the environment too much by using bb.utils.environment - # to scope the changes to the build_opener request, which is when the - # environment lookups happen. - newenv = bb.fetch2.get_fetcher_environment(d) - - with bb.utils.environment(**newenv): - import ssl - - if self.check_certs(d): - context = ssl.create_default_context() - else: - context = ssl._create_unverified_context() - - handlers = [FixedHTTPRedirectHandler, - HTTPMethodFallback, - urllib.request.ProxyHandler(), - CacheHTTPHandler(), - urllib.request.HTTPSHandler(context=context)] - opener = urllib.request.build_opener(*handlers) - - try: - parts = urllib.parse.urlparse(ud.url.split(";")[0]) - if parts.query: - uri = "{}://{}{}?{}".format(parts.scheme, parts.netloc, parts.path, parts.query) - else: - uri = "{}://{}{}".format(parts.scheme, parts.netloc, parts.path) - r = urllib.request.Request(uri) - r.get_method = lambda: "HEAD" - # Some servers (FusionForge, as used on Alioth) require that the - # optional Accept header is set. - r.add_header("Accept", "*/*") - r.add_header("User-Agent", "bitbake/{}".format(bb.__version__)) - def add_basic_auth(login_str, request): - '''Adds Basic auth to http request, pass in login:password as string''' - import base64 - encodeuser = base64.b64encode(login_str.encode('utf-8')).decode("utf-8") - authheader = "Basic %s" % encodeuser - r.add_header("Authorization", authheader) - - if ud.user and ud.pswd: - add_basic_auth(ud.user + ':' + ud.pswd, r) - - try: - import netrc - auth_data = netrc.netrc().authenticators(urllib.parse.urlparse(uri).hostname) - if auth_data: - login, _, password = auth_data - add_basic_auth("%s:%s" % (login, password), r) - except (FileNotFoundError, netrc.NetrcParseError): - pass - - with opener.open(r, timeout=100) as response: - pass - except (urllib.error.URLError, ConnectionResetError, TimeoutError) as e: - if try_again: - logger.debug2("checkstatus: trying again") - return self.checkstatus(fetch, ud, d, False) - else: - # debug for now to avoid spamming the logs in e.g. 
remote sstate searches - logger.debug2("checkstatus() urlopen failed for %s: %s" % (uri,e)) - return False - - return True - - def _parse_path(self, regex, s): - """ - Find and group name, version and archive type in the given string s - """ - - m = regex.search(s) - if m: - pname = '' - pver = '' - ptype = '' - - mdict = m.groupdict() - if 'name' in mdict.keys(): - pname = mdict['name'] - if 'pver' in mdict.keys(): - pver = mdict['pver'] - if 'type' in mdict.keys(): - ptype = mdict['type'] - - bb.debug(3, "_parse_path: %s, %s, %s" % (pname, pver, ptype)) - - return (pname, pver, ptype) - - return None - - def _modelate_version(self, version): - if version[0] in ['.', '-']: - if version[1].isdigit(): - version = version[1] + version[0] + version[2:len(version)] - else: - version = version[1:len(version)] - - version = re.sub('-', '.', version) - version = re.sub('_', '.', version) - version = re.sub('(rc)+', '.1000.', version) - version = re.sub('(beta)+', '.100.', version) - version = re.sub('(alpha)+', '.10.', version) - if version[0] == 'v': - version = version[1:len(version)] - return version - - def _vercmp(self, old, new): - """ - Check whether 'new' is newer than 'old' version. We use existing vercmp() for the - purpose. PE is cleared in comparison as it's not for build, and PR is cleared too - for simplicity as it's somehow difficult to get from various upstream format - """ - - (oldpn, oldpv, oldsuffix) = old - (newpn, newpv, newsuffix) = new - - # Check for a new suffix type that we have never heard of before - if newsuffix: - m = self.suffix_regex_comp.search(newsuffix) - if not m: - bb.warn("%s has a possible unknown suffix: %s" % (newpn, newsuffix)) - return False - - # Not our package so ignore it - if oldpn != newpn: - return False - - oldpv = self._modelate_version(oldpv) - newpv = self._modelate_version(newpv) - - return bb.utils.vercmp(("0", oldpv, ""), ("0", newpv, "")) - - def _fetch_index(self, uri, ud, d): - """ - Run fetch checkstatus to get directory information - """ - f = tempfile.NamedTemporaryFile() - with tempfile.TemporaryDirectory(prefix="wget-index-") as workdir, tempfile.NamedTemporaryFile(dir=workdir, prefix="wget-listing-") as f: - fetchcmd = self.basecmd - fetchcmd += " --output-document=%s '%s'" % (f.name, uri) - try: - self._runwget(ud, d, fetchcmd, True, workdir=workdir) - fetchresult = f.read() - except bb.fetch2.BBFetchException: - fetchresult = "" - - return fetchresult - - def _check_latest_version(self, url, package, package_regex, current_version, ud, d): - """ - Return the latest version of a package inside a given directory path - If error or no version, return "" - """ - valid = 0 - version = ['', '', ''] - - bb.debug(3, "VersionURL: %s" % (url)) - soup = BeautifulSoup(self._fetch_index(url, ud, d), "html.parser", parse_only=SoupStrainer("a")) - if not soup: - bb.debug(3, "*** %s NO SOUP" % (url)) - return "" - - for line in soup.find_all('a', href=True): - bb.debug(3, "line['href'] = '%s'" % (line['href'])) - bb.debug(3, "line = '%s'" % (str(line))) - - newver = self._parse_path(package_regex, line['href']) - if not newver: - newver = self._parse_path(package_regex, str(line)) - - if newver: - bb.debug(3, "Upstream version found: %s" % newver[1]) - if valid == 0: - version = newver - valid = 1 - elif self._vercmp(version, newver) < 0: - version = newver - - pupver = re.sub('_', '.', version[1]) - - bb.debug(3, "*** %s -> UpstreamVersion = %s (CurrentVersion = %s)" % - (package, pupver or "N/A", current_version[1])) - - if valid: - return 
pupver
-
-        return ""
-
-    def _check_latest_version_by_dir(self, dirver, package, package_regex, current_version, ud, d):
-        """
-        Scan every directory in order to get upstream version.
-        """
-        version_dir = ['', '', '']
-        version = ['', '', '']
-
-        dirver_regex = re.compile(r"(?P<pfx>\D*)(?P<ver>(\d+[\.\-_])*(\d+))")
-        s = dirver_regex.search(dirver)
-        if s:
-            version_dir[1] = s.group('ver')
-        else:
-            version_dir[1] = dirver
-
-        dirs_uri = bb.fetch.encodeurl([ud.type, ud.host,
-                ud.path.split(dirver)[0], ud.user, ud.pswd, {}])
-        bb.debug(3, "DirURL: %s, %s" % (dirs_uri, package))
-
-        soup = BeautifulSoup(self._fetch_index(dirs_uri, ud, d), "html.parser", parse_only=SoupStrainer("a"))
-        if not soup:
-            return version[1]
-
-        for line in soup.find_all('a', href=True):
-            s = dirver_regex.search(line['href'].strip("/"))
-            if s:
-                sver = s.group('ver')
-
-                # When the prefix is part of the version directory, ensure that
-                # only the version directory is used, so remove any previous
-                # directories if they exist.
-                #
-                # Example: pfx = '/dir1/dir2/v' and version = '2.5' the expected
-                # result is v2.5.
-                spfx = s.group('pfx').split('/')[-1]
-
-                version_dir_new = ['', sver, '']
-                if self._vercmp(version_dir, version_dir_new) <= 0:
-                    dirver_new = spfx + sver
-                    path = ud.path.replace(dirver, dirver_new, True) \
-                        .split(package)[0]
-                    uri = bb.fetch.encodeurl([ud.type, ud.host, path,
-                        ud.user, ud.pswd, {}])
-
-                    pupver = self._check_latest_version(uri,
-                            package, package_regex, current_version, ud, d)
-                    if pupver:
-                        version[1] = pupver
-
-                    version_dir = version_dir_new
-
-        return version[1]
-
-    def _init_regexes(self, package, ud, d):
-        """
-        Match as many patterns as possible such as:
-                gnome-common-2.20.0.tar.gz (most common format)
-                gtk+-2.90.1.tar.gz
-                xf86-input-synaptics-12.6.9.tar.gz
-                dri2proto-2.3.tar.gz
-                blktool_4.orig.tar.gz
-                libid3tag-0.15.1b.tar.gz
-                unzip552.tar.gz
-                icu4c-3_6-src.tgz
-                genext2fs_1.3.orig.tar.gz
-                gst-fluendo-mp3
-        """
-        # match most patterns which use "-" as separator to version digits
-        pn_prefix1 = r"[a-zA-Z][a-zA-Z0-9]*([-_][a-zA-Z]\w+)*\+?[-_]"
-        # a loose pattern such as for unzip552.tar.gz
-        pn_prefix2 = r"[a-zA-Z]+"
-        # a loose pattern such as for 80325-quicky-0.4.tar.gz
-        pn_prefix3 = r"[0-9]+[-]?[a-zA-Z]+"
-        # Save the Package Name (pn) Regex for use later
-        pn_regex = r"(%s|%s|%s)" % (pn_prefix1, pn_prefix2, pn_prefix3)
-
-        # match version
-        pver_regex = r"(([A-Z]*\d+[a-zA-Z]*[\.\-_]*)+)"
-
-        # match arch
-        parch_regex = "-source|_all_"
-
-        # src.rpm extension was added only for rpm package.
-    def _init_regexes(self, package, ud, d):
-        """
-        Match as many patterns as possible, such as:
-            gnome-common-2.20.0.tar.gz (most common format)
-            gtk+-2.90.1.tar.gz
-            xf86-input-synaptics-12.6.9.tar.gz
-            dri2proto-2.3.tar.gz
-            blktool_4.orig.tar.gz
-            libid3tag-0.15.1b.tar.gz
-            unzip552.tar.gz
-            icu4c-3_6-src.tgz
-            genext2fs_1.3.orig.tar.gz
-            gst-fluendo-mp3
-        """
-        # match most patterns which use "-" as the separator before the version digits
-        pn_prefix1 = r"[a-zA-Z][a-zA-Z0-9]*([-_][a-zA-Z]\w+)*\+?[-_]"
-        # a loose pattern such as for unzip552.tar.gz
-        pn_prefix2 = r"[a-zA-Z]+"
-        # a loose pattern such as for 80325-quicky-0.4.tar.gz
-        pn_prefix3 = r"[0-9]+[-]?[a-zA-Z]+"
-        # Save the Package Name (pn) Regex for use later
-        pn_regex = r"(%s|%s|%s)" % (pn_prefix1, pn_prefix2, pn_prefix3)
-
-        # match version
-        pver_regex = r"(([A-Z]*\d+[a-zA-Z]*[\.\-_]*)+)"
-
-        # match arch
-        parch_regex = "-source|_all_"
-
-        # The src.rpm extension was added only for rpm packages. It can be
-        # removed if rpm packages will always be considered as having to be
-        # manually upgraded.
-        psuffix_regex = r"(tar\.\w+|tgz|zip|xz|rpm|bz2|orig\.tar\.\w+|src\.tar\.\w+|src\.tgz|svnr\d+\.tar\.\w+|stable\.tar\.\w+|src\.rpm)"
-
-        # match the name, version and archive type of a package
-        package_regex_comp = re.compile(r"(?P<name>%s?\.?v?)(?P<pver>%s)(?P<arch>%s)?[\.-](?P<type>%s$)"
-                % (pn_regex, pver_regex, parch_regex, psuffix_regex))
-        self.suffix_regex_comp = re.compile(psuffix_regex)
-
-        # compile the regex; it can be package-specific or the generic regex
-        pn_regex = d.getVar('UPSTREAM_CHECK_REGEX')
-        if pn_regex:
-            package_custom_regex_comp = re.compile(pn_regex)
-        else:
-            version = self._parse_path(package_regex_comp, package)
-            if version:
-                package_custom_regex_comp = re.compile(
-                    r"(?P<name>%s)(?P<pver>%s)(?P<arch>%s)?[\.-](?P<type>%s)" %
-                    (re.escape(version[0]), pver_regex, parch_regex, psuffix_regex))
-            else:
-                package_custom_regex_comp = None
-
-        return package_custom_regex_comp
-
-    def latest_versionstring(self, ud, d):
-        """
-        Manipulate the URL and try to obtain the latest package version.
-
-        Sanity check to ensure the same name and type.
-        """
-        if 'downloadfilename' in ud.parm:
-            package = ud.parm['downloadfilename']
-        else:
-            package = ud.path.split("/")[-1]
-        current_version = ['', d.getVar('PV'), '']
-
-        # It is possible to have no version in the package name, such as spectrum-fw
-        if not re.search(r"\d+", package):
-            current_version[1] = re.sub('_', '.', current_version[1])
-            current_version[1] = re.sub('-', '.', current_version[1])
-            bb.debug(3, "latest_versionstring: no version found in %s" % package)
-            return (current_version[1], '')
-
-        package_regex = self._init_regexes(package, ud, d)
-        if package_regex is None:
-            bb.warn("latest_versionstring: package %s doesn't match any pattern" % (package))
-            return ('', '')
-        bb.debug(3, "latest_versionstring, regex: %s" % (package_regex.pattern))
-
-        uri = ""
-        regex_uri = d.getVar("UPSTREAM_CHECK_URI")
-        if not regex_uri:
-            path = ud.path.split(package)[0]
-
-            # search for version matches in folders inside the path, like:
-            # "5.7" in http://download.gnome.org/sources/${PN}/5.7/${PN}-${PV}.tar.gz
-            dirver_regex = re.compile(r"(?P<dirver>[^/]*(\d+\.)*\d+([-_]r\d+)*)/")
-            m = dirver_regex.findall(path)
-            if m:
-                pn = d.getVar('PN')
-                dirver = m[-1][0]
-
-                dirver_pn_regex = re.compile(r"%s\d?" % (re.escape(pn)))
-                if not dirver_pn_regex.search(dirver):
-                    return (self._check_latest_version_by_dir(dirver,
-                        package, package_regex, current_version, ud, d), '')
-
-            uri = bb.fetch.encodeurl([ud.type, ud.host, path, ud.user, ud.pswd, {}])
-        else:
-            uri = regex_uri
-
-        return (self._check_latest_version(uri, package, package_regex,
-            current_version, ud, d), '')
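With the named groups restored, the composed pattern can be exercised directly. This sketch uses a shortened suffix list but the same name/pver/arch/type structure as _init_regexes() above:

    import re

    pn_regex = r"([a-zA-Z][a-zA-Z0-9]*([-_][a-zA-Z]\w+)*\+?[-_]|[a-zA-Z]+|[0-9]+[-]?[a-zA-Z]+)"
    pver_regex = r"(([A-Z]*\d+[a-zA-Z]*[\.\-_]*)+)"
    parch_regex = "-source|_all_"
    psuffix_regex = r"(tar\.\w+|tgz|zip)"   # abbreviated for the example

    package_regex = re.compile(r"(?P<name>%s?\.?v?)(?P<pver>%s)(?P<arch>%s)?[\.-](?P<type>%s$)"
            % (pn_regex, pver_regex, parch_regex, psuffix_regex))

    for package in ("gnome-common-2.20.0.tar.gz", "unzip552.tar.gz"):
        m = package_regex.search(package)
        print(m.group('name'), m.group('pver'), m.group('type'))
    # gnome-common- 2.20.0 tar.gz
    # unzip 552 tar.gz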
diff --git a/bitbake/lib/bb/filter.py b/bitbake/lib/bb/filter.py
deleted file mode 100644
index 0b5b5d92ca..0000000000
--- a/bitbake/lib/bb/filter.py
+++ /dev/null
@@ -1,142 +0,0 @@
-#
-# Copyright (C) 2025 Garmin Ltd. or its subsidiaries
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-
-import builtins
-
-# Purposely blank out __builtins__, which prevents users from calling
-# any normal built-in Python functions
-FILTERS = {
-    "__builtins__": {},
-}
-
-CACHE = {}
-
-
-def apply_filters(val, expressions):
-    g = FILTERS.copy()
-
-    for e in expressions:
-        e = e.strip()
-        if not e:
-            continue
-
-        k = (val, e)
-        if k not in CACHE:
-            # Set val as a local so it can be cleared out while keeping the
-            # globals
-            l = {"val": val}
-
-            CACHE[k] = eval(e, g, l)
-
-        val = CACHE[k]
-
-    return val
-
-
-class Namespace(object):
-    """
-    Helper class to simulate a Python namespace. The object properties can be
-    set as if it were a dictionary. Properties cannot be changed or deleted
-    through the object interface.
-    """
-
-    def __getitem__(self, name):
-        return self.__dict__[name]
-
-    def __setitem__(self, name, value):
-        self.__dict__[name] = value
-
-    def __contains__(self, name):
-        return name in self.__dict__
-
-    def __setattr__(self, name, value):
-        raise AttributeError(f"Attribute {name!r} cannot be changed")
-
-    def __delattr__(self, name):
-        raise AttributeError(f"Attribute {name!r} cannot be deleted")
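A sketch of how a filter pipeline is invoked through apply_filters() (it assumes bb.filter is importable; sort and suffix are the stock filters registered further down in this file). Note how the second call chains two expressions, each result feeding the next as val:

    from bb import filter as bb_filter

    print(bb_filter.apply_filters("zlib curl openssl", ["sort(val)"]))
    # -> "curl openssl zlib"
    print(bb_filter.apply_filters("zlib curl", ["sort(val)", "suffix(val, '-native')"]))
    # -> "curl-native zlib-native"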
- """ - - def inner(func): - global FILTERS - nonlocal name - - if name is None: - name = func.__name__ - - ns = name.split(".") - o = FILTERS - for n in ns[:-1]: - if not n in o: - o[n] = Namespace() - o = o[n] - - o[ns[-1]] = func - - return func - - return inner - - -# A select set of builtins that are supported in filter expressions -filter_proc()(all) -filter_proc()(all) -filter_proc()(any) -filter_proc()(bin) -filter_proc()(bool) -filter_proc()(chr) -filter_proc()(enumerate) -filter_proc()(float) -filter_proc()(format) -filter_proc()(hex) -filter_proc()(int) -filter_proc()(len) -filter_proc()(map) -filter_proc()(max) -filter_proc()(min) -filter_proc()(oct) -filter_proc()(ord) -filter_proc()(pow) -filter_proc()(str) -filter_proc()(sum) - - -@filter_proc() -def suffix(val, suffix): - return " ".join(v + suffix for v in val.split()) - - -@filter_proc() -def prefix(val, prefix): - return " ".join(prefix + v for v in val.split()) - - -@filter_proc() -def sort(val): - return " ".join(sorted(val.split())) - - -@filter_proc() -def remove(val, remove, sep=None): - if isinstance(remove, str): - remove = remove.split(sep) - new = [i for i in val.split(sep) if not i in remove] - - if not sep: - return " ".join(new) - return sep.join(new) diff --git a/bitbake/lib/bb/main.py b/bitbake/lib/bb/main.py deleted file mode 100755 index 597cb27846..0000000000 --- a/bitbake/lib/bb/main.py +++ /dev/null @@ -1,536 +0,0 @@ -# -# Copyright (C) 2003, 2004 Chris Larson -# Copyright (C) 2003, 2004 Phil Blundell -# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer -# Copyright (C) 2005 Holger Hans Peter Freyther -# Copyright (C) 2005 ROAD GmbH -# Copyright (C) 2006 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import os -import sys -import logging -import argparse -import warnings -import fcntl -import time -import traceback -import datetime - -import bb -from bb import event -import bb.msg -from bb import cooker -from bb import ui -from bb import server -from bb import cookerdata - -import bb.server.process -import bb.server.xmlrpcclient - -logger = logging.getLogger("BitBake") - -class BBMainException(Exception): - pass - -class BBMainFatal(bb.BBHandledException): - pass - -def present_options(optionlist): - if len(optionlist) > 1: - return ' or '.join([', '.join(optionlist[:-1]), optionlist[-1]]) - else: - return optionlist[0] - -class BitbakeHelpFormatter(argparse.HelpFormatter): - def _get_help_string(self, action): - # We need to do this here rather than in the text we supply to - # add_option() because we don't want to call list_extension_modules() - # on every execution (since it imports all of the modules) - # Note also that we modify option.help rather than the returned text - # - this is so that we don't have to re-format the text ourselves - if action.dest == 'ui': - valid_uis = list_extension_modules(bb.ui, 'main') - return action.help.replace('@CHOICES@', present_options(valid_uis)) - - return action.help - -def list_extension_modules(pkg, checkattr): - """ - Lists extension modules in a specific Python package - (e.g. UIs, servers). NOTE: Calling this function will import all of the - submodules of the specified module in order to check for the specified - attribute; this can have unusual side-effects. As a result, this should - only be called when displaying help text or error messages. 
diff --git a/bitbake/lib/bb/main.py b/bitbake/lib/bb/main.py
deleted file mode 100755
index 597cb27846..0000000000
--- a/bitbake/lib/bb/main.py
+++ /dev/null
@@ -1,536 +0,0 @@
-#
-# Copyright (C) 2003, 2004 Chris Larson
-# Copyright (C) 2003, 2004 Phil Blundell
-# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer
-# Copyright (C) 2005 Holger Hans Peter Freyther
-# Copyright (C) 2005 ROAD GmbH
-# Copyright (C) 2006 Richard Purdie
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-
-import os
-import sys
-import logging
-import argparse
-import warnings
-import fcntl
-import time
-import traceback
-import datetime
-
-import bb
-from bb import event
-import bb.msg
-from bb import cooker
-from bb import ui
-from bb import server
-from bb import cookerdata
-
-import bb.server.process
-import bb.server.xmlrpcclient
-
-logger = logging.getLogger("BitBake")
-
-class BBMainException(Exception):
-    pass
-
-class BBMainFatal(bb.BBHandledException):
-    pass
-
-def present_options(optionlist):
-    if len(optionlist) > 1:
-        return ' or '.join([', '.join(optionlist[:-1]), optionlist[-1]])
-    else:
-        return optionlist[0]
-
-class BitbakeHelpFormatter(argparse.HelpFormatter):
-    def _get_help_string(self, action):
-        # We need to do this here rather than in the text we supply to
-        # add_option() because we don't want to call list_extension_modules()
-        # on every execution (since it imports all of the modules).
-        # Note also that we modify option.help rather than the returned text
-        # - this is so that we don't have to re-format the text ourselves.
-        if action.dest == 'ui':
-            valid_uis = list_extension_modules(bb.ui, 'main')
-            return action.help.replace('@CHOICES@', present_options(valid_uis))
-
-        return action.help
-
-def list_extension_modules(pkg, checkattr):
-    """
-    Lists extension modules in a specific Python package
-    (e.g. UIs, servers). NOTE: Calling this function will import all of the
-    submodules of the specified module in order to check for the specified
-    attribute; this can have unusual side-effects. As a result, this should
-    only be called when displaying help text or error messages.
-
-    Parameters:
-        pkg: previously imported Python package to list
-        checkattr: attribute to look for in a module to determine if it is valid
-                   as the type of extension you are looking for
-    """
-    import pkgutil
-    pkgdir = os.path.dirname(pkg.__file__)
-
-    modules = []
-    for _, modulename, _ in pkgutil.iter_modules([pkgdir]):
-        if os.path.isdir(os.path.join(pkgdir, modulename)):
-            # ignore directories
-            continue
-        try:
-            module = __import__(pkg.__name__, fromlist=[modulename])
-        except:
-            # If we can't import it, it's not valid
-            continue
-        module_if = getattr(module, modulename)
-        if getattr(module_if, 'hidden_extension', False):
-            continue
-        if not checkattr or hasattr(module_if, checkattr):
-            modules.append(modulename)
-    return modules
-
-def import_extension_module(pkg, modulename, checkattr):
-    try:
-        # Dynamically load the UI based on the ui name. Although we
-        # suggest a fixed set, this allows you to have flexibility in which
-        # ones are available.
-        module = __import__(pkg.__name__, fromlist=[modulename])
-        return getattr(module, modulename)
-    except AttributeError:
-        modules = present_options(list_extension_modules(pkg, checkattr))
-        raise BBMainException('FATAL: Unable to import extension module "%s" from %s. '
-                              'Valid extension modules: %s' % (modulename, pkg.__name__, modules))
-
-# Display bitbake/OE warnings via the BitBake.Warnings logger, ignoring others
-warnlog = logging.getLogger("BitBake.Warnings")
-_warnings_showwarning = warnings.showwarning
-def _showwarning(message, category, filename, lineno, file=None, line=None):
-    if file is not None:
-        if _warnings_showwarning is not None:
-            _warnings_showwarning(message, category, filename, lineno, file, line)
-    else:
-        s = warnings.formatwarning(message, category, filename, lineno)
-        warnlog.warning(s)
-
-warnings.showwarning = _showwarning
-
-def create_bitbake_parser():
-    parser = argparse.ArgumentParser(
-        description="""\
-        It is assumed there is a conf/bblayers.conf available in cwd or in BBPATH which
-        will provide the layer, BBFILES and other configuration information.
- """, - formatter_class=BitbakeHelpFormatter, - allow_abbrev=False, - add_help=False, # help is manually added below in a specific argument group - ) - - general_group = parser.add_argument_group('General options') - task_group = parser.add_argument_group('Task control options') - exec_group = parser.add_argument_group('Execution control options') - logging_group = parser.add_argument_group('Logging/output control options') - server_group = parser.add_argument_group('Server options') - config_group = parser.add_argument_group('Configuration options') - - general_group.add_argument("targets", nargs="*", metavar="recipename/target", - help="Execute the specified task (default is 'build') for these target " - "recipes (.bb files).") - - general_group.add_argument("-s", "--show-versions", action="store_true", - help="Show current and preferred versions of all recipes.") - - general_group.add_argument("-e", "--environment", action="store_true", - dest="show_environment", - help="Show the global or per-recipe environment complete with information" - " about where variables were set/changed.") - - general_group.add_argument("-g", "--graphviz", action="store_true", dest="dot_graph", - help="Save dependency tree information for the specified " - "targets in the dot syntax.") - - # @CHOICES@ is substituted out by BitbakeHelpFormatter above - general_group.add_argument("-u", "--ui", - default=os.environ.get('BITBAKE_UI', 'knotty'), - help="The user interface to use (@CHOICES@ - default %(default)s).") - - general_group.add_argument("--version", action="store_true", - help="Show programs version and exit.") - - general_group.add_argument('-h', '--help', action='help', - help='Show this help message and exit.') - - - task_group.add_argument("-f", "--force", action="store_true", - help="Force the specified targets/task to run (invalidating any " - "existing stamp file).") - - task_group.add_argument("-c", "--cmd", - help="Specify the task to execute. The exact options available " - "depend on the metadata. Some examples might be 'compile'" - " or 'populate_sysroot' or 'listtasks' may give a list of " - "the tasks available.") - - task_group.add_argument("-C", "--clear-stamp", dest="invalidate_stamp", - help="Invalidate the stamp for the specified task such as 'compile' " - "and then run the default task for the specified target(s).") - - task_group.add_argument("--runall", action="append", default=[], - help="Run the specified task for any recipe in the taskgraph of the " - "specified target (even if it wouldn't otherwise have run).") - - task_group.add_argument("--runonly", action="append", - help="Run only the specified task within the taskgraph of the " - "specified targets (and any task dependencies those tasks may have).") - - task_group.add_argument("--no-setscene", action="store_true", - dest="nosetscene", - help="Do not run any setscene tasks. sstate will be ignored and " - "everything needed, built.") - - task_group.add_argument("--skip-setscene", action="store_true", - dest="skipsetscene", - help="Skip setscene tasks if they would be executed. 
Tasks previously "
-                                 "restored from sstate will be kept, unlike --no-setscene.")
-
-    task_group.add_argument("--setscene-only", action="store_true",
-                            dest="setsceneonly",
-                            help="Only run setscene tasks, don't run any real tasks.")
-
-
-    exec_group.add_argument("-n", "--dry-run", action="store_true",
-                            help="Don't execute, just go through the motions.")
-
-    exec_group.add_argument("-p", "--parse-only", action="store_true",
-                            help="Quit after parsing the BB recipes.")
-
-    exec_group.add_argument("-k", "--continue", action="store_false", dest="halt",
-                            help="Continue as much as possible after an error. While the target that "
-                                 "failed and anything depending on it cannot be built, as much as "
-                                 "possible will be built before stopping.")
-
-    exec_group.add_argument("-P", "--profile", action="append",
-                            default=[],
-                            help="Profile the command and save reports. Specify 'main', 'idle' or 'parsing' "
-                                 "to indicate which bitbake code to profile.")
-
-    exec_group.add_argument("-S", "--dump-signatures", action="append",
-                            default=[], metavar="SIGNATURE_HANDLER",
-                            help="Dump out the signature construction information, with no task "
-                                 "execution. The SIGNATURE_HANDLER parameter is passed to the "
-                                 "handler; the handler may define more or fewer values, but two "
-                                 "common ones are 'none' and 'printdiff'. 'none' means only dump "
-                                 "the signature, 'printdiff' means recursively compare the dumped "
-                                 "signature with the most recent one in a local build or sstate cache "
-                                 "(which can be used to find out why tasks re-run when that is not expected).")
-
-    exec_group.add_argument("--revisions-changed", action="store_true",
-                            help="Set the exit code depending on whether upstream floating "
-                                 "revisions have changed or not.")
-
-    exec_group.add_argument("-b", "--buildfile",
-                            help="Execute tasks from a specific .bb recipe directly. WARNING: Does "
-                                 "not handle any dependencies from other recipes.")
-
-    logging_group.add_argument("-D", "--debug", action="count", default=0,
-                               help="Increase the debug level. You can specify this "
-                                    "more than once. -D sets the debug level to 1, "
-                                    "where only bb.debug(1, ...) messages are printed "
-                                    "to stdout; -DD sets the debug level to 2, where "
-                                    "both bb.debug(1, ...) and bb.debug(2, ...) "
-                                    "messages are printed; etc. Without -D, no debug "
-                                    "messages are printed. Note that -D only affects "
-                                    "output to stdout. All debug messages are written "
-                                    "to ${T}/log.do_taskname, regardless of the debug "
-                                    "level.")
-
-    logging_group.add_argument("-l", "--log-domains", action="append", dest="debug_domains",
-                               default=[],
-                               help="Show debug logging for the specified logging domains.")
-
-    logging_group.add_argument("-v", "--verbose", action="store_true",
-                               help="Enable tracing of shell tasks (with 'set -x'). "
-                                    "Also print bb.note(...) messages to stdout (in "
-                                    "addition to writing them to ${T}/log.do_<task>).")
-
-    logging_group.add_argument("-q", "--quiet", action="count", default=0,
-                               help="Output less log message data to the terminal. You can specify this "
-                                    "more than once.")
-
-    logging_group.add_argument("-w", "--write-log", dest="writeeventlog",
-                               default=os.environ.get("BBEVENTLOG"),
-                               help="Writes the event log of the build to a bitbake event json file. 
" - "Use '' (empty string) to assign the name automatically.") - - - server_group.add_argument("-B", "--bind", default=False, - help="The name/address for the bitbake xmlrpc server to bind to.") - - server_group.add_argument("-T", "--idle-timeout", type=float, dest="server_timeout", - default=os.getenv("BB_SERVER_TIMEOUT"), - help="Set timeout to unload bitbake server due to inactivity, " - "set to -1 means no unload, " - "default: Environment variable BB_SERVER_TIMEOUT.") - - server_group.add_argument("--remote-server", - default=os.environ.get("BBSERVER"), - help="Connect to the specified server.") - - server_group.add_argument("-m", "--kill-server", action="store_true", - help="Terminate any running bitbake server.") - - server_group.add_argument("--token", dest="xmlrpctoken", - default=os.environ.get("BBTOKEN"), - help="Specify the connection token to be used when connecting " - "to a remote server.") - - server_group.add_argument("--observe-only", action="store_true", - help="Connect to a server as an observing-only client.") - - server_group.add_argument("--status-only", action="store_true", - help="Check the status of the remote bitbake server.") - - server_group.add_argument("--server-only", action="store_true", - help="Run bitbake without a UI, only starting a server " - "(cooker) process.") - - - config_group.add_argument("-r", "--read", action="append", dest="prefile", default=[], - help="Read the specified file before bitbake.conf.") - - config_group.add_argument("-R", "--postread", action="append", dest="postfile", default=[], - help="Read the specified file after bitbake.conf.") - - - config_group.add_argument("-I", "--ignore-deps", action="append", - dest="extra_assume_provided", default=[], - help="Assume these dependencies don't exist and are already provided " - "(equivalent to ASSUME_PROVIDED). 
Useful to make dependency "
-                                  "graphs more appealing.")
-
-    return parser
-
-
-class BitBakeConfigParameters(cookerdata.ConfigParameters):
-    def parseCommandLine(self, argv=sys.argv):
-        parser = create_bitbake_parser()
-        options = parser.parse_intermixed_args(argv[1:])
-
-        if options.version:
-            print("BitBake Build Tool Core version %s" % bb.__version__)
-            sys.exit(0)
-
-        if options.quiet and options.verbose:
-            parser.error("options --quiet and --verbose are mutually exclusive")
-
-        if options.quiet and options.debug:
-            parser.error("options --quiet and --debug are mutually exclusive")
-
-        # use configuration files from environment variables
-        if "BBPRECONF" in os.environ:
-            options.prefile.append(os.environ["BBPRECONF"])
-
-        if "BBPOSTCONF" in os.environ:
-            options.postfile.append(os.environ["BBPOSTCONF"])
-
-        # fill in a proper log name if not supplied
-        if options.writeeventlog is not None and len(options.writeeventlog) == 0:
-            from datetime import datetime
-            eventlog = "bitbake_eventlog_%s.json" % datetime.now().strftime("%Y%m%d%H%M%S")
-            options.writeeventlog = eventlog
-
-        if options.bind:
-            try:
-                # Check that the bind parameter is a ':' delimited host:port pair with a numeric port
-                (host, port) = options.bind.split(':')
-                port = int(port)
-            except (ValueError, IndexError):
-                raise BBMainException("FATAL: Malformed host:port bind parameter")
-            options.xmlrpcinterface = (host, port)
-        else:
-            options.xmlrpcinterface = (None, 0)
-
-        return options, options.targets
-
-
-def bitbake_main(configParams, configuration):
-
-    # Python multiprocessing requires /dev/shm on Linux
-    if sys.platform.startswith('linux') and not os.access('/dev/shm', os.W_OK | os.X_OK):
-        raise BBMainException("FATAL: /dev/shm does not exist or is not writable")
-
-    # Unbuffer stdout to avoid log truncation in the event
-    # of a disorderly exit, as well as to provide timely
-    # updates to log files for use with tail
-    try:
-        if sys.stdout.name == '<stdout>':
-            # Reopen with O_SYNC (unbuffered)
-            fl = fcntl.fcntl(sys.stdout.fileno(), fcntl.F_GETFL)
-            fl |= os.O_SYNC
-            fcntl.fcntl(sys.stdout.fileno(), fcntl.F_SETFL, fl)
-    except:
-        pass
-
-    if configParams.server_only and configParams.remote_server:
-        raise BBMainException("FATAL: The '--server-only' option conflicts with %s.\n" %
-                              ("the BBSERVER environment variable" if "BBSERVER" in os.environ \
-                               else "the '--remote-server' option"))
-
-    if configParams.observe_only and not (configParams.remote_server or configParams.bind):
-        raise BBMainException("FATAL: '--observe-only' can only be used by UI clients "
-                              "connecting to a server.\n")
-
-    if "BBDEBUG" in os.environ:
-        level = int(os.environ["BBDEBUG"])
-        if level > configParams.debug:
-            configParams.debug = level
-
-    bb.msg.init_msgconfig(configParams.verbose, configParams.debug,
-                          configParams.debug_domains)
-
-    server_connection, ui_module = setup_bitbake(configParams)
-    # No server connection
-    if server_connection is None:
-        if configParams.status_only:
-            return 1
-        if configParams.kill_server:
-            return 0
-
-    if not configParams.server_only:
-        if configParams.status_only:
-            server_connection.terminate()
-            return 0
-
-        try:
-            for event in bb.event.ui_queue:
-                server_connection.events.queue_event(event)
-            bb.event.ui_queue = []
-
-            return ui_module.main(server_connection.connection, server_connection.events,
-                                  configParams)
-        finally:
-            server_connection.terminate()
-    else:
-        return 0
-
-    return 1
-
-def timestamp():
-    return datetime.datetime.now().strftime('%H:%M:%S.%f')
-
-def setup_bitbake(configParams, extrafeatures=None):
-    # Ensure 
logging messages get sent to the UI as events - handler = bb.event.LogHandler() - if not configParams.status_only: - # In status only mode there are no logs and no UI - logger.addHandler(handler) - - if configParams.dump_signatures: - if extrafeatures is None: - extrafeatures = [] - extrafeatures.append(bb.cooker.CookerFeatures.RECIPE_SIGGEN_INFO) - - if configParams.server_only: - featureset = [] - ui_module = None - else: - ui_module = import_extension_module(bb.ui, configParams.ui, 'main') - # Collect the feature set for the UI - featureset = getattr(ui_module, "featureSet", []) - - if extrafeatures: - for feature in extrafeatures: - if not feature in featureset: - featureset.append(feature) - - server_connection = None - - # Clear away any spurious environment variables while we stoke up the cooker - # (done after import_extension_module() above since for example import gi triggers env var usage) - cleanedvars = bb.utils.clean_environment() - - if configParams.remote_server: - # Connect to a remote XMLRPC server - server_connection = bb.server.xmlrpcclient.connectXMLRPC(configParams.remote_server, featureset, - configParams.observe_only, configParams.xmlrpctoken) - else: - retries = 8 - while retries: - try: - topdir, lock, lockfile = lockBitbake() - sockname = topdir + "/bitbake.sock" - if lock: - if configParams.status_only or configParams.kill_server: - logger.info("bitbake server is not running.") - lock.close() - return None, None - # we start a server with a given featureset - logger.info("Starting bitbake server...") - # Clear the event queue since we already displayed messages - bb.event.ui_queue = [] - server = bb.server.process.BitBakeServer(lock, sockname, featureset, configParams.server_timeout, configParams.xmlrpcinterface, configParams.profile) - - else: - logger.info("Reconnecting to bitbake server...") - if not os.path.exists(sockname): - logger.info("Previous bitbake instance shutting down?, waiting to retry... (%s)" % timestamp()) - procs = bb.server.process.get_lockfile_process_msg(lockfile) - if procs: - logger.info("Processes holding bitbake.lock (missing socket %s):\n%s" % (sockname, procs)) - logger.info("Directory listing: %s" % (str(os.listdir(topdir)))) - i = 0 - lock = None - # Wait for 5s or until we can get the lock - while not lock and i < 50: - time.sleep(0.1) - _, lock, _ = lockBitbake() - i += 1 - if lock: - bb.utils.unlockfile(lock) - raise bb.server.process.ProcessTimeout("Bitbake still shutting down as socket exists but no lock?") - if not configParams.server_only: - server_connection = bb.server.process.connectProcessServer(sockname, featureset) - - if server_connection or configParams.server_only: - break - except BBMainFatal: - raise - except (Exception, bb.server.process.ProcessTimeout, SystemExit) as e: - # SystemExit does not inherit from the Exception class, needs to be included explicitly - if not retries: - raise - retries -= 1 - tryno = 8 - retries - if isinstance(e, (bb.server.process.ProcessTimeout, BrokenPipeError, EOFError, SystemExit)): - logger.info("Retrying server connection (#%d)... (%s)" % (tryno, timestamp())) - else: - logger.info("Retrying server connection (#%d)... 
(%s, %s)" % (tryno, traceback.format_exc(), timestamp())) - - if not retries: - bb.fatal("Unable to connect to bitbake server, or start one (server startup failures would be in bitbake-cookerdaemon.log).") - bb.event.print_ui_queue() - if retries < 5: - time.sleep(5) - - if configParams.kill_server: - server_connection.connection.terminateServer() - server_connection.terminate() - bb.event.ui_queue = [] - logger.info("Terminated bitbake server.") - return None, None - - # Restore the environment in case the UI needs it - for k in cleanedvars: - os.environ[k] = cleanedvars[k] - - logger.removeHandler(handler) - - return server_connection, ui_module - -def lockBitbake(): - topdir = bb.cookerdata.findTopdir() - if not topdir: - bb.error("Unable to find conf/bblayers.conf or conf/bitbake.conf. BBPATH is unset and/or not in a build directory?") - raise BBMainFatal - lockfile = topdir + "/bitbake.lock" - return topdir, bb.utils.lockfile(lockfile, False, False), lockfile - diff --git a/bitbake/lib/bb/methodpool.py b/bitbake/lib/bb/methodpool.py deleted file mode 100644 index 51783acc1b..0000000000 --- a/bitbake/lib/bb/methodpool.py +++ /dev/null @@ -1,27 +0,0 @@ -# -# Copyright (C) 2006 Holger Hans Peter Freyther -# -# SPDX-License-Identifier: GPL-2.0-only -# - -from bb.utils import better_compile, better_exec - -def insert_method(modulename, code, fn, lineno): - """ - Add code of a module should be added. The methods - will be simply added, no checking will be done - """ - comp = better_compile(code, modulename, fn, lineno=lineno) - better_exec(comp, None, code, fn) - -compilecache = {} - -def compile_cache(code): - h = hash(code) - if h in compilecache: - return compilecache[h] - return None - -def compile_cache_add(code, compileobj): - h = hash(code) - compilecache[h] = compileobj diff --git a/bitbake/lib/bb/monitordisk.py b/bitbake/lib/bb/monitordisk.py deleted file mode 100644 index f928210351..0000000000 --- a/bitbake/lib/bb/monitordisk.py +++ /dev/null @@ -1,261 +0,0 @@ -# -# Copyright (C) 2012 Robert Yang -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import os, logging, re -import bb -logger = logging.getLogger("BitBake.Monitor") - -def printErr(info): - logger.error("%s\n Disk space monitor will NOT be enabled" % info) - -def convertGMK(unit): - - """ Convert the space unit G, M, K, the unit is case-insensitive """ - - unitG = re.match(r'([1-9][0-9]*)[gG]\s?$', unit) - if unitG: - return int(unitG.group(1)) * (1024 ** 3) - unitM = re.match(r'([1-9][0-9]*)[mM]\s?$', unit) - if unitM: - return int(unitM.group(1)) * (1024 ** 2) - unitK = re.match(r'([1-9][0-9]*)[kK]\s?$', unit) - if unitK: - return int(unitK.group(1)) * 1024 - unitN = re.match(r'([1-9][0-9]*)\s?$', unit) - if unitN: - return int(unitN.group(1)) - else: - return None - -def getMountedDev(path): - - """ Get the device mounted at the path, uses /proc/mounts """ - - # Get the mount point of the filesystem containing path - # st_dev is the ID of device containing file - parentDev = os.stat(path).st_dev - currentDev = parentDev - # When the current directory's device is different from the - # parent's, then the current directory is a mount point - while parentDev == currentDev: - mountPoint = path - # Use dirname to get the parent's directory - path = os.path.dirname(path) - # Reach the "/" - if path == mountPoint: - break - parentDev= os.stat(path).st_dev - - try: - with open("/proc/mounts", "r") as ifp: - for line in ifp: - procLines = line.rstrip('\n').split() - if procLines[1] == mountPoint: - return procLines[0] - except 
EnvironmentError:
-        pass
-    return None
-
-def getDiskData(BBDirs):
-
-    """Prepare disk data for the disk space monitor"""
-
-    # Save the device IDs; we need each ID to be unique (the dictionary's key
-    # is unique), so that when more than one directory is located on the same
-    # device, we just monitor it once
-    devDict = {}
-    for pathSpaceInode in BBDirs.split():
-        # The input format is: "action,dir,space,inode"; the action and dir
-        # are required, space and inode are optional
-        pathSpaceInodeRe = re.match(r'([^,]*),([^,]*),([^,]*),?(.*)', pathSpaceInode)
-        if not pathSpaceInodeRe:
-            printErr("Invalid value in BB_DISKMON_DIRS: %s" % pathSpaceInode)
-            return None
-
-        action = pathSpaceInodeRe.group(1)
-        if action == "ABORT":
-            # Emit a deprecation warning
-            logger.warnonce("The BB_DISKMON_DIRS \"ABORT\" action has been renamed to \"HALT\", update configuration")
-            action = "HALT"
-
-        if action not in ("HALT", "STOPTASKS", "WARN"):
-            printErr("Unknown disk space monitor action: %s" % action)
-            return None
-
-        path = os.path.realpath(pathSpaceInodeRe.group(2))
-        if not path:
-            printErr("Invalid path value in BB_DISKMON_DIRS: %s" % pathSpaceInode)
-            return None
-
-        # The disk space or inode limit is optional, but it should have a
-        # correct value once it is specified
-        minSpace = pathSpaceInodeRe.group(3)
-        if minSpace:
-            minSpace = convertGMK(minSpace)
-            if not minSpace:
-                printErr("Invalid disk space value in BB_DISKMON_DIRS: %s" % pathSpaceInodeRe.group(3))
-                return None
-        else:
-            # None means that it is not specified
-            minSpace = None
-
-        minInode = pathSpaceInodeRe.group(4)
-        if minInode:
-            minInode = convertGMK(minInode)
-            if not minInode:
-                printErr("Invalid inode value in BB_DISKMON_DIRS: %s" % pathSpaceInodeRe.group(4))
-                return None
-        else:
-            # None means that it is not specified
-            minInode = None
-
-        if minSpace is None and minInode is None:
-            printErr("No disk space or inode value found in BB_DISKMON_DIRS: %s" % pathSpaceInode)
-            return None
-        # mkdir for the directory since it may not exist, for example the
-        # DL_DIR may not exist at the very beginning
-        if not os.path.exists(path):
-            bb.utils.mkdirhier(path)
-        dev = getMountedDev(path)
-        # Use path/action as the key
-        devDict[(path, action)] = [dev, minSpace, minInode]
-
-    return devDict
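Values in BB_DISKMON_DIRS (e.g. "STOPTASKS,${TMPDIR},1G,100K") therefore go through convertGMK() for the space and inode fields. A few spot checks of the unit handling (a sketch, assuming bb.monitordisk is importable):

    from bb.monitordisk import convertGMK

    assert convertGMK("1G") == 1024 ** 3
    assert convertGMK("512M") == 512 * 1024 ** 2
    assert convertGMK("100K") == 100 * 1024
    assert convertGMK("4096") == 4096
    assert convertGMK("2T") is None   # only G/M/K suffixes are recognised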
-def getInterval(configuration):
-
-    """ Get the disk space interval """
-
-    # The default values are 50M and 5K.
-    spaceDefault = 50 * 1024 * 1024
-    inodeDefault = 5 * 1024
-
-    interval = configuration.getVar("BB_DISKMON_WARNINTERVAL")
-    if not interval:
-        return spaceDefault, inodeDefault
-    else:
-        # The disk space or inode interval is optional, but it should
-        # have a correct value once it is specified
-        intervalRe = re.match(r'([^,]*),?\s*(.*)', interval)
-        if intervalRe:
-            intervalSpace = intervalRe.group(1)
-            if intervalSpace:
-                intervalSpace = convertGMK(intervalSpace)
-                if not intervalSpace:
-                    printErr("Invalid disk space interval value in BB_DISKMON_WARNINTERVAL: %s" % intervalRe.group(1))
-                    return None, None
-            else:
-                intervalSpace = spaceDefault
-            intervalInode = intervalRe.group(2)
-            if intervalInode:
-                intervalInode = convertGMK(intervalInode)
-                if not intervalInode:
-                    printErr("Invalid disk inode interval value in BB_DISKMON_WARNINTERVAL: %s" % intervalRe.group(2))
-                    return None, None
-            else:
-                intervalInode = inodeDefault
-            return intervalSpace, intervalInode
-        else:
-            printErr("Invalid interval value in BB_DISKMON_WARNINTERVAL: %s" % interval)
-            return None, None
-
-class diskMonitor:
-
-    """Prepare the disk space monitor data"""
-
-    def __init__(self, configuration):
-
-        self.enableMonitor = False
-        self.configuration = configuration
-
-        BBDirs = configuration.getVar("BB_DISKMON_DIRS") or None
-        if BBDirs:
-            self.devDict = getDiskData(BBDirs)
-            if self.devDict:
-                self.spaceInterval, self.inodeInterval = getInterval(configuration)
-                if self.spaceInterval and self.inodeInterval:
-                    self.enableMonitor = True
-                    # These are for saving the previous disk free space and inodes;
-                    # we use them to avoid printing too many warning messages
-                    self.preFreeS = {}
-                    self.preFreeI = {}
-                    # This is for STOPTASKS and HALT, to avoid printing the message
-                    # repeatedly while waiting for the tasks to finish
-                    self.checked = {}
-                    for k in self.devDict:
-                        self.preFreeS[k] = 0
-                        self.preFreeI[k] = 0
-                        self.checked[k] = False
-                if self.spaceInterval is None and self.inodeInterval is None:
-                    self.enableMonitor = False
-
-    def check(self, rq):
-
-        """ Take action for the monitor """
-
-        if self.enableMonitor:
-            diskUsage = {}
-            for k, attributes in self.devDict.items():
-                path, action = k
-                dev, minSpace, minInode = attributes
-
-                st = os.statvfs(path)
-
-                # The available free space, integer number
-                freeSpace = st.f_bavail * st.f_frsize
-
-                # Send all relevant information in the event. 
- freeSpaceRoot = st.f_bfree * st.f_frsize - totalSpace = st.f_blocks * st.f_frsize - diskUsage[dev] = bb.event.DiskUsageSample(freeSpace, freeSpaceRoot, totalSpace) - - if minSpace and freeSpace < minSpace: - # Always show warning, the self.checked would always be False if the action is WARN - if self.preFreeS[k] == 0 or self.preFreeS[k] - freeSpace > self.spaceInterval and not self.checked[k]: - logger.warning("The free space of %s (%s) is running low (%.3fGB left)" % \ - (path, dev, freeSpace / 1024 / 1024 / 1024.0)) - self.preFreeS[k] = freeSpace - - if action == "STOPTASKS" and not self.checked[k]: - logger.error("No new tasks can be executed since the disk space monitor action is \"STOPTASKS\"!") - self.checked[k] = True - rq.finish_runqueue(False) - bb.event.fire(bb.event.DiskFull(dev, 'disk', freeSpace, path), self.configuration) - elif action == "HALT" and not self.checked[k]: - logger.error("Immediately halt since the disk space monitor action is \"HALT\"!") - self.checked[k] = True - rq.finish_runqueue(True) - bb.event.fire(bb.event.DiskFull(dev, 'disk', freeSpace, path), self.configuration) - - # The free inodes, integer number - freeInode = st.f_favail - - if minInode and freeInode < minInode: - # Some filesystems use dynamic inodes so can't run out. - # This is reported by the inode count being 0 (btrfs) or the free - # inode count being -1 (cephfs). - if st.f_files == 0 or st.f_favail == -1: - self.devDict[k][2] = None - continue - # Always show warning, the self.checked would always be False if the action is WARN - if self.preFreeI[k] == 0 or self.preFreeI[k] - freeInode > self.inodeInterval and not self.checked[k]: - logger.warning("The free inode of %s (%s) is running low (%.3fK left)" % \ - (path, dev, freeInode / 1024.0)) - self.preFreeI[k] = freeInode - - if action == "STOPTASKS" and not self.checked[k]: - logger.error("No new tasks can be executed since the disk space monitor action is \"STOPTASKS\"!") - self.checked[k] = True - rq.finish_runqueue(False) - bb.event.fire(bb.event.DiskFull(dev, 'inode', freeInode, path), self.configuration) - elif action == "HALT" and not self.checked[k]: - logger.error("Immediately halt since the disk space monitor action is \"HALT\"!") - self.checked[k] = True - rq.finish_runqueue(True) - bb.event.fire(bb.event.DiskFull(dev, 'inode', freeInode, path), self.configuration) - - bb.event.fire(bb.event.MonitorDiskEvent(diskUsage), self.configuration) - return diff --git a/bitbake/lib/bb/msg.py b/bitbake/lib/bb/msg.py deleted file mode 100644 index 4f616ff42e..0000000000 --- a/bitbake/lib/bb/msg.py +++ /dev/null @@ -1,353 +0,0 @@ -""" -BitBake 'msg' implementation - -Message handling infrastructure for bitbake - -""" - -# Copyright (C) 2006 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import sys -import copy -import logging -import logging.config -import os -from itertools import groupby -import bb -import bb.event - -class BBLogFormatter(logging.Formatter): - """Formatter which ensures that our 'plain' messages (logging.INFO + 1) are used as is""" - - DEBUG3 = logging.DEBUG - 2 - DEBUG2 = logging.DEBUG - 1 - DEBUG = logging.DEBUG - VERBOSE = logging.INFO - 1 - NOTE = logging.INFO - PLAIN = logging.INFO + 1 - VERBNOTE = logging.INFO + 2 - ERROR = logging.ERROR - ERRORONCE = logging.ERROR - 1 - WARNING = logging.WARNING - WARNONCE = logging.WARNING - 1 - CRITICAL = logging.CRITICAL - - levelnames = { - DEBUG3 : 'DEBUG', - DEBUG2 : 'DEBUG', - DEBUG : 'DEBUG', - VERBOSE: 'NOTE', - NOTE : 'NOTE', - PLAIN : '', - VERBNOTE: 
'NOTE', - WARNING : 'WARNING', - WARNONCE : 'WARNING', - ERROR : 'ERROR', - ERRORONCE : 'ERROR', - CRITICAL: 'ERROR', - } - - color_enabled = False - BASECOLOR, BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = list(range(29,38)) - - COLORS = { - DEBUG3 : CYAN, - DEBUG2 : CYAN, - DEBUG : CYAN, - VERBOSE : BASECOLOR, - NOTE : BASECOLOR, - PLAIN : BASECOLOR, - VERBNOTE: BASECOLOR, - WARNING : YELLOW, - WARNONCE : YELLOW, - ERROR : RED, - ERRORONCE : RED, - CRITICAL: RED, - } - - BLD = '\033[1;%dm' - STD = '\033[%dm' - RST = '\033[0m' - - def getLevelName(self, levelno): - try: - return self.levelnames[levelno] - except KeyError: - self.levelnames[levelno] = value = 'Level %d' % levelno - return value - - def format(self, record): - record.levelname = self.getLevelName(record.levelno) - if record.levelno == self.PLAIN: - msg = record.getMessage() - else: - if self.color_enabled: - record = self.colorize(record) - msg = logging.Formatter.format(self, record) - if hasattr(record, 'bb_exc_formatted'): - msg += '\n' + ''.join(record.bb_exc_formatted) - return msg - - def colorize(self, record): - color = self.COLORS[record.levelno] - if self.color_enabled and color is not None: - record = copy.copy(record) - record.levelname = "".join([self.BLD % color, record.levelname, self.RST]) - record.msg = "".join([self.STD % color, record.msg, self.RST]) - return record - - def enable_color(self): - self.color_enabled = True - - def __repr__(self): - return "%s fmt='%s' color=%s" % (self.__class__.__name__, self._fmt, "True" if self.color_enabled else "False") - -class BBLogFilter(object): - def __init__(self, handler, level, debug_domains): - self.stdlevel = level - self.debug_domains = debug_domains - loglevel = level - for domain in debug_domains: - if debug_domains[domain] < loglevel: - loglevel = debug_domains[domain] - handler.setLevel(loglevel) - handler.addFilter(self) - - def filter(self, record): - if record.levelno >= self.stdlevel: - return True - if record.name in self.debug_domains and record.levelno >= self.debug_domains[record.name]: - return True - return False - -class LogFilterShowOnce(logging.Filter): - def __init__(self): - self.seen_warnings = set() - self.seen_errors = set() - - def filter(self, record): - if record.levelno == bb.msg.BBLogFormatter.WARNONCE: - if record.msg in self.seen_warnings: - return False - self.seen_warnings.add(record.msg) - if record.levelno == bb.msg.BBLogFormatter.ERRORONCE: - if record.msg in self.seen_errors: - return False - self.seen_errors.add(record.msg) - return True - -class LogFilterGEQLevel(logging.Filter): - def __init__(self, level): - self.strlevel = str(level) - self.level = stringToLevel(level) - - def __repr__(self): - return "%s level >= %s (%d)" % (self.__class__.__name__, self.strlevel, self.level) - - def filter(self, record): - return (record.levelno >= self.level) - -class LogFilterLTLevel(logging.Filter): - def __init__(self, level): - self.strlevel = str(level) - self.level = stringToLevel(level) - - def __repr__(self): - return "%s level < %s (%d)" % (self.__class__.__name__, self.strlevel, self.level) - - def filter(self, record): - return (record.levelno < self.level) - -# Message control functions -# - -loggerDefaultLogLevel = BBLogFormatter.NOTE -loggerDefaultDomains = {} - -def init_msgconfig(verbose, debug, debug_domains=None): - """ - Set default verbosity and debug levels config the logger - """ - if debug: - bb.msg.loggerDefaultLogLevel = BBLogFormatter.DEBUG - debug + 1 - elif verbose: - 
bb.msg.loggerDefaultLogLevel = BBLogFormatter.VERBOSE - else: - bb.msg.loggerDefaultLogLevel = BBLogFormatter.NOTE - - bb.msg.loggerDefaultDomains = {} - if debug_domains: - for (domainarg, iterator) in groupby(debug_domains): - dlevel = len(tuple(iterator)) - bb.msg.loggerDefaultDomains["BitBake.%s" % domainarg] = logging.DEBUG - dlevel + 1 - -def constructLogOptions(): - return loggerDefaultLogLevel, loggerDefaultDomains - -def addDefaultlogFilter(handler, cls = BBLogFilter, forcelevel=None): - level, debug_domains = constructLogOptions() - - if forcelevel is not None: - level = forcelevel - - cls(handler, level, debug_domains) - -def stringToLevel(level): - try: - return int(level) - except ValueError: - pass - - try: - return getattr(logging, level) - except AttributeError: - pass - - return getattr(BBLogFormatter, level) - -# -# Message handling functions -# - -def fatal(msgdomain, msg): - if msgdomain: - logger = logging.getLogger("BitBake.%s" % msgdomain) - else: - logger = logging.getLogger("BitBake") - logger.critical(msg) - sys.exit(1) - -def logger_create(name, output=sys.stderr, level=logging.INFO, preserve_handlers=False, color='auto'): - """Standalone logger creation function""" - logger = logging.getLogger(name) - console = logging.StreamHandler(output) - console.addFilter(bb.msg.LogFilterShowOnce()) - format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s") - if color == 'always' or (color == 'auto' and output.isatty() and os.environ.get('NO_COLOR', '') == ''): - format.enable_color() - console.setFormatter(format) - if preserve_handlers: - logger.addHandler(console) - else: - logger.handlers = [console] - logger.setLevel(level) - return logger - -def has_console_handler(logger): - for handler in logger.handlers: - if isinstance(handler, logging.StreamHandler): - if handler.stream in [sys.stderr, sys.stdout]: - return True - return False - -def mergeLoggingConfig(logconfig, userconfig): - logconfig = copy.deepcopy(logconfig) - userconfig = copy.deepcopy(userconfig) - - # Merge config with the default config - if userconfig.get('version') != logconfig['version']: - raise BaseException("Bad user configuration version. Expected %r, got %r" % (logconfig['version'], userconfig.get('version'))) - - # Set some defaults to make merging easier - userconfig.setdefault("loggers", {}) - - # If a handler, formatter, or filter is defined in the user - # config, it will replace an existing one in the default config - for k in ("handlers", "formatters", "filters"): - logconfig.setdefault(k, {}).update(userconfig.get(k, {})) - - seen_loggers = set() - for name, l in logconfig["loggers"].items(): - # If the merge option is set, merge the handlers and - # filters. 
Otherwise, if it is False, this logger won't get - # add to the set of seen loggers and will replace the - # existing one - if l.get('bitbake_merge', True): - ulogger = userconfig["loggers"].setdefault(name, {}) - ulogger.setdefault("handlers", []) - ulogger.setdefault("filters", []) - - # Merge lists - l.setdefault("handlers", []).extend(ulogger["handlers"]) - l.setdefault("filters", []).extend(ulogger["filters"]) - - # Replace other properties if present - if "level" in ulogger: - l["level"] = ulogger["level"] - - if "propagate" in ulogger: - l["propagate"] = ulogger["propagate"] - - seen_loggers.add(name) - - # Add all loggers present in the user config, but not any that - # have already been processed - for name in set(userconfig["loggers"].keys()) - seen_loggers: - logconfig["loggers"][name] = userconfig["loggers"][name] - - return logconfig - -def setLoggingConfig(defaultconfig, userconfigfile=None): - logconfig = copy.deepcopy(defaultconfig) - - if userconfigfile: - with open(os.path.normpath(userconfigfile), 'r') as f: - if userconfigfile.endswith('.yml') or userconfigfile.endswith('.yaml'): - import yaml - userconfig = yaml.safe_load(f) - elif userconfigfile.endswith('.json') or userconfigfile.endswith('.cfg'): - import json - userconfig = json.load(f) - else: - raise BaseException("Unrecognized file format: %s" % userconfigfile) - - if userconfig.get('bitbake_merge', True): - logconfig = mergeLoggingConfig(logconfig, userconfig) - else: - # Replace the entire default config - logconfig = userconfig - - # Convert all level parameters to integers in case users want to use the - # bitbake defined level names - for name, h in logconfig["handlers"].items(): - if "level" in h: - h["level"] = bb.msg.stringToLevel(h["level"]) - - # Every handler needs its own instance of the once filter. - once_filter_name = name + ".showonceFilter" - logconfig.setdefault("filters", {})[once_filter_name] = { - "()": "bb.msg.LogFilterShowOnce", - } - h.setdefault("filters", []).append(once_filter_name) - - for l in logconfig["loggers"].values(): - if "level" in l: - l["level"] = bb.msg.stringToLevel(l["level"]) - - conf = logging.config.dictConfigClass(logconfig) - conf.configure() - - # The user may have specified logging domains they want at a higher debug - # level than the standard. - for name, l in logconfig["loggers"].items(): - if not name.startswith("BitBake."): - continue - - if not "level" in l: - continue - - curlevel = bb.msg.loggerDefaultDomains.get(name) - # Note: level parameter should already be a int because of conversion - # above - newlevel = int(l["level"]) - if curlevel is None or newlevel < curlevel: - bb.msg.loggerDefaultDomains[name] = newlevel - - # TODO: I don't think that setting the global log level should be necessary - #if newlevel < bb.msg.loggerDefaultLogLevel: - # bb.msg.loggerDefaultLogLevel = newlevel - - return conf diff --git a/bitbake/lib/bb/namedtuple_with_abc.py b/bitbake/lib/bb/namedtuple_with_abc.py deleted file mode 100644 index e46dbf0849..0000000000 --- a/bitbake/lib/bb/namedtuple_with_abc.py +++ /dev/null @@ -1,249 +0,0 @@ -# http://code.activestate.com/recipes/577629-namedtupleabc-abstract-base-class-mix-in-for-named/ -# Copyright (c) 2011 Jan Kaliszewski (zuo). Available under the MIT License. -# -# SPDX-License-Identifier: MIT -# - -""" -namedtuple_with_abc.py: -* named tuple mix-in + ABC (abstract base class) recipe, -* works under Python 2.6, 2.7 as well as 3.x. 
- -Import this module to patch collections.namedtuple() factory function --- enriching it with the 'abc' attribute (an abstract base class + mix-in -for named tuples) and decorating it with a wrapper that registers each -newly created named tuple as a subclass of namedtuple.abc. - -How to import: - import collections, namedtuple_with_abc -or: - import namedtuple_with_abc - from collections import namedtuple - # ^ in this variant you must import namedtuple function - # *after* importing namedtuple_with_abc module -or simply: - from namedtuple_with_abc import namedtuple - -Simple usage example: - class Credentials(namedtuple.abc): - _fields = 'username password' - def __str__(self): - return ('{0.__class__.__name__}' - '(username={0.username}, password=...)'.format(self)) - print(Credentials("alice", "Alice's password")) - -For more advanced examples -- see below the "if __name__ == '__main__':". -""" - -import collections -from abc import ABCMeta, abstractproperty -from functools import wraps -from sys import version_info - -__all__ = ('namedtuple',) -_namedtuple = collections.namedtuple - - -class _NamedTupleABCMeta(ABCMeta): - '''The metaclass for the abstract base class + mix-in for named tuples.''' - def __new__(mcls, name, bases, namespace): - fields = namespace.get('_fields') - for base in bases: - if fields is not None: - break - fields = getattr(base, '_fields', None) - if not isinstance(fields, abstractproperty): - basetuple = _namedtuple(name, fields) - bases = (basetuple,) + bases - namespace.pop('_fields', None) - namespace.setdefault('__doc__', basetuple.__doc__) - namespace.setdefault('__slots__', ()) - return ABCMeta.__new__(mcls, name, bases, namespace) - - -class _NamedTupleABC(metaclass=_NamedTupleABCMeta): - '''The abstract base class + mix-in for named tuples.''' - _fields = abstractproperty() - - -_namedtuple.abc = _NamedTupleABC -#_NamedTupleABC.register(type(version_info)) # (and similar, in the future...) - -@wraps(_namedtuple) -def namedtuple(*args, **kwargs): - '''Named tuple factory with namedtuple.abc subclass registration.''' - cls = _namedtuple(*args, **kwargs) - _NamedTupleABC.register(cls) - return cls - -collections.namedtuple = namedtuple - - - - -if __name__ == '__main__': - - '''Examples and explanations''' - - # Simple usage - - class MyRecord(namedtuple.abc): - _fields = 'x y z' # such form will be transformed into ('x', 'y', 'z') - def _my_custom_method(self): - return list(self._asdict().items()) - # (the '_fields' attribute belongs to the named tuple public API anyway) - - rec = MyRecord(1, 2, 3) - print(rec) - print(rec._my_custom_method()) - print(rec._replace(y=222)) - print(rec._replace(y=222)._my_custom_method()) - - # Custom abstract classes... 
- - class MyAbstractRecord(namedtuple.abc): - def _my_custom_method(self): - return list(self._asdict().items()) - - try: - MyAbstractRecord() # (abstract classes cannot be instantiated) - except TypeError as exc: - print(exc) - - class AnotherAbstractRecord(MyAbstractRecord): - def __str__(self): - return '<<<{0}>>>'.format(super(AnotherAbstractRecord, - self).__str__()) - - # ...and their non-abstract subclasses - - class MyRecord2(MyAbstractRecord): - _fields = 'a, b' - - class MyRecord3(AnotherAbstractRecord): - _fields = 'p', 'q', 'r' - - rec2 = MyRecord2('foo', 'bar') - print(rec2) - print(rec2._my_custom_method()) - print(rec2._replace(b=222)) - print(rec2._replace(b=222)._my_custom_method()) - - rec3 = MyRecord3('foo', 'bar', 'baz') - print(rec3) - print(rec3._my_custom_method()) - print(rec3._replace(q=222)) - print(rec3._replace(q=222)._my_custom_method()) - - # You can also subclass non-abstract ones... - - class MyRecord33(MyRecord3): - def __str__(self): - return '< {0!r}, ..., {0!r} >'.format(self.p, self.r) - - rec33 = MyRecord33('foo', 'bar', 'baz') - print(rec33) - print(rec33._my_custom_method()) - print(rec33._replace(q=222)) - print(rec33._replace(q=222)._my_custom_method()) - - # ...and even override the magic '_fields' attribute again - - class MyRecord345(MyRecord3): - _fields = 'e f g h i j k' - - rec345 = MyRecord345(1, 2, 3, 4, 3, 2, 1) - print(rec345) - print(rec345._my_custom_method()) - print(rec345._replace(f=222)) - print(rec345._replace(f=222)._my_custom_method()) - - # Mixing-in some other classes is also possible: - - class MyMixIn(object): - def method(self): - return "MyMixIn.method() called" - def _my_custom_method(self): - return "MyMixIn._my_custom_method() called" - def count(self, item): - return "MyMixIn.count({0}) called".format(item) - def _asdict(self): # (cannot override a namedtuple method, see below) - return "MyMixIn._asdict() called" - - class MyRecord4(MyRecord33, MyMixIn): # mix-in on the right - _fields = 'j k l x' - - class MyRecord5(MyMixIn, MyRecord33): # mix-in on the left - _fields = 'j k l x y' - - rec4 = MyRecord4(1, 2, 3, 2) - print(rec4) - print(rec4.method()) - print(rec4._my_custom_method()) # MyRecord33's - print(rec4.count(2)) # tuple's - print(rec4._replace(k=222)) - print(rec4._replace(k=222).method()) - print(rec4._replace(k=222)._my_custom_method()) # MyRecord33's - print(rec4._replace(k=222).count(8)) # tuple's - - rec5 = MyRecord5(1, 2, 3, 2, 1) - print(rec5) - print(rec5.method()) - print(rec5._my_custom_method()) # MyMixIn's - print(rec5.count(2)) # MyMixIn's - print(rec5._replace(k=222)) - print(rec5._replace(k=222).method()) - print(rec5._replace(k=222)._my_custom_method()) # MyMixIn's - print(rec5._replace(k=222).count(2)) # MyMixIn's - - # Note that behavior: the standard namedtuple methods cannot be - # overridden by a foreign mix-in -- even if the mix-in is declared - # as the leftmost base class (but, obviously, you can override them - # in the defined class or its subclasses): - - print(rec4._asdict()) # (returns a dict, not "MyMixIn._asdict() called") - print(rec5._asdict()) # (returns a dict, not "MyMixIn._asdict() called") - - class MyRecord6(MyRecord33): - _fields = 'j k l x y z' - def _asdict(self): - return "MyRecord6._asdict() called" - rec6 = MyRecord6(1, 2, 3, 1, 2, 3) - print(rec6._asdict()) # (this returns "MyRecord6._asdict() called") - - # All that record classes are real subclasses of namedtuple.abc: - - assert issubclass(MyRecord, namedtuple.abc) - assert issubclass(MyAbstractRecord, 
namedtuple.abc) - assert issubclass(AnotherAbstractRecord, namedtuple.abc) - assert issubclass(MyRecord2, namedtuple.abc) - assert issubclass(MyRecord3, namedtuple.abc) - assert issubclass(MyRecord33, namedtuple.abc) - assert issubclass(MyRecord345, namedtuple.abc) - assert issubclass(MyRecord4, namedtuple.abc) - assert issubclass(MyRecord5, namedtuple.abc) - assert issubclass(MyRecord6, namedtuple.abc) - - # ...but abstract ones are not subclasses of tuple - # (and this is what you probably want): - - assert not issubclass(MyAbstractRecord, tuple) - assert not issubclass(AnotherAbstractRecord, tuple) - - assert issubclass(MyRecord, tuple) - assert issubclass(MyRecord2, tuple) - assert issubclass(MyRecord3, tuple) - assert issubclass(MyRecord33, tuple) - assert issubclass(MyRecord345, tuple) - assert issubclass(MyRecord4, tuple) - assert issubclass(MyRecord5, tuple) - assert issubclass(MyRecord6, tuple) - - # Named tuple classes created with namedtuple() factory function - # (in the "traditional" way) are registered as "virtual" subclasses - # of namedtuple.abc: - - MyTuple = namedtuple('MyTuple', 'a b c') - mt = MyTuple(1, 2, 3) - assert issubclass(MyTuple, namedtuple.abc) - assert isinstance(mt, namedtuple.abc) diff --git a/bitbake/lib/bb/parse/__init__.py b/bitbake/lib/bb/parse/__init__.py deleted file mode 100644 index d428d8a4b4..0000000000 --- a/bitbake/lib/bb/parse/__init__.py +++ /dev/null @@ -1,216 +0,0 @@ -""" -BitBake Parsers - -File parsers for the BitBake build tools. - -""" - - -# Copyright (C) 2003, 2004 Chris Larson -# Copyright (C) 2003, 2004 Phil Blundell -# -# SPDX-License-Identifier: GPL-2.0-only -# -# Based on functions from the base bb module, Copyright 2003 Holger Schurig -# - -handlers = [] - -import errno -import logging -import os -import stat -import bb -import bb.utils -import bb.siggen - -logger = logging.getLogger("BitBake.Parsing") - -class ParseError(Exception): - """Exception raised when parsing fails""" - def __init__(self, msg, filename, lineno=0): - self.msg = msg - self.filename = filename - self.lineno = lineno - Exception.__init__(self, msg, filename, lineno) - - def __str__(self): - if self.lineno: - return "ParseError at %s:%d: %s" % (self.filename, self.lineno, self.msg) - else: - return "ParseError in %s: %s" % (self.filename, self.msg) - -class SkipRecipe(Exception): - """Exception raised to skip this recipe""" - -class SkipPackage(SkipRecipe): - """Exception raised to skip this recipe (use SkipRecipe in new code)""" - -__mtime_cache = {} -def cached_mtime(f): - if f not in __mtime_cache: - res = os.stat(f) - __mtime_cache[f] = (res.st_mtime_ns, res.st_size, res.st_ino) - return __mtime_cache[f] - -def cached_mtime_noerror(f): - if f not in __mtime_cache: - try: - res = os.stat(f) - __mtime_cache[f] = (res.st_mtime_ns, res.st_size, res.st_ino) - except OSError: - return 0 - return __mtime_cache[f] - -def check_mtime(f, mtime): - try: - res = os.stat(f) - current_mtime = (res.st_mtime_ns, res.st_size, res.st_ino) - __mtime_cache[f] = current_mtime - except OSError: - current_mtime = 0 - return current_mtime == mtime - -def update_mtime(f): - try: - res = os.stat(f) - __mtime_cache[f] = (res.st_mtime_ns, res.st_size, res.st_ino) - except OSError: - if f in __mtime_cache: - del __mtime_cache[f] - return 0 - return __mtime_cache[f] - -def update_cache(f): - if f in __mtime_cache: - logger.debug("Updating mtime cache for %s" % f) - update_mtime(f) - -def clear_cache(): - global __mtime_cache - __mtime_cache = {} - -def mark_dependency(d, f): - if 
f.startswith('./'): - f = "%s/%s" % (os.getcwd(), f[2:]) - deps = (d.getVar('__depends', False) or []) - s = (f, cached_mtime_noerror(f)) - if s not in deps: - deps.append(s) - d.setVar('__depends', deps) - -def check_dependency(d, f): - s = (f, cached_mtime_noerror(f)) - deps = (d.getVar('__depends', False) or []) - return s in deps - -def supports(fn, data): - """Returns true if we have a handler for this file, false otherwise""" - for h in handlers: - if h['supports'](fn, data): - return 1 - return 0 - -def handle(fn, data, include=0, baseconfig=False): - """Call the handler that is appropriate for this file""" - for h in handlers: - if h['supports'](fn, data): - with data.inchistory.include(fn): - return h['handle'](fn, data, include, baseconfig) - raise ParseError("not a BitBake file", fn) - -def init(fn, data): - for h in handlers: - if h['supports'](fn): - return h['init'](data) - -def init_parser(d): - if hasattr(bb.parse, "siggen"): - bb.parse.siggen.exit() - bb.parse.siggen = bb.siggen.init(d) - -def resolve_file(fn, d): - if not os.path.isabs(fn): - bbpath = d.getVar("BBPATH") - newfn, attempts = bb.utils.which(bbpath, fn, history=True) - for af in attempts: - mark_dependency(d, af) - if not newfn: - raise IOError(errno.ENOENT, "file %s not found in %s" % (fn, bbpath)) - fn = newfn - else: - mark_dependency(d, fn) - - if not os.path.isfile(fn): - raise IOError(errno.ENOENT, "file %s not found" % fn) - - return fn - -# Used by OpenEmbedded metadata -__pkgsplit_cache__={} -def vars_from_file(mypkg, d): - if not mypkg or not mypkg.endswith((".bb", ".bbappend")): - return (None, None, None) - if mypkg in __pkgsplit_cache__: - return __pkgsplit_cache__[mypkg] - - myfile = os.path.splitext(os.path.basename(mypkg)) - parts = myfile[0].split('_') - __pkgsplit_cache__[mypkg] = parts - if len(parts) > 3: - raise ParseError("Unable to generate default variables from filename (too many underscores)", mypkg) - exp = 3 - len(parts) - tmplist = [] - while exp != 0: - exp -= 1 - tmplist.append(None) - parts.extend(tmplist) - return parts - -def get_file_depends(d): - '''Return the dependent files''' - dep_files = [] - depends = d.getVar('__base_depends', False) or [] - depends = depends + (d.getVar('__depends', False) or []) - for (fn, _) in depends: - dep_files.append(os.path.abspath(fn)) - return " ".join(dep_files) - -def vardeps(*varnames): - """ - Function decorator that can be used to instruct the bitbake dependency - parsing to add a dependency on the specified variables names - - Example: - - @bb.parse.vardeps("FOO", "BAR") - def my_function(): - ... - - """ - def inner(f): - if not hasattr(f, "bb_vardeps"): - f.bb_vardeps = set() - f.bb_vardeps |= set(varnames) - return f - return inner - -def vardepsexclude(*varnames): - """ - Function decorator that can be used to instruct the bitbake dependency - parsing to ignore dependencies on the specified variable names in the code - - Example: - - @bb.parse.vardepsexclude("FOO", "BAR") - def my_function(): - ... 
- """ - def inner(f): - if not hasattr(f, "bb_vardepsexclude"): - f.bb_vardepsexclude = set() - f.bb_vardepsexclude |= set(varnames) - return f - return inner - -from bb.parse.parse_py import __version__, ConfHandler, BBHandler diff --git a/bitbake/lib/bb/parse/ast.py b/bitbake/lib/bb/parse/ast.py deleted file mode 100644 index cfead466e1..0000000000 --- a/bitbake/lib/bb/parse/ast.py +++ /dev/null @@ -1,620 +0,0 @@ -""" - AbstractSyntaxTree classes for the Bitbake language -""" - -# Copyright (C) 2003, 2004 Chris Larson -# Copyright (C) 2003, 2004 Phil Blundell -# Copyright (C) 2009 Holger Hans Peter Freyther -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import sys -import bb -from bb import methodpool -from bb.parse import logger - -class StatementGroup(list): - def eval(self, data): - for statement in self: - statement.eval(data) - -class AstNode(object): - def __init__(self, filename, lineno): - self.filename = filename - self.lineno = lineno - -class IncludeNode(AstNode): - def __init__(self, filename, lineno, what_file, force): - AstNode.__init__(self, filename, lineno) - self.what_file = what_file - self.force = force - - def eval(self, data): - """ - Include the file and evaluate the statements - """ - s = data.expand(self.what_file) - logger.debug2("CONF %s:%s: including %s", self.filename, self.lineno, s) - - # TODO: Cache those includes... maybe not here though - if self.force: - bb.parse.ConfHandler.include(self.filename, s, self.lineno, data, "include required") - else: - bb.parse.ConfHandler.include(self.filename, s, self.lineno, data, False) - -class IncludeAllNode(AstNode): - def __init__(self, filename, lineno, what_file): - AstNode.__init__(self, filename, lineno) - self.what_file = what_file - - def eval(self, data): - """ - Include the file and evaluate the statements - """ - s = data.expand(self.what_file) - logger.debug2("CONF %s:%s: including %s", self.filename, self.lineno, s) - - for path in data.getVar("BBPATH").split(":"): - bb.parse.ConfHandler.include(self.filename, os.path.join(path, s), self.lineno, data, False) - -class ExportNode(AstNode): - def __init__(self, filename, lineno, var): - AstNode.__init__(self, filename, lineno) - self.var = var - - def eval(self, data): - data.setVarFlag(self.var, "export", 1, op = 'exported') - -class UnsetNode(AstNode): - def __init__(self, filename, lineno, var): - AstNode.__init__(self, filename, lineno) - self.var = var - - def eval(self, data): - loginfo = { - 'variable': self.var, - 'file': self.filename, - 'line': self.lineno, - } - data.delVar(self.var,**loginfo) - -class UnsetFlagNode(AstNode): - def __init__(self, filename, lineno, var, flag): - AstNode.__init__(self, filename, lineno) - self.var = var - self.flag = flag - - def eval(self, data): - loginfo = { - 'variable': self.var, - 'file': self.filename, - 'line': self.lineno, - } - data.delVarFlag(self.var, self.flag, **loginfo) - -class DataNode(AstNode): - """ - Various data related updates. For the sake of sanity - we have one class doing all this. This means that all - this need to be re-evaluated... we might be able to do - that faster with multiple classes. 
- """ - def __init__(self, filename, lineno, groupd): - AstNode.__init__(self, filename, lineno) - self.groupd = groupd - - def getFunc(self, key, data): - if 'flag' in self.groupd and self.groupd['flag'] is not None: - return data.getVarFlag(key, self.groupd['flag'], expand=False, noweakdefault=True) - else: - return data.getVar(key, False, noweakdefault=True, parsing=True) - - def eval(self, data): - groupd = self.groupd - key = groupd["var"] - loginfo = { - 'variable': key, - 'file': self.filename, - 'line': self.lineno, - } - if "exp" in groupd and groupd["exp"] is not None: - data.setVarFlag(key, "export", 1, op = 'exported', **loginfo) - - op = "set" - if "ques" in groupd and groupd["ques"] is not None: - val = self.getFunc(key, data) - op = "set?" - if val is None: - val = groupd["value"] - elif "colon" in groupd and groupd["colon"] is not None: - e = data.createCopy() - op = "immediate" - val = e.expand(groupd["value"], key + "[:=]") - elif "append" in groupd and groupd["append"] is not None: - op = "append" - val = "%s %s" % ((self.getFunc(key, data) or ""), groupd["value"]) - elif "prepend" in groupd and groupd["prepend"] is not None: - op = "prepend" - val = "%s %s" % (groupd["value"], (self.getFunc(key, data) or "")) - elif "postdot" in groupd and groupd["postdot"] is not None: - op = "postdot" - val = "%s%s" % ((self.getFunc(key, data) or ""), groupd["value"]) - elif "predot" in groupd and groupd["predot"] is not None: - op = "predot" - val = "%s%s" % (groupd["value"], (self.getFunc(key, data) or "")) - else: - val = groupd["value"] - - if ":append" in key or ":remove" in key or ":prepend" in key: - if op in ["append", "prepend", "postdot", "predot", "ques"]: - bb.warn(key + " " + groupd[op] + " is not a recommended operator combination, please replace it.") - - flag = None - if 'flag' in groupd and groupd['flag'] is not None: - if groupd["lazyques"]: - flag = "_defaultval_flag_"+groupd['flag'] - else: - flag = groupd['flag'] - elif groupd["lazyques"]: - flag = "_defaultval" - - loginfo['op'] = op - loginfo['detail'] = groupd["value"] - - if flag: - data.setVarFlag(key, flag, val, **loginfo) - else: - data.setVar(key, val, parsing=True, **loginfo) - -class MethodNode(AstNode): - tr_tbl = str.maketrans('/.+-@%&~', '________') - - def __init__(self, filename, lineno, func_name, body, python, fakeroot): - AstNode.__init__(self, filename, lineno) - self.func_name = func_name - self.body = body - self.python = python - self.fakeroot = fakeroot - - def eval(self, data): - text = '\n'.join(self.body) - funcname = self.func_name - if self.func_name == "__anonymous": - funcname = ("__anon_%s_%s" % (self.lineno, self.filename.translate(MethodNode.tr_tbl))) - self.python = True - text = "def %s(d):\n" % (funcname) + text - bb.methodpool.insert_method(funcname, text, self.filename, self.lineno - len(self.body) - 1) - anonfuncs = data.getVar('__BBANONFUNCS', False) or [] - anonfuncs.append(funcname) - data.setVar('__BBANONFUNCS', anonfuncs) - if data.getVar(funcname, False): - # clean up old version of this piece of metadata, as its - # flags could cause problems - data.delVarFlag(funcname, 'python') - data.delVarFlag(funcname, 'fakeroot') - if self.python: - data.setVarFlag(funcname, "python", "1") - if self.fakeroot: - data.setVarFlag(funcname, "fakeroot", "1") - data.setVarFlag(funcname, "func", 1) - data.setVar(funcname, text, parsing=True) - data.setVarFlag(funcname, 'filename', self.filename) - data.setVarFlag(funcname, 'lineno', str(self.lineno - len(self.body))) - -class 
PythonMethodNode(AstNode): - def __init__(self, filename, lineno, function, modulename, body): - AstNode.__init__(self, filename, lineno) - self.function = function - self.modulename = modulename - self.body = body - - def eval(self, data): - # Note we will add root to parsedmethods after having parse - # 'this' file. This means we will not parse methods from - # bb classes twice - text = '\n'.join(self.body) - bb.methodpool.insert_method(self.modulename, text, self.filename, self.lineno - len(self.body) - 1) - data.setVarFlag(self.function, "func", 1) - data.setVarFlag(self.function, "python", 1) - data.setVar(self.function, text, parsing=True) - data.setVarFlag(self.function, 'filename', self.filename) - data.setVarFlag(self.function, 'lineno', str(self.lineno - len(self.body) - 1)) - -class ExportFuncsNode(AstNode): - def __init__(self, filename, lineno, fns, classname): - AstNode.__init__(self, filename, lineno) - self.n = fns.split() - self.classname = classname - - def eval(self, data): - - sentinel = " # Export function set\n" - for func in self.n: - calledfunc = self.classname + "_" + func - - basevar = data.getVar(func, False) - if basevar and sentinel not in basevar: - continue - - if data.getVar(func, False): - data.setVarFlag(func, 'python', None) - data.setVarFlag(func, 'func', None) - - for flag in [ "func", "python" ]: - if data.getVarFlag(calledfunc, flag, False): - data.setVarFlag(func, flag, data.getVarFlag(calledfunc, flag, False)) - for flag in ["dirs", "cleandirs", "fakeroot"]: - if data.getVarFlag(func, flag, False): - data.setVarFlag(calledfunc, flag, data.getVarFlag(func, flag, False)) - data.setVarFlag(func, "filename", "autogenerated") - data.setVarFlag(func, "lineno", 1) - - if data.getVarFlag(calledfunc, "python", False): - data.setVar(func, sentinel + " bb.build.exec_func('" + calledfunc + "', d)\n", parsing=True) - else: - if "-" in self.classname: - bb.fatal("The classname %s contains a dash character and is calling an sh function %s using EXPORT_FUNCTIONS. Since a dash is illegal in sh function names, this cannot work, please rename the class or don't use EXPORT_FUNCTIONS." 
% (self.classname, calledfunc)) - data.setVar(func, sentinel + " " + calledfunc + "\n", parsing=True) - -class AddTaskNode(AstNode): - def __init__(self, filename, lineno, tasks, before, after): - AstNode.__init__(self, filename, lineno) - self.tasks = tasks - self.before = before - self.after = after - - def eval(self, data): - tasks = self.tasks.split() - for task in tasks: - bb.build.addtask(task, self.before, self.after, data) - -class DelTaskNode(AstNode): - def __init__(self, filename, lineno, tasks): - AstNode.__init__(self, filename, lineno) - self.tasks = tasks - - def eval(self, data): - tasks = data.expand(self.tasks).split() - for task in tasks: - bb.build.deltask(task, data) - -class BBHandlerNode(AstNode): - def __init__(self, filename, lineno, fns): - AstNode.__init__(self, filename, lineno) - self.hs = fns.split() - - def eval(self, data): - bbhands = data.getVar('__BBHANDLERS', False) or [] - for h in self.hs: - bbhands.append(h) - data.setVarFlag(h, "handler", 1) - data.setVar('__BBHANDLERS', bbhands) - -class PyLibNode(AstNode): - def __init__(self, filename, lineno, libdir, namespace): - AstNode.__init__(self, filename, lineno) - self.libdir = libdir - self.namespace = namespace - - def eval(self, data): - global_mods = (data.getVar("BB_GLOBAL_PYMODULES") or "").split() - for m in global_mods: - if m not in bb.utils._context: - bb.utils._context[m] = __import__(m) - - libdir = data.expand(self.libdir) - if libdir not in sys.path: - sys.path.append(libdir) - try: - bb.utils._context[self.namespace] = __import__(self.namespace) - toimport = getattr(bb.utils._context[self.namespace], "BBIMPORTS", []) - for i in toimport: - bb.utils._context[self.namespace] = __import__(self.namespace + "." + i) - mod = getattr(bb.utils._context[self.namespace], i) - fn = getattr(mod, "__file__") - funcs = {} - for f in dir(mod): - if f.startswith("_"): - continue - fcall = getattr(mod, f) - if not callable(fcall): - continue - funcs[f] = fcall - bb.codeparser.add_module_functions(fn, funcs, "%s.%s" % (self.namespace, i)) - - except AttributeError as e: - bb.error("Error importing OE modules: %s" % str(e)) - -class InheritNode(AstNode): - def __init__(self, filename, lineno, classes): - AstNode.__init__(self, filename, lineno) - self.classes = classes - - def eval(self, data): - bb.parse.BBHandler.inherit(self.classes, self.filename, self.lineno, data) - -class InheritDeferredNode(AstNode): - def __init__(self, filename, lineno, classes): - AstNode.__init__(self, filename, lineno) - self.inherit = (classes, filename, lineno) - - def eval(self, data): - bb.parse.BBHandler.inherit_defer(*self.inherit, data) - -class AddFragmentsNode(AstNode): - def __init__(self, filename, lineno, fragments_path_prefix, fragments_variable, flagged_variables_list_variable, builtin_fragments_variable): - AstNode.__init__(self, filename, lineno) - self.fragments_path_prefix = fragments_path_prefix - self.fragments_variable = fragments_variable - self.flagged_variables_list_variable = flagged_variables_list_variable - self.builtin_fragments_variable = builtin_fragments_variable - - def eval(self, data): - # No need to use mark_dependency since we would only match a fragment - # from a specific layer and there can only be a single layer with a - # given namespace. 
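# Illustrative sketch, assuming a hypothetical configuration with
# BBLAYERS = "/srv/poky/meta /srv/work/meta-custom" and an enabled
# fragment "custom/debug-build" (both names are made up for the
# example): find_fragment() below probes each layer path for
# <layerpath>/<fragments_path_prefix>/debug-build.conf and accepts a
# candidate only when bb.utils.get_file_layer() resolves it back to
# the "custom" layer id, so a fragment with the same relative path in
# some other layer is never picked up by mistake.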
- def find_fragment(layers, layerid, full_fragment_name): - for layerpath in layers.split(): - candidate_fragment_path = os.path.join(layerpath, full_fragment_name) - if os.path.exists(candidate_fragment_path) and bb.utils.get_file_layer(candidate_fragment_path, data) == layerid: - return candidate_fragment_path - return None - - def check_and_set_builtin_fragment(fragment, data, builtin_fragments): - prefix, value = fragment.split('/', 1) - if prefix in builtin_fragments.keys(): - # parsing=True since we want to emulate X=Y and allow X:override=Z to continue to exist - data.setVar(builtin_fragments[prefix], value, parsing=True) - return True - return False - - fragments = data.getVar(self.fragments_variable) - layers = data.getVar('BBLAYERS') - flagged_variables = data.getVar(self.flagged_variables_list_variable).split() - builtin_fragments = {f[0]:f[1] for f in [f.split(':') for f in data.getVar(self.builtin_fragments_variable).split()] } - - if not fragments: - return - - # Check for multiple builtin fragments setting the same variable - for builtin_fragment_key in builtin_fragments.keys(): - builtin_fragments_list = list( - filter( - lambda f: f.startswith(builtin_fragment_key + "/"), - fragments.split(), - ) - ) - if len(builtin_fragments_list) > 1: - bb.warn( - ("Multiple builtin fragments are enabled for %s via variable %s: %s. " - "This likely points to a mis-configuration in the metadata, as only " - "one of them should be set. The build will use the last value.") - % ( - builtin_fragment_key, - self.fragments_variable, - " ".join(builtin_fragments_list), - ) - ) - - for f in fragments.split(): - if check_and_set_builtin_fragment(f, data, builtin_fragments): - continue - layerid, fragment_name = f.split('/', 1) - full_fragment_name = data.expand("{}/{}.conf".format(self.fragments_path_prefix, fragment_name)) - fragment_path = find_fragment(layers, layerid, full_fragment_name) - if fragment_path: - bb.parse.ConfHandler.include(self.filename, fragment_path, self.lineno, data, "include fragment") - for flagged_var in flagged_variables: - val = data.getVar(flagged_var) - data.setVarFlag(flagged_var, f, val) - data.setVar(flagged_var, None) - else: - bb.error("Could not find fragment {} in enabled layers: {}".format(f, layers)) - -def handleInclude(statements, filename, lineno, m, force): - statements.append(IncludeNode(filename, lineno, m.group(1), force)) - -def handleIncludeAll(statements, filename, lineno, m): - statements.append(IncludeAllNode(filename, lineno, m.group(1))) - -def handleExport(statements, filename, lineno, m): - statements.append(ExportNode(filename, lineno, m.group(1))) - -def handleUnset(statements, filename, lineno, m): - statements.append(UnsetNode(filename, lineno, m.group(1))) - -def handleUnsetFlag(statements, filename, lineno, m): - statements.append(UnsetFlagNode(filename, lineno, m.group(1), m.group(2))) - -def handleData(statements, filename, lineno, groupd): - statements.append(DataNode(filename, lineno, groupd)) - -def handleMethod(statements, filename, lineno, func_name, body, python, fakeroot): - statements.append(MethodNode(filename, lineno, func_name, body, python, fakeroot)) - -def handlePythonMethod(statements, filename, lineno, funcname, modulename, body): - statements.append(PythonMethodNode(filename, lineno, funcname, modulename, body)) - -def handleExportFuncs(statements, filename, lineno, m, classname): - statements.append(ExportFuncsNode(filename, lineno, m.group(1), classname)) - -def handleAddTask(statements, filename, lineno, tasks, 
before, after): - statements.append(AddTaskNode(filename, lineno, tasks, before, after)) - -def handleDelTask(statements, filename, lineno, tasks): - statements.append(DelTaskNode(filename, lineno, tasks)) - -def handleBBHandlers(statements, filename, lineno, m): - statements.append(BBHandlerNode(filename, lineno, m.group(1))) - -def handlePyLib(statements, filename, lineno, m): - statements.append(PyLibNode(filename, lineno, m.group(1), m.group(2))) - -def handleInherit(statements, filename, lineno, m): - classes = m.group(1) - statements.append(InheritNode(filename, lineno, classes)) - -def handleInheritDeferred(statements, filename, lineno, m): - classes = m.group(1) - statements.append(InheritDeferredNode(filename, lineno, classes)) - -def handleAddFragments(statements, filename, lineno, m): - fragments_path_prefix = m.group(1) - fragments_variable = m.group(2) - flagged_variables_list_variable = m.group(3) - builtin_fragments_variable = m.group(4) - statements.append(AddFragmentsNode(filename, lineno, fragments_path_prefix, fragments_variable, flagged_variables_list_variable, builtin_fragments_variable)) - -def runAnonFuncs(d): - code = [] - for funcname in d.getVar("__BBANONFUNCS", False) or []: - code.append("%s(d)" % funcname) - bb.utils.better_exec("\n".join(code), {"d": d}) - -# Handle recipe level PREFERRED_PROVIDERs -def handleVirtRecipeProviders(tasklist, d): - depends = (d.getVar("DEPENDS") or "").split() - virtprovs = (d.getVar("BB_RECIPE_VIRTUAL_PROVIDERS") or "").split() - newdeps = [] - for dep in depends: - if dep in virtprovs: - newdep = d.getVar("PREFERRED_PROVIDER_" + dep) - if not newdep: - bb.fatal("Error, recipe virtual provider PREFERRED_PROVIDER_%s not set" % dep) - newdeps.append(newdep) - else: - newdeps.append(dep) - d.setVar("DEPENDS", " ".join(newdeps)) - for task in tasklist: - taskdeps = (d.getVarFlag(task, "depends") or "").split() - remapped = [] - for entry in taskdeps: - r, t = entry.split(":") - if r in virtprovs: - r = d.getVar("PREFERRED_PROVIDER_" + r) - remapped.append("%s:%s" % (r, t)) - d.setVarFlag(task, "depends", " ".join(remapped)) - -def finalize(fn, d, variant = None): - saved_handlers = bb.event.get_handlers().copy() - try: - # Found renamed variables. 
Exit immediately - if d.getVar("_FAILPARSINGERRORHANDLED", False) == True: - raise bb.BBHandledException() - - inherits = [x[0] for x in (d.getVar('__BBDEFINHERITS', False) or [('',)])] - bb.event.fire(bb.event.RecipePreDeferredInherits(fn, inherits), d) - - while True: - inherits = d.getVar('__BBDEFINHERITS', False) or [] - if not inherits: - break - inherit, filename, lineno = inherits.pop(0) - d.setVar('__BBDEFINHERITS', inherits) - bb.parse.BBHandler.inherit(inherit, filename, lineno, d, deferred=True) - - for var in d.getVar('__BBHANDLERS', False) or []: - # try to add the handler - handlerfn = d.getVarFlag(var, "filename", False) - if not handlerfn: - bb.fatal("Undefined event handler function '%s'" % var) - handlerln = int(d.getVarFlag(var, "lineno", False)) - bb.event.register(var, d.getVar(var, False), (d.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln, data=d) - - bb.event.fire(bb.event.RecipePreFinalise(fn), d) - - bb.data.expandKeys(d) - - bb.event.fire(bb.event.RecipePostKeyExpansion(fn), d) - - runAnonFuncs(d) - - tasklist = d.getVar('__BBTASKS', False) or [] - bb.event.fire(bb.event.RecipeTaskPreProcess(fn, list(tasklist)), d) - handleVirtRecipeProviders(tasklist, d) - bb.build.add_tasks(tasklist, d) - - bb.parse.siggen.finalise(fn, d, variant) - - d.setVar('BBINCLUDED', bb.parse.get_file_depends(d)) - - if d.getVar('__BBAUTOREV_SEEN') and d.getVar('__BBSRCREV_SEEN') and not d.getVar("__BBAUTOREV_ACTED_UPON"): - bb.fatal("AUTOREV/SRCPV set too late for the fetcher to work properly, please set the variables earlier in parsing. Erroring instead of later obtuse build failures.") - - bb.event.fire(bb.event.RecipeParsed(fn), d) - finally: - bb.event.set_handlers(saved_handlers) - -def _create_variants(datastores, names, function, onlyfinalise): - def create_variant(name, orig_d, arg = None): - if onlyfinalise and name not in onlyfinalise: - return - new_d = bb.data.createCopy(orig_d) - function(arg or name, new_d) - datastores[name] = new_d - - for variant in list(datastores.keys()): - for name in names: - if not variant: - # Based on main recipe - create_variant(name, datastores[""]) - else: - create_variant("%s-%s" % (variant, name), datastores[variant], name) - -def multi_finalize(fn, d): - appends = (d.getVar("__BBAPPEND") or "").split() - for append in appends: - logger.debug("Appending .bbappend file %s to %s", append, fn) - bb.parse.BBHandler.handle(append, d, True) - - onlyfinalise = d.getVar("__ONLYFINALISE", False) - - safe_d = d - d = bb.data.createCopy(safe_d) - try: - finalize(fn, d) - except bb.parse.SkipRecipe as e: - d.setVar("__SKIPPED", e.args[0]) - datastores = {"": safe_d} - - extended = d.getVar("BBCLASSEXTEND") or "" - if extended: - # the following is to support bbextends with arguments, for e.g. 
multilib - # an example is as follows: - # BBCLASSEXTEND = "multilib:lib32" - # it will create foo-lib32, inheriting multilib.bbclass and set - # BBEXTENDCURR to "multilib" and BBEXTENDVARIANT to "lib32" - extendedmap = {} - variantmap = {} - - for ext in extended.split(): - eext = ext.split(':', 2) - if len(eext) > 1: - extendedmap[ext] = eext[0] - variantmap[ext] = eext[1] - else: - extendedmap[ext] = ext - - pn = d.getVar("PN") - def extendfunc(name, d): - if name != extendedmap[name]: - d.setVar("BBEXTENDCURR", extendedmap[name]) - d.setVar("BBEXTENDVARIANT", variantmap[name]) - else: - d.setVar("PN", "%s-%s" % (pn, name)) - bb.parse.BBHandler.inherit_defer(extendedmap[name], fn, 0, d) - - safe_d.setVar("BBCLASSEXTEND", extended) - _create_variants(datastores, extendedmap.keys(), extendfunc, onlyfinalise) - - for variant in datastores.keys(): - if variant: - try: - if not onlyfinalise or variant in onlyfinalise: - finalize(fn, datastores[variant], variant) - except bb.parse.SkipRecipe as e: - datastores[variant].setVar("__SKIPPED", e.args[0]) - - datastores[""] = d - return datastores diff --git a/bitbake/lib/bb/parse/parse_py/BBHandler.py b/bitbake/lib/bb/parse/parse_py/BBHandler.py deleted file mode 100644 index 008fec2308..0000000000 --- a/bitbake/lib/bb/parse/parse_py/BBHandler.py +++ /dev/null @@ -1,306 +0,0 @@ -""" - class for handling .bb files - - Reads a .bb file and obtains its metadata - -""" - - -# Copyright (C) 2003, 2004 Chris Larson -# Copyright (C) 2003, 2004 Phil Blundell -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import re, bb, os -import bb.build, bb.utils, bb.data_smart - -from . import ConfHandler -from .. import resolve_file, ast, logger, ParseError -from .ConfHandler import include, init - -__func_start_regexp__ = re.compile(r"(((?Ppython(?=(\s|\()))|(?Pfakeroot(?=\s)))\s*)*(?P[\w\.\-\+\{\}\$:]+)?\s*\(\s*\)\s*{$" ) -__inherit_regexp__ = re.compile(r"inherit\s+(.+)" ) -__inherit_def_regexp__ = re.compile(r"inherit_defer\s+(.+)" ) -__export_func_regexp__ = re.compile(r"EXPORT_FUNCTIONS\s+(.+)" ) -__addtask_regexp__ = re.compile(r"addtask\s+([^#\n]+)(?P#.*|.*?)") -__deltask_regexp__ = re.compile(r"deltask\s+([^#\n]+)(?P#.*|.*?)") -__addhandler_regexp__ = re.compile(r"addhandler\s+(.+)" ) -__def_regexp__ = re.compile(r"def\s+(\w+).*:" ) -__python_func_regexp__ = re.compile(r"(\s+.*)|(^$)|(^#)" ) -__python_tab_regexp__ = re.compile(r" *\t") - -__infunc__ = [] -__inpython__ = False -__body__ = [] -__classname__ = "" -__residue__ = [] - -cached_statements = {} - -def supports(fn, d): - """Return True if fn has a supported extension""" - return os.path.splitext(fn)[-1] in [".bb", ".bbclass", ".inc"] - -def inherit_defer(expression, fn, lineno, d): - inherit = (expression, fn, lineno) - inherits = d.getVar('__BBDEFINHERITS', False) or [] - inherits.append(inherit) - d.setVar('__BBDEFINHERITS', inherits) - -def inherit(files, fn, lineno, d, deferred=False): - __inherit_cache = d.getVar('__inherit_cache', False) or [] - #if "${" in files and not deferred: - # bb.warn("%s:%s has non deferred conditional inherit" % (fn, lineno)) - files = d.expand(files).split() - for file in files: - defer = (d.getVar("BB_DEFER_BBCLASSES") or "").split() - if not deferred and file in defer: - inherit_defer(file, fn, lineno, d) - continue - classtype = d.getVar("__bbclasstype", False) - origfile = file - for t in ["classes-" + classtype, "classes"]: - file = origfile - if not os.path.isabs(file) and not file.endswith(".bbclass"): - file = os.path.join(t, '%s.bbclass' % file) - - if not 
os.path.isabs(file): - bbpath = d.getVar("BBPATH") - abs_fn, attempts = bb.utils.which(bbpath, file, history=True) - for af in attempts: - if af != abs_fn: - bb.parse.mark_dependency(d, af) - if abs_fn: - file = abs_fn - - if os.path.exists(file): - break - - if not os.path.exists(file): - raise ParseError("Could not inherit file %s" % (file), fn, lineno) - - if not file in __inherit_cache: - logger.debug("Inheriting %s (from %s:%d)" % (file, fn, lineno)) - __inherit_cache.append( file ) - d.setVar('__inherit_cache', __inherit_cache) - try: - bb.parse.handle(file, d, True) - except (IOError, OSError) as exc: - raise ParseError("Could not inherit file %s: %s" % (fn, exc.strerror), fn, lineno) - __inherit_cache = d.getVar('__inherit_cache', False) or [] - -def get_statements(filename, absolute_filename, base_name): - global cached_statements, __residue__, __body__ - - try: - return cached_statements[absolute_filename] - except KeyError: - with open(absolute_filename, 'r') as f: - statements = ast.StatementGroup() - - lineno = 0 - while True: - lineno = lineno + 1 - s = f.readline() - if not s: break - s = s.rstrip() - feeder(lineno, s, filename, base_name, statements) - - if __inpython__: - # add a blank line to close out any python definition - feeder(lineno, "", filename, base_name, statements, eof=True) - - if __residue__: - raise ParseError("Unparsed lines %s: %s" % (filename, str(__residue__)), filename, lineno) - if __body__: - raise ParseError("Unparsed lines from unclosed function %s: %s" % (filename, str(__body__)), filename, lineno) - - if filename.endswith(".bbclass") or filename.endswith(".inc"): - cached_statements[absolute_filename] = statements - return statements - -def handle(fn, d, include, baseconfig=False): - global __infunc__, __body__, __residue__, __classname__ - __body__ = [] - __infunc__ = [] - __classname__ = "" - __residue__ = [] - - base_name = os.path.basename(fn) - (root, ext) = os.path.splitext(base_name) - init(d) - - if ext == ".bbclass": - __classname__ = root - __inherit_cache = d.getVar('__inherit_cache', False) or [] - if not fn in __inherit_cache: - __inherit_cache.append(fn) - d.setVar('__inherit_cache', __inherit_cache) - - if include != 0: - oldfile = d.getVar('FILE', False) - else: - oldfile = None - - abs_fn = resolve_file(fn, d) - - # actual loading - statements = get_statements(fn, abs_fn, base_name) - - # DONE WITH PARSING... time to evaluate - if ext != ".bbclass" and abs_fn != oldfile: - d.setVar('FILE', abs_fn) - - try: - statements.eval(d) - except bb.parse.SkipRecipe: - d.setVar("__SKIPPED", True) - if include == 0: - return { "" : d } - - if __infunc__: - raise ParseError("Shell function %s is never closed" % __infunc__[0], __infunc__[1], __infunc__[2]) - if __residue__: - raise ParseError("Leftover unparsed (incomplete?) 
data %s from %s" % __residue__, fn) - - if ext != ".bbclass" and include == 0: - return ast.multi_finalize(fn, d) - - if ext != ".bbclass" and oldfile and abs_fn != oldfile: - d.setVar("FILE", oldfile) - - return d - -def feeder(lineno, s, fn, root, statements, eof=False): - global __inpython__, __infunc__, __body__, __residue__, __classname__ - - # Check tabs in python functions: - # - def py_funcname(): covered by __inpython__ - # - python(): covered by '__anonymous' == __infunc__[0] - # - python funcname(): covered by __infunc__[3] - if __inpython__ or (__infunc__ and ('__anonymous' == __infunc__[0] or __infunc__[3])): - tab = __python_tab_regexp__.match(s) - if tab: - bb.warn('python should use 4 spaces indentation, but found tabs in %s, line %s' % (root, lineno)) - - if __infunc__: - if s == '}': - __body__.append('') - ast.handleMethod(statements, fn, lineno, __infunc__[0], __body__, __infunc__[3], __infunc__[4]) - __infunc__ = [] - __body__ = [] - else: - __body__.append(s) - return - - if __inpython__: - m = __python_func_regexp__.match(s) - if m and not eof: - __body__.append(s) - return - else: - ast.handlePythonMethod(statements, fn, lineno, __inpython__, - root, __body__) - __body__ = [] - __inpython__ = False - - if eof: - return - - if s and s[0] == '#': - if len(__residue__) != 0 and __residue__[0][0] != "#": - bb.fatal("There is a comment on line %s of file %s:\n'''\n%s\n'''\nwhich is in the middle of a multiline expression. This syntax is invalid, please correct it." % (lineno, fn, s)) - - if len(__residue__) != 0 and __residue__[0][0] == "#" and (not s or s[0] != "#"): - bb.fatal("There is a confusing multiline partially commented expression on line %s of file %s:\n%s\nPlease clarify whether this is all a comment or should be parsed." % (lineno - len(__residue__), fn, "\n".join(__residue__))) - - if s and s[-1] == '\\': - __residue__.append(s[:-1]) - return - - s = "".join(__residue__) + s - __residue__ = [] - - # Skip empty lines - if s == '': - return - - # Skip comments - if s[0] == '#': - return - - m = __func_start_regexp__.match(s) - if m: - __infunc__ = [m.group("func") or "__anonymous", fn, lineno, m.group("py") is not None, m.group("fr") is not None] - return - - m = __def_regexp__.match(s) - if m: - __body__.append(s) - __inpython__ = m.group(1) - - return - - m = __export_func_regexp__.match(s) - if m: - ast.handleExportFuncs(statements, fn, lineno, m, __classname__) - return - - m = __addtask_regexp__.match(s) - if m: - after = "" - before = "" - - # This code splits on 'before' and 'after' instead of on whitespace so we can defer - # evaluation to as late as possible. 
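# Worked example, assuming the hypothetical recipe line
#   addtask compile after do_configure before do_install
# here m.group(1) is "compile after do_configure before do_install",
# so the splits below yield tasks = "compile", after = "do_configure"
# and before = "do_install". Splitting only on the literal " before "
# and " after " keywords rather than on whitespace leaves anything
# else in the expression (such as unexpanded variable references)
# intact, deferring their evaluation as the comment above describes.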
- tasks = m.group(1).split(" before ")[0].split(" after ")[0] - - for exp in m.group(1).split(" before "): - exp2 = exp.split(" after ") - if len(exp2) > 1: - after = after + " ".join(exp2[1:]) - - for exp in m.group(1).split(" after "): - exp2 = exp.split(" before ") - if len(exp2) > 1: - before = before + " ".join(exp2[1:]) - - # Check and warn for having task with a keyword as part of task name - taskexpression = s.split() - for te in taskexpression: - if any( ( "%s_" % keyword ) in te for keyword in bb.data_smart.__setvar_keyword__ ): - raise ParseError("Task name '%s' contains a keyword which is not recommended/supported.\nPlease rename the task not to include the keyword.\n%s" % (te, ("\n".join(map(str, bb.data_smart.__setvar_keyword__)))), fn) - - if tasks is not None: - ast.handleAddTask(statements, fn, lineno, tasks, before, after) - return - - m = __deltask_regexp__.match(s) - if m: - task = m.group(1) - if task is not None: - ast.handleDelTask(statements, fn, lineno, task) - return - - m = __addhandler_regexp__.match(s) - if m: - ast.handleBBHandlers(statements, fn, lineno, m) - return - - m = __inherit_regexp__.match(s) - if m: - ast.handleInherit(statements, fn, lineno, m) - return - - m = __inherit_def_regexp__.match(s) - if m: - ast.handleInheritDeferred(statements, fn, lineno, m) - return - - return ConfHandler.feeder(lineno, s, fn, statements, conffile=False) - -# Add us to the handlers list -from .. import handlers -handlers.append({'supports': supports, 'handle': handle, 'init': init}) -del handlers diff --git a/bitbake/lib/bb/parse/parse_py/ConfHandler.py b/bitbake/lib/bb/parse/parse_py/ConfHandler.py deleted file mode 100644 index 9ddbae123d..0000000000 --- a/bitbake/lib/bb/parse/parse_py/ConfHandler.py +++ /dev/null @@ -1,221 +0,0 @@ -""" - class for handling configuration data files - - Reads a .conf file and obtains its metadata - -""" - -# Copyright (C) 2003, 2004 Chris Larson -# Copyright (C) 2003, 2004 Phil Blundell -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import errno -import re -import os -import bb.utils -from bb.parse import ParseError, resolve_file, ast, logger, handle - -__config_regexp__ = re.compile( r""" - ^ - (?Pexport\s+)? - (?P[a-zA-Z0-9\-_+.${}/~:]*?) - (\[(?P[a-zA-Z0-9\-_+.][a-zA-Z0-9\-_+.@/]*)\])? - - (?P\s*) ( - (?P:=) | - (?P\?\?=) | - (?P\?=) | - (?P\+=) | - (?P=\+) | - (?P=\.) | - (?P\.=) | - = - ) (?P\s*) - - (?!'[^']*'[^']*'$) - (?!\"[^\"]*\"[^\"]*\"$) - (?P['\"]) - (?P.*) - (?P=apo) - $ - """, re.X) -__include_regexp__ = re.compile( r"include\s+(.+)" ) -__require_regexp__ = re.compile( r"require\s+(.+)" ) -__includeall_regexp__ = re.compile( r"include_all\s+(.+)" ) -__export_regexp__ = re.compile( r"export\s+([a-zA-Z0-9\-_+.${}/~]+)$" ) -__unset_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)$" ) -__unset_flag_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)\[([a-zA-Z0-9\-_+.][a-zA-Z0-9\-_+.@]+)\]$" ) -__addpylib_regexp__ = re.compile(r"addpylib\s+(.+)\s+(.+)" ) -__addfragments_regexp__ = re.compile(r"addfragments\s+(.+)\s+(.+)\s+(.+)\s+(.+)" ) - -def init(data): - return - -def supports(fn, d): - return fn[-5:] == ".conf" - -def include(parentfn, fns, lineno, data, error_out): - """ - error_out: A string indicating the verb (e.g. "include", "inherit") to be - used in a ParseError that will be raised if the file to be included could - not be included. Specify False to avoid raising an error in this case. 
- """ - fns = data.expand(fns) - parentfn = data.expand(parentfn) - - # "include" or "require" accept zero to n space-separated file names to include. - for fn in fns.split(): - include_single_file(parentfn, fn, lineno, data, error_out) - -def include_single_file(parentfn, fn, lineno, data, error_out): - """ - Helper function for include() which does not expand or split its parameters. - """ - if parentfn == fn: # prevent infinite recursion - return None - - if not os.path.isabs(fn): - dname = os.path.dirname(parentfn) - bbpath = "%s:%s" % (dname, data.getVar("BBPATH")) - abs_fn, attempts = bb.utils.which(bbpath, fn, history=True) - if abs_fn and bb.parse.check_dependency(data, abs_fn): - logger.warning("Duplicate inclusion for %s in %s" % (abs_fn, data.getVar('FILE'))) - for af in attempts: - bb.parse.mark_dependency(data, af) - if abs_fn: - fn = abs_fn - elif bb.parse.check_dependency(data, fn): - logger.warning("Duplicate inclusion for %s in %s" % (fn, data.getVar('FILE'))) - - try: - bb.parse.handle(fn, data, True) - except (IOError, OSError) as exc: - if exc.errno == errno.ENOENT: - if error_out: - raise ParseError("Could not %s file %s" % (error_out, fn), parentfn, lineno) - logger.debug2("CONF file '%s' not found", fn) - else: - if error_out: - raise ParseError("Could not %s file %s: %s" % (error_out, fn, exc.strerror), parentfn, lineno) - else: - raise ParseError("Error parsing %s: %s" % (fn, exc.strerror), parentfn, lineno) - -# We have an issue where a UI might want to enforce particular settings such as -# an empty DISTRO variable. If configuration files do something like assigning -# a weak default, it turns out to be very difficult to filter out these changes, -# particularly when the weak default might appear half way though parsing a chain -# of configuration files. We therefore let the UIs hook into configuration file -# parsing. This turns out to be a hard problem to solve any other way. -confFilters = [] - -def handle(fn, data, include, baseconfig=False): - init(data) - - if include == 0: - oldfile = None - else: - oldfile = data.getVar('FILE', False) - - abs_fn = resolve_file(fn, data) - with open(abs_fn, 'r') as f: - - statements = ast.StatementGroup() - lineno = 0 - while True: - lineno = lineno + 1 - s = f.readline() - if not s: - break - origlineno = lineno - origline = s - w = s.strip() - # skip empty lines - if not w: - continue - s = s.rstrip() - while s[-1] == '\\': - line = f.readline() - origline += line - s2 = line.rstrip() - lineno = lineno + 1 - if (not s2 or s2 and s2[0] != "#") and s[0] == "#" : - bb.fatal("There is a confusing multiline, partially commented expression starting on line %s of file %s:\n%s\nPlease clarify whether this is all a comment or should be parsed." % (origlineno, fn, origline)) - - s = s[:-1] + s2 - # skip comments - if s[0] == '#': - continue - feeder(lineno, s, abs_fn, statements, baseconfig=baseconfig) - - # DONE WITH PARSING... 
time to evaluate - data.setVar('FILE', abs_fn) - statements.eval(data) - if oldfile: - data.setVar('FILE', oldfile) - - for f in confFilters: - f(fn, data) - - return data - -# baseconfig is set for the bblayers/layer.conf cookerdata config parsing -# The function is also used by BBHandler, conffile would be False -def feeder(lineno, s, fn, statements, baseconfig=False, conffile=True): - m = __config_regexp__.match(s) - if m: - groupd = m.groupdict() - if groupd['var'] == "": - raise ParseError("Empty variable name in assignment: '%s'" % s, fn, lineno); - if not groupd['whitespace'] or not groupd['whitespace2']: - logger.warning("%s:%s has a lack of whitespace around the assignment: '%s'" % (fn, lineno, s)) - ast.handleData(statements, fn, lineno, groupd) - return - - m = __include_regexp__.match(s) - if m: - ast.handleInclude(statements, fn, lineno, m, False) - return - - m = __require_regexp__.match(s) - if m: - ast.handleInclude(statements, fn, lineno, m, True) - return - - m = __includeall_regexp__.match(s) - if m: - ast.handleIncludeAll(statements, fn, lineno, m) - return - - m = __export_regexp__.match(s) - if m: - ast.handleExport(statements, fn, lineno, m) - return - - m = __unset_regexp__.match(s) - if m: - ast.handleUnset(statements, fn, lineno, m) - return - - m = __unset_flag_regexp__.match(s) - if m: - ast.handleUnsetFlag(statements, fn, lineno, m) - return - - m = __addpylib_regexp__.match(s) - if baseconfig and conffile and m: - ast.handlePyLib(statements, fn, lineno, m) - return - - m = __addfragments_regexp__.match(s) - if m: - ast.handleAddFragments(statements, fn, lineno, m) - return - - raise ParseError("unparsed line: '%s'" % s, fn, lineno); - -# Add us to the handlers list -from bb.parse import handlers -handlers.append({'supports': supports, 'handle': handle, 'init': init}) -del handlers diff --git a/bitbake/lib/bb/parse/parse_py/__init__.py b/bitbake/lib/bb/parse/parse_py/__init__.py deleted file mode 100644 index f508afa14e..0000000000 --- a/bitbake/lib/bb/parse/parse_py/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -""" -BitBake Parsers - -File parsers for the BitBake build tools. - -""" - -# Copyright (C) 2003, 2004 Chris Larson -# Copyright (C) 2003, 2004 Phil Blundell -# -# SPDX-License-Identifier: GPL-2.0-only -# -# Based on functions from the base bb module, Copyright 2003 Holger Schurig -# - -from __future__ import absolute_import -from . import ConfHandler -from . import BBHandler - -__version__ = '1.0' diff --git a/bitbake/lib/bb/process.py b/bitbake/lib/bb/process.py deleted file mode 100644 index 4c7b6d39df..0000000000 --- a/bitbake/lib/bb/process.py +++ /dev/null @@ -1,190 +0,0 @@ -# -# Copyright BitBake Contributors -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import logging -import signal -import subprocess -import errno -import select -import bb - -logger = logging.getLogger('BitBake.Process') - -def subprocess_setup(): - # Python installs a SIGPIPE handler by default. This is usually not what - # non-Python subprocesses expect. 
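# A note on the mechanics: CPython sets SIGPIPE to SIG_IGN at
# interpreter startup so that failed pipe writes surface as Python
# exceptions, and that disposition is inherited across fork()/exec().
# Resetting it to SIG_DFL below lets a child process terminate on
# SIGPIPE, as most Unix tools expect, instead of hitting EPIPE errors
# on every write to a closed pipe.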
- signal.signal(signal.SIGPIPE, signal.SIG_DFL) - -class CmdError(RuntimeError): - def __init__(self, command, msg=None): - self.command = command - self.msg = msg - - def __str__(self): - if not isinstance(self.command, str): - cmd = subprocess.list2cmdline(self.command) - else: - cmd = self.command - - msg = "Execution of '%s' failed" % cmd - if self.msg: - msg += ': %s' % self.msg - return msg - -class NotFoundError(CmdError): - def __str__(self): - return CmdError.__str__(self) + ": command not found" - -class ExecutionError(CmdError): - def __init__(self, command, exitcode, stdout = None, stderr = None): - CmdError.__init__(self, command) - self.exitcode = exitcode - self.stdout = stdout - self.stderr = stderr - self.extra_message = None - - def __str__(self): - message = "" - if self.stderr: - message += self.stderr - if self.stdout: - message += self.stdout - if message: - message = ":\n" + message - return (CmdError.__str__(self) + - " with exit code %s" % self.exitcode + message + (self.extra_message or "")) - -class Popen(subprocess.Popen): - defaults = { - "close_fds": True, - "preexec_fn": subprocess_setup, - "stdout": subprocess.PIPE, - "stderr": subprocess.PIPE, - "stdin": subprocess.PIPE, - "shell": False, - } - - def __init__(self, *args, **kwargs): - options = dict(self.defaults) - options.update(kwargs) - subprocess.Popen.__init__(self, *args, **options) - -def _logged_communicate(pipe, log, input, extrafiles): - if pipe.stdin: - if input is not None: - pipe.stdin.write(input) - pipe.stdin.close() - - outdata, errdata = [], [] - rin = [] - - if pipe.stdout is not None: - bb.utils.nonblockingfd(pipe.stdout.fileno()) - rin.append(pipe.stdout) - if pipe.stderr is not None: - bb.utils.nonblockingfd(pipe.stderr.fileno()) - rin.append(pipe.stderr) - for fobj, _ in extrafiles: - bb.utils.nonblockingfd(fobj.fileno()) - rin.append(fobj) - - def readextras(selected): - for fobj, func in extrafiles: - if fobj in selected: - try: - data = fobj.read() - except IOError as err: - if err.errno == errno.EAGAIN or err.errno == errno.EWOULDBLOCK: - data = None - if data is not None: - func(data) - - def read_all_pipes(log, rin, outdata, errdata): - rlist = rin - stdoutbuf = b"" - stderrbuf = b"" - - try: - r,w,e = select.select (rlist, [], [], 1) - except OSError as e: - if e.errno != errno.EINTR: - raise - - readextras(r) - - if pipe.stdout in r: - data = stdoutbuf + pipe.stdout.read() - if data is not None and len(data) > 0: - try: - data = data.decode("utf-8") - outdata.append(data) - log.write(data) - log.flush() - stdoutbuf = b"" - except UnicodeDecodeError: - stdoutbuf = data - - if pipe.stderr in r: - data = stderrbuf + pipe.stderr.read() - if data is not None and len(data) > 0: - try: - data = data.decode("utf-8") - errdata.append(data) - log.write(data) - log.flush() - stderrbuf = b"" - except UnicodeDecodeError: - stderrbuf = data - - try: - # Read all pipes while the process is open - while pipe.poll() is None: - read_all_pipes(log, rin, outdata, errdata) - - # Process closed, drain all pipes... 
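# This last pass is needed because the child can emit output between
# the final select() inside read_all_pipes() and poll() returning
# non-None; without one more read of each pipe, that tail of output
# would be missing from both the log and the returned data.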
- read_all_pipes(log, rin, outdata, errdata) - finally: - log.flush() - - if pipe.stdout is not None: - pipe.stdout.close() - if pipe.stderr is not None: - pipe.stderr.close() - return ''.join(outdata), ''.join(errdata) - -def run(cmd, input=None, log=None, extrafiles=None, **options): - """Convenience function to run a command and return its output, raising an - exception when the command fails""" - - if not extrafiles: - extrafiles = [] - - if isinstance(cmd, str) and not "shell" in options: - options["shell"] = True - - try: - pipe = Popen(cmd, **options) - except OSError as exc: - if exc.errno == 2: - raise NotFoundError(cmd) - else: - raise CmdError(cmd, exc) - - if log: - stdout, stderr = _logged_communicate(pipe, log, input, extrafiles) - else: - stdout, stderr = pipe.communicate(input) - if not stdout is None: - stdout = stdout.decode("utf-8") - if not stderr is None: - stderr = stderr.decode("utf-8") - - if pipe.returncode != 0: - if log: - # Don't duplicate the output in the exception if logging it - raise ExecutionError(cmd, pipe.returncode, None, None) - raise ExecutionError(cmd, pipe.returncode, stdout, stderr) - return stdout, stderr diff --git a/bitbake/lib/bb/progress.py b/bitbake/lib/bb/progress.py deleted file mode 100644 index 9518be77fb..0000000000 --- a/bitbake/lib/bb/progress.py +++ /dev/null @@ -1,310 +0,0 @@ -""" -BitBake progress handling code -""" - -# Copyright (C) 2016 Intel Corporation -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import re -import time -import inspect -import bb.event -import bb.build -from bb.build import StdoutNoopContextManager - - -# from https://stackoverflow.com/a/14693789/221061 -ANSI_ESCAPE_REGEX = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]') - - -def filter_color(string): - """ - Filter ANSI escape codes out of |string|, return new string - """ - return ANSI_ESCAPE_REGEX.sub('', string) - - -def filter_color_n(string): - """ - Filter ANSI escape codes out of |string|, returns tuple of - (new string, # of ANSI codes removed) - """ - return ANSI_ESCAPE_REGEX.subn('', string) - - -class ProgressHandler: - """ - Base class that can pretend to be a file object well enough to be - used to build objects to intercept console output and determine the - progress of some operation. - """ - def __init__(self, d, outfile=None): - self._progress = 0 - self._data = d - self._lastevent = 0 - if outfile: - self._outfile = outfile - else: - self._outfile = StdoutNoopContextManager() - - def __enter__(self): - self._outfile.__enter__() - return self - - def __exit__(self, *excinfo): - self._outfile.__exit__(*excinfo) - - def _fire_progress(self, taskprogress, rate=None): - """Internal function to fire the progress event""" - bb.event.fire(bb.build.TaskProgress(taskprogress, rate), self._data) - - def write(self, string): - self._outfile.write(string) - - def flush(self): - self._outfile.flush() - - def update(self, progress, rate=None): - ts = time.time() - if progress > 100: - progress = 100 - if progress != self._progress or self._lastevent + 1 < ts: - self._fire_progress(progress, rate) - self._lastevent = ts - self._progress = progress - - -class LineFilterProgressHandler(ProgressHandler): - """ - A ProgressHandler variant that provides the ability to filter out - the lines if they contain progress information. Additionally, it - filters out anything before the last line feed on a line. This can - be used to keep the logs clean of output that we've only enabled for - getting progress, assuming that that can be done on a per-line - basis. 
- """ - def __init__(self, d, outfile=None): - self._linebuffer = '' - super().__init__(d, outfile) - - def write(self, string): - self._linebuffer += string - while True: - breakpos = self._linebuffer.find('\n') + 1 - if breakpos == 0: - # for the case when the line with progress ends with only '\r' - breakpos = self._linebuffer.find('\r') + 1 - if breakpos == 0: - break - line = self._linebuffer[:breakpos] - self._linebuffer = self._linebuffer[breakpos:] - # Drop any line feeds and anything that precedes them - lbreakpos = line.rfind('\r') + 1 - if lbreakpos and lbreakpos != breakpos: - line = line[lbreakpos:] - if self.writeline(filter_color(line)): - super().write(line) - - def writeline(self, line): - return True - - -class BasicProgressHandler(ProgressHandler): - def __init__(self, d, regex=r'(\d+)%', outfile=None): - super().__init__(d, outfile) - self._regex = re.compile(regex) - # Send an initial progress event so the bar gets shown - self._fire_progress(0) - - def write(self, string): - percs = self._regex.findall(filter_color(string)) - if percs: - progress = int(percs[-1]) - self.update(progress) - super().write(string) - - -class OutOfProgressHandler(ProgressHandler): - def __init__(self, d, regex, outfile=None): - super().__init__(d, outfile) - self._regex = re.compile(regex) - # Send an initial progress event so the bar gets shown - self._fire_progress(0) - - def write(self, string): - nums = self._regex.findall(filter_color(string)) - if nums: - progress = (float(nums[-1][0]) / float(nums[-1][1])) * 100 - self.update(progress) - super().write(string) - - -class MultiStageProgressReporter: - """ - Class which allows reporting progress without the caller - having to know where they are in the overall sequence. Useful - for tasks made up of python code spread across multiple - classes / functions - the progress reporter object can - be passed around or stored at the object level and calls - to next_stage() and update() made wherever needed. - """ - def __init__(self, d, stage_weights, debug=False): - """ - Initialise the progress reporter. - - Parameters: - * d: the datastore (needed for firing the events) - * stage_weights: a list of weight values, one for each stage. - The value is scaled internally so you only need to specify - values relative to other values in the list, so if there - are two stages and the first takes 2s and the second takes - 10s you would specify [2, 10] (or [1, 5], it doesn't matter). - * debug: specify True (and ensure you call finish() at the end) - in order to show a printout of the calculated stage weights - based on timing each stage. Use this to determine what the - weights should be when you're not sure. - """ - self._data = d - total = sum(stage_weights) - self._stage_weights = [float(x)/total for x in stage_weights] - self._stage = -1 - self._base_progress = 0 - # Send an initial progress event so the bar gets shown - self._fire_progress(0) - self._debug = debug - self._finished = False - if self._debug: - self._last_time = time.time() - self._stage_times = [] - self._stage_total = None - self._callers = [] - - def __enter__(self): - return self - - def __exit__(self, *excinfo): - pass - - def _fire_progress(self, taskprogress): - bb.event.fire(bb.build.TaskProgress(taskprogress), self._data) - - def next_stage(self, stage_total=None): - """ - Move to the next stage. - Parameters: - * stage_total: optional total for progress within the stage, - see update() for details - NOTE: you need to call this before the first stage. 
- """ - self._stage += 1 - self._stage_total = stage_total - if self._stage == 0: - # First stage - if self._debug: - self._last_time = time.time() - else: - if self._stage < len(self._stage_weights): - self._base_progress = sum(self._stage_weights[:self._stage]) * 100 - if self._debug: - currtime = time.time() - self._stage_times.append(currtime - self._last_time) - self._last_time = currtime - self._callers.append(inspect.getouterframes(inspect.currentframe())[1]) - elif not self._debug: - bb.warn('ProgressReporter: current stage beyond declared number of stages') - self._base_progress = 100 - self._fire_progress(self._base_progress) - - def update(self, stage_progress): - """ - Update progress within the current stage. - Parameters: - * stage_progress: progress value within the stage. If stage_total - was specified when next_stage() was last called, then this - value is considered to be out of stage_total, otherwise it should - be a percentage value from 0 to 100. - """ - progress = None - if self._stage_total: - stage_progress = (float(stage_progress) / self._stage_total) * 100 - if self._stage < 0: - bb.warn('ProgressReporter: update called before first call to next_stage()') - elif self._stage < len(self._stage_weights): - progress = self._base_progress + (stage_progress * self._stage_weights[self._stage]) - else: - progress = self._base_progress - if progress: - if progress > 100: - progress = 100 - self._fire_progress(progress) - - def finish(self): - if self._finished: - return - self._finished = True - if self._debug: - import math - self._stage_times.append(time.time() - self._last_time) - mintime = max(min(self._stage_times), 0.01) - self._callers.append(None) - stage_weights = [int(math.ceil(x / mintime)) for x in self._stage_times] - bb.warn('Stage weights: %s' % stage_weights) - out = [] - for stage_weight, caller in zip(stage_weights, self._callers): - if caller: - out.append('Up to %s:%d: %d' % (caller[1], caller[2], stage_weight)) - else: - out.append('Up to finish: %d' % stage_weight) - bb.warn('Stage times:\n %s' % '\n '.join(out)) - - -class MultiStageProcessProgressReporter(MultiStageProgressReporter): - """ - Version of MultiStageProgressReporter intended for use with - standalone processes (such as preparing the runqueue) - """ - def __init__(self, d, processname, stage_weights, debug=False): - self._processname = processname - self._started = False - super().__init__(d, stage_weights, debug) - - def start(self): - if not self._started: - bb.event.fire(bb.event.ProcessStarted(self._processname, 100), self._data) - self._started = True - - def _fire_progress(self, taskprogress): - if taskprogress == 0: - self.start() - return - bb.event.fire(bb.event.ProcessProgress(self._processname, taskprogress), self._data) - - def finish(self): - MultiStageProgressReporter.finish(self) - bb.event.fire(bb.event.ProcessFinished(self._processname), self._data) - - -class DummyMultiStageProcessProgressReporter(MultiStageProgressReporter): - """ - MultiStageProcessProgressReporter that takes the calls and does nothing - with them (to avoid a bunch of "if progress_reporter:" checks) - """ - def __init__(self): - super().__init__(None, []) - - def _fire_progress(self, taskprogress, rate=None): - pass - - def start(self): - pass - - def next_stage(self, stage_total=None): - pass - - def update(self, stage_progress): - pass - - def finish(self): - pass diff --git a/bitbake/lib/bb/providers.py b/bitbake/lib/bb/providers.py deleted file mode 100644 index e11a4637d1..0000000000 --- 
a/bitbake/lib/bb/providers.py +++ /dev/null @@ -1,442 +0,0 @@ -# -# Copyright (C) 2003, 2004 Chris Larson -# Copyright (C) 2003, 2004 Phil Blundell -# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer -# Copyright (C) 2005 Holger Hans Peter Freyther -# Copyright (C) 2005 ROAD GmbH -# Copyright (C) 2006 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import re -import logging -from bb import data, utils -from collections import defaultdict -import bb - -logger = logging.getLogger("BitBake.Provider") - -class NoProvider(bb.BBHandledException): - """Exception raised when no provider of a build dependency can be found""" - -class NoRProvider(bb.BBHandledException): - """Exception raised when no provider of a runtime dependency can be found""" - -class MultipleRProvider(bb.BBHandledException): - """Exception raised when multiple providers of a runtime dependency can be found""" - -def findProviders(cfgData, dataCache, pkg_pn = None): - """ - Convenience function to get latest and preferred providers in pkg_pn - """ - - if not pkg_pn: - pkg_pn = dataCache.pkg_pn - - # Need to ensure data store is expanded - localdata = data.createCopy(cfgData) - bb.data.expandKeys(localdata) - - required = {} - preferred_versions = {} - latest_versions = {} - - for pn in pkg_pn: - (last_ver, last_file, pref_ver, pref_file, req) = findBestProvider(pn, localdata, dataCache, pkg_pn) - preferred_versions[pn] = (pref_ver, pref_file) - latest_versions[pn] = (last_ver, last_file) - required[pn] = req - - return (latest_versions, preferred_versions, required) - -def allProviders(dataCache): - """ - Find all providers for each pn - """ - all_providers = defaultdict(list) - for (fn, pn) in dataCache.pkg_fn.items(): - ver = dataCache.pkg_pepvpr[fn] - all_providers[pn].append((ver, fn)) - return all_providers - -def sortPriorities(pn, dataCache, pkg_pn = None): - """ - Reorder pkg_pn by file priority and default preference - """ - - if not pkg_pn: - pkg_pn = dataCache.pkg_pn - - files = pkg_pn[pn] - priorities = {} - for f in files: - priority = dataCache.bbfile_priority[f] - preference = dataCache.pkg_dp[f] - if priority not in priorities: - priorities[priority] = {} - if preference not in priorities[priority]: - priorities[priority][preference] = [] - priorities[priority][preference].append(f) - tmp_pn = [] - for pri in sorted(priorities): - tmp_pref = [] - for pref in sorted(priorities[pri]): - tmp_pref.extend(priorities[pri][pref]) - tmp_pn = [tmp_pref] + tmp_pn - - return tmp_pn - -def versionVariableMatch(cfgData, keyword, pn): - """ - Return the value of the _VERSION variable if set. - """ - - # pn can contain '_', e.g. gcc-cross-x86_64 and an override cannot - # hence we do this manually rather than use OVERRIDES - ver = cfgData.getVar("%s_VERSION:pn-%s" % (keyword, pn)) - if not ver: - ver = cfgData.getVar("%s_VERSION_%s" % (keyword, pn)) - if not ver: - ver = cfgData.getVar("%s_VERSION" % keyword) - - return ver - -def preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r): - """ - Check if the version pe,pv,pr is the preferred one. 
- If there is preferred version defined and ends with '%', then pv has to start with that version after removing the '%' - """ - if pr == preferred_r or preferred_r is None: - if pe == preferred_e or preferred_e is None: - if preferred_v == pv: - return True - if preferred_v is not None and preferred_v.endswith('%') and pv.startswith(preferred_v[:len(preferred_v)-1]): - return True - return False - -def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None): - """ - Find the first provider in pkg_pn with REQUIRED_VERSION or PREFERRED_VERSION set. - """ - - preferred_file = None - preferred_ver = None - required = False - - required_v = versionVariableMatch(cfgData, "REQUIRED", pn) - preferred_v = versionVariableMatch(cfgData, "PREFERRED", pn) - - itemstr = "" - if item: - itemstr = " (for item %s)" % item - - if required_v is not None: - if preferred_v is not None: - logger.warning("REQUIRED_VERSION and PREFERRED_VERSION for package %s%s are both set using REQUIRED_VERSION %s", pn, itemstr, required_v) - else: - logger.debug("REQUIRED_VERSION is set for package %s%s", pn, itemstr) - # REQUIRED_VERSION always takes precedence over PREFERRED_VERSION - preferred_v = required_v - required = True - - if preferred_v: - m = re.match(r'(\d+:)*(.*)(_.*)*', preferred_v) - if m: - if m.group(1): - preferred_e = m.group(1)[:-1] - else: - preferred_e = None - preferred_v = m.group(2) - if m.group(3): - preferred_r = m.group(3)[1:] - else: - preferred_r = None - else: - preferred_e = None - preferred_r = None - - for file_set in pkg_pn: - for f in file_set: - pe, pv, pr = dataCache.pkg_pepvpr[f] - if preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r): - preferred_file = f - preferred_ver = (pe, pv, pr) - break - if preferred_file: - break; - if preferred_r: - pv_str = '%s-%s' % (preferred_v, preferred_r) - else: - pv_str = preferred_v - if not (preferred_e is None): - pv_str = '%s:%s' % (preferred_e, pv_str) - if preferred_file is None: - if not required: - logger.warning("preferred version %s of %s not available%s", pv_str, pn, itemstr) - available_vers = [] - for file_set in pkg_pn: - for f in file_set: - pe, pv, pr = dataCache.pkg_pepvpr[f] - ver_str = pv - if pe: - ver_str = "%s:%s" % (pe, ver_str) - if not ver_str in available_vers: - available_vers.append(ver_str) - if available_vers: - available_vers.sort() - logger.warning("versions of %s available: %s", pn, ' '.join(available_vers)) - if required: - logger.error("required version %s of %s not available%s", pv_str, pn, itemstr) - else: - if required: - logger.debug("selecting %s as REQUIRED_VERSION %s of package %s%s", preferred_file, pv_str, pn, itemstr) - else: - logger.debug("selecting %s as PREFERRED_VERSION %s of package %s%s", preferred_file, pv_str, pn, itemstr) - - return (preferred_ver, preferred_file, required) - -def findLatestProvider(pn, cfgData, dataCache, file_set): - """ - Return the highest version of the providers in file_set. - Take default preferences into account. 
- """ - latest = None - latest_p = 0 - latest_f = None - for file_name in file_set: - pe, pv, pr = dataCache.pkg_pepvpr[file_name] - dp = dataCache.pkg_dp[file_name] - - if (latest is None) or ((latest_p == dp) and (utils.vercmp(latest, (pe, pv, pr)) < 0)) or (dp > latest_p): - latest = (pe, pv, pr) - latest_f = file_name - latest_p = dp - - return (latest, latest_f) - -def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None): - """ - If there is a PREFERRED_VERSION, find the highest-priority bbfile - providing that version. If not, find the latest version provided by - an bbfile in the highest-priority set. - """ - - sortpkg_pn = sortPriorities(pn, dataCache, pkg_pn) - # Find the highest priority provider with a REQUIRED_VERSION or PREFERRED_VERSION set - (preferred_ver, preferred_file, required) = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn, item) - # Find the latest version of the highest priority provider - (latest, latest_f) = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[0]) - - if not required and preferred_file is None: - preferred_file = latest_f - preferred_ver = latest - - return (latest, latest_f, preferred_ver, preferred_file, required) - -def _filterProviders(providers, item, cfgData, dataCache): - """ - Take a list of providers and filter/reorder according to the - environment variables - """ - eligible = [] - preferred_versions = {} - sortpkg_pn = {} - - # The order of providers depends on the order of the files on the disk - # up to here. Sort pkg_pn to make dependency issues reproducible rather - # than effectively random. - providers.sort() - - # Collate providers by PN - pkg_pn = {} - for p in providers: - pn = dataCache.pkg_fn[p] - if pn not in pkg_pn: - pkg_pn[pn] = [] - pkg_pn[pn].append(p) - - logger.debug("providers for %s are: %s", item, list(sorted(pkg_pn.keys()))) - - # First add REQUIRED_VERSIONS or PREFERRED_VERSIONS - for pn in sorted(pkg_pn): - sortpkg_pn[pn] = sortPriorities(pn, dataCache, pkg_pn) - preferred_ver, preferred_file, required = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn[pn], item) - if required and preferred_file is None: - return eligible - preferred_versions[pn] = (preferred_ver, preferred_file) - if preferred_versions[pn][1]: - eligible.append(preferred_versions[pn][1]) - - # Now add latest versions - for pn in sorted(sortpkg_pn): - if pn in preferred_versions and preferred_versions[pn][1]: - continue - preferred_versions[pn] = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[pn][0]) - eligible.append(preferred_versions[pn][1]) - - if not eligible: - return eligible - - # If pn == item, give it a slight default preference - # This means PREFERRED_PROVIDER_foobar defaults to foobar if available - for p in providers: - pn = dataCache.pkg_fn[p] - if pn != item: - continue - (newvers, fn) = preferred_versions[pn] - if not fn in eligible: - continue - eligible.remove(fn) - eligible = [fn] + eligible - - return eligible - -def filterProviders(providers, item, cfgData, dataCache): - """ - Take a list of providers and filter/reorder according to the - environment variables - Takes a "normal" target item - """ - - eligible = _filterProviders(providers, item, cfgData, dataCache) - - prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % item) - if prefervar: - dataCache.preferred[item] = prefervar - - foundUnique = False - if item in dataCache.preferred: - for p in eligible: - pn = dataCache.pkg_fn[p] - if dataCache.preferred[item] == pn: - logger.verbose("selecting %s to satisfy %s due to 
PREFERRED_PROVIDERS", pn, item) - eligible.remove(p) - eligible = [p] + eligible - foundUnique = True - break - - logger.debug("sorted providers for %s are: %s", item, eligible) - - return eligible, foundUnique - -def filterProvidersRunTime(providers, item, cfgData, dataCache): - """ - Take a list of providers and filter/reorder according to the - environment variables - Takes a "runtime" target item - """ - - eligible = _filterProviders(providers, item, cfgData, dataCache) - - # First try and match any PREFERRED_RPROVIDER entry - prefervar = cfgData.getVar('PREFERRED_RPROVIDER_%s' % item) - foundUnique = False - if prefervar: - for p in eligible: - pn = dataCache.pkg_fn[p] - if prefervar == pn: - logger.verbose("selecting %s to satisfy %s due to PREFERRED_RPROVIDER", pn, item) - eligible.remove(p) - eligible = [p] + eligible - foundUnique = True - numberPreferred = 1 - break - - # If we didn't find an RPROVIDER entry, try and infer the provider from PREFERRED_PROVIDER entries - # by looking through the provides of each eligible recipe and seeing if a PREFERRED_PROVIDER was set. - # This is most useful for virtual/ entries rather than having a RPROVIDER per entry. - if not foundUnique: - # Should use dataCache.preferred here? - preferred = [] - preferred_vars = [] - pns = {} - for p in eligible: - pns[dataCache.pkg_fn[p]] = p - for p in eligible: - pn = dataCache.pkg_fn[p] - provides = dataCache.pn_provides[pn] - for provide in provides: - prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % provide) - #logger.debug("checking PREFERRED_PROVIDER_%s (value %s) against %s", provide, prefervar, pns.keys()) - if prefervar in pns and pns[prefervar] not in preferred: - var = "PREFERRED_PROVIDER_%s = %s" % (provide, prefervar) - logger.verbose("selecting %s to satisfy runtime %s due to %s", prefervar, item, var) - preferred_vars.append(var) - pref = pns[prefervar] - eligible.remove(pref) - eligible = [pref] + eligible - preferred.append(pref) - break - - numberPreferred = len(preferred) - - if numberPreferred > 1: - logger.error("Trying to resolve runtime dependency %s resulted in conflicting PREFERRED_PROVIDER entries being found.\nThe providers found were: %s\nThe PREFERRED_PROVIDER entries resulting in this conflict were: %s. 
You could set PREFERRED_RPROVIDER_%s" % (item, preferred, preferred_vars, item)) - - logger.debug("sorted runtime providers for %s are: %s", item, eligible) - - return eligible, numberPreferred - -regexp_cache = {} - -def getRuntimeProviders(dataCache, rdepend): - """ - Return any providers of runtime dependency - """ - rproviders = [] - - if rdepend in dataCache.rproviders: - rproviders += dataCache.rproviders[rdepend] - - if rdepend in dataCache.packages: - rproviders += dataCache.packages[rdepend] - - if rproviders: - return rproviders - - # Only search dynamic packages if we can't find anything in other variables - for pat_key in dataCache.packages_dynamic: - pattern = pat_key.replace(r'+', r"\+") - if pattern in regexp_cache: - regexp = regexp_cache[pattern] - else: - try: - regexp = re.compile(pattern) - except: - logger.error("Error parsing regular expression '%s'", pattern) - raise - regexp_cache[pattern] = regexp - if regexp.match(rdepend): - rproviders += dataCache.packages_dynamic[pat_key] - logger.debug("Assuming %s is a dynamic package, but it may not exist" % rdepend) - - return rproviders - -def buildWorldTargetList(dataCache, task=None): - """ - Build package list for "bitbake world" - """ - if dataCache.world_target: - return - - logger.debug("collating packages for \"world\"") - for f in dataCache.possible_world: - terminal = True - pn = dataCache.pkg_fn[f] - if task and task not in dataCache.task_deps[f]['tasks']: - logger.debug2("World build skipping %s as task %s doesn't exist", f, task) - terminal = False - - for p in dataCache.pn_provides[pn]: - if p.startswith('virtual/'): - logger.debug2("World build skipping %s due to %s provider starting with virtual/", f, p) - terminal = False - break - for pf in dataCache.providers[p]: - if dataCache.pkg_fn[pf] != pn: - logger.debug2("World build skipping %s due to both us and %s providing %s", f, pf, p) - terminal = False - break - if terminal: - dataCache.world_target.add(pn) diff --git a/bitbake/lib/bb/pysh/__init__.py b/bitbake/lib/bb/pysh/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/bitbake/lib/bb/pysh/pyshlex.py b/bitbake/lib/bb/pysh/pyshlex.py deleted file mode 100644 index a42c294464..0000000000 --- a/bitbake/lib/bb/pysh/pyshlex.py +++ /dev/null @@ -1,883 +0,0 @@ -# pyshlex.py - PLY compatible lexer for pysh. -# -# Copyright 2007 Patrick Mezard -# -# This software may be used and distributed according to the terms -# of the GNU General Public License, incorporated herein by reference. - -# TODO: -# - review all "char in 'abc'" snippets: the empty string can be matched -# - test line continuations within quoted/expansion strings -# - eof is buggy wrt sublexers -# - the lexer cannot really work in pull mode as it would be required to run -# PLY in pull mode. It was designed to work incrementally and it would not be -# that hard to enable pull mode. 
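For illustration before the diff moves into the shell lexer: a trailing '%' in REQUIRED_VERSION/PREFERRED_VERSION acts as a prefix wildcard, as implemented by preferredVersionMatch() in the providers.py hunk above. A minimal standalone sketch of that rule; the function name and the asserts are ours, not part of the deleted file:

    def version_matches(pe, pv, pr, pref_e, pref_v, pref_r):
        # A None component in the preferred triple acts as a wildcard;
        # a trailing '%' on the preferred version matches any pv with
        # that prefix, mirroring preferredVersionMatch() above.
        if pref_r is not None and pr != pref_r:
            return False
        if pref_e is not None and pe != pref_e:
            return False
        if pv == pref_v:
            return True
        return pref_v is not None and pref_v.endswith('%') and pv.startswith(pref_v[:-1])

    assert version_matches(None, "1.2.3", "r0", None, "1.2.%", None)      # prefix match
    assert not version_matches(None, "1.3.0", "r0", None, "1.2.%", None)  # prefix mismatch
    assert version_matches("1", "2.0", "r1", "1", "2.0", "r1")            # exact match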
-import re - -from ply import lex -from bb.pysh.sherrors import * - -class NeedMore(Exception): - pass - -def is_blank(c): - return c in (' ', '\t') - -_RE_DIGITS = re.compile(r'^\d+$') - -def are_digits(s): - return _RE_DIGITS.search(s) is not None - -_OPERATORS = dict([ - ('&&', 'AND_IF'), - ('||', 'OR_IF'), - (';;', 'DSEMI'), - ('<<', 'DLESS'), - ('>>', 'DGREAT'), - ('<&', 'LESSAND'), - ('>&', 'GREATAND'), - ('<>', 'LESSGREAT'), - ('<<-', 'DLESSDASH'), - ('>|', 'CLOBBER'), - ('&', 'AMP'), - (';', 'COMMA'), - ('<', 'LESS'), - ('>', 'GREATER'), - ('(', 'LPARENS'), - (')', 'RPARENS'), -]) - -#Make a function to silence pychecker "Local variable shadows global" -def make_partial_ops(): - partials = {} - for k in _OPERATORS: - for i in range(1, len(k)+1): - partials[k[:i]] = None - return partials - -_PARTIAL_OPERATORS = make_partial_ops() - -def is_partial_op(s): - """Return True if s matches a non-empty subpart of an operator starting - at its first character. - """ - return s in _PARTIAL_OPERATORS - -def is_op(s): - """If s matches an operator, returns the operator identifier. Return None - otherwise. - """ - return _OPERATORS.get(s) - -_RESERVEDS = dict([ - ('if', 'If'), - ('then', 'Then'), - ('else', 'Else'), - ('elif', 'Elif'), - ('fi', 'Fi'), - ('do', 'Do'), - ('done', 'Done'), - ('case', 'Case'), - ('esac', 'Esac'), - ('while', 'While'), - ('until', 'Until'), - ('for', 'For'), - ('{', 'Lbrace'), - ('}', 'Rbrace'), - ('!', 'Bang'), - ('in', 'In'), - ('|', 'PIPE'), -]) - -def get_reserved(s): - return _RESERVEDS.get(s) - -_RE_NAME = re.compile(r'^[0-9a-zA-Z_]+$') - -def is_name(s): - return _RE_NAME.search(s) is not None - -def find_chars(seq, chars): - for i,v in enumerate(seq): - if v in chars: - return i,v - return -1, None - -class WordLexer: - """WordLexer parse quoted or expansion expressions and return an expression - tree. The input string can be any well formed sequence beginning with quoting - or expansion character. Embedded expressions are handled recursively. The - resulting tree is made of lists and strings. Lists represent quoted or - expansion expressions. Each list first element is the opening separator, - the last one the closing separator. In-between can be any number of strings - or lists for sub-expressions. Non quoted/expansion expression can written as - strings or as lists with empty strings as starting and ending delimiters. - """ - - NAME_CHARSET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_' - NAME_CHARSET = dict(zip(NAME_CHARSET, NAME_CHARSET)) - - SPECIAL_CHARSET = '@*#?-$!0' - - #Characters which can be escaped depends on the current delimiters - ESCAPABLE = { - '`': set(['$', '\\', '`']), - '"': set(['$', '\\', '`', '"']), - "'": set(), - } - - def __init__(self, heredoc = False): - # _buffer is the unprocessed input characters buffer - self._buffer = [] - # _stack is empty or contains a quoted list being processed - # (this is the DFS path to the quoted expression being evaluated). - self._stack = [] - self._escapable = None - # True when parsing unquoted here documents - self._heredoc = heredoc - - def add(self, data, eof=False): - """Feed the lexer with more data. If the quoted expression can be - delimited, return a tuple (expr, remaining) containing the expression - tree and the unconsumed data. - Otherwise, raise NeedMore. 
- """ - self._buffer += list(data) - self._parse(eof) - - result = self._stack[0] - remaining = ''.join(self._buffer) - self._stack = [] - self._buffer = [] - return result, remaining - - def _is_escapable(self, c, delim=None): - if delim is None: - if self._heredoc: - # Backslashes works as if they were double quoted in unquoted - # here-documents - delim = '"' - else: - if len(self._stack)<=1: - return True - delim = self._stack[-2][0] - - escapables = self.ESCAPABLE.get(delim, None) - return escapables is None or c in escapables - - def _parse_squote(self, buf, result, eof): - if not buf: - raise NeedMore() - try: - pos = buf.index("'") - except ValueError: - raise NeedMore() - result[-1] += ''.join(buf[:pos]) - result += ["'"] - return pos+1, True - - def _parse_bquote(self, buf, result, eof): - if not buf: - raise NeedMore() - - if buf[0]=='\n': - #Remove line continuations - result[:] = ['', '', ''] - elif self._is_escapable(buf[0]): - result[-1] += buf[0] - result += [''] - else: - #Keep as such - result[:] = ['', '\\'+buf[0], ''] - - return 1, True - - def _parse_dquote(self, buf, result, eof): - if not buf: - raise NeedMore() - pos, sep = find_chars(buf, '$\\`"') - if pos==-1: - raise NeedMore() - - result[-1] += ''.join(buf[:pos]) - if sep=='"': - result += ['"'] - return pos+1, True - else: - #Keep everything until the separator and defer processing - return pos, False - - def _parse_command(self, buf, result, eof): - if not buf: - raise NeedMore() - - chars = '$\\`"\'' - if result[0] == '$(': - chars += ')' - pos, sep = find_chars(buf, chars) - if pos == -1: - raise NeedMore() - - result[-1] += ''.join(buf[:pos]) - if (result[0]=='$(' and sep==')') or (result[0]=='`' and sep=='`'): - result += [sep] - return pos+1, True - else: - return pos, False - - def _parse_parameter(self, buf, result, eof): - if not buf: - raise NeedMore() - - pos, sep = find_chars(buf, '$\\`"\'}') - if pos==-1: - raise NeedMore() - - result[-1] += ''.join(buf[:pos]) - if sep=='}': - result += [sep] - return pos+1, True - else: - return pos, False - - def _parse_dollar(self, buf, result, eof): - sep = result[0] - if sep=='$': - if not buf: - #TODO: handle empty $ - raise NeedMore() - if buf[0]=='(': - if len(buf)==1: - raise NeedMore() - - if buf[1]=='(': - result[0] = '$((' - buf[:2] = [] - else: - result[0] = '$(' - buf[:1] = [] - - elif buf[0]=='{': - result[0] = '${' - buf[:1] = [] - else: - if buf[0] in self.SPECIAL_CHARSET: - result[-1] = buf[0] - read = 1 - else: - for read,c in enumerate(buf): - if c not in self.NAME_CHARSET: - break - else: - if not eof: - raise NeedMore() - read += 1 - - result[-1] += ''.join(buf[0:read]) - - if not result[-1]: - result[:] = ['', result[0], ''] - else: - result += [''] - return read,True - - sep = result[0] - if sep=='$(': - parsefunc = self._parse_command - elif sep=='${': - parsefunc = self._parse_parameter - else: - raise NotImplementedError(sep) - - pos, closed = parsefunc(buf, result, eof) - return pos, closed - - def _parse(self, eof): - buf = self._buffer - stack = self._stack - recurse = False - - while 1: - if not stack or recurse: - if not buf: - raise NeedMore() - if buf[0] not in ('"\\`$\''): - raise ShellSyntaxError('Invalid quoted string sequence') - stack.append([buf[0], '']) - buf[:1] = [] - recurse = False - - result = stack[-1] - if result[0]=="'": - parsefunc = self._parse_squote - elif result[0]=='\\': - parsefunc = self._parse_bquote - elif result[0]=='"': - parsefunc = self._parse_dquote - elif result[0]=='`': - parsefunc = 
self._parse_command - elif result[0][0]=='$': - parsefunc = self._parse_dollar - else: - raise NotImplementedError() - - read, closed = parsefunc(buf, result, eof) - - buf[:read] = [] - if closed: - if len(stack)>1: - #Merge in parent expression - parsed = stack.pop() - stack[-1] += [parsed] - stack[-1] += [''] - else: - break - else: - recurse = True - -def normalize_wordtree(wtree): - """Fold back every literal sequence (delimited with empty strings) into - parent sequence. - """ - def normalize(wtree): - result = [] - for part in wtree[1:-1]: - if isinstance(part, list): - part = normalize(part) - if part[0]=='': - #Move the part content back at current level - result += part[1:-1] - continue - elif not part: - #Remove empty strings - continue - result.append(part) - if not result: - result = [''] - return [wtree[0]] + result + [wtree[-1]] - - return normalize(wtree) - - -def make_wordtree(token, here_document=False): - """Parse a delimited token and return a tree similar to the ones returned by - WordLexer. token may contain any combinations of expansion/quoted fields and - non-ones. - """ - tree = [''] - remaining = token - delimiters = '\\$`' - if not here_document: - delimiters += '\'"' - - while 1: - pos, sep = find_chars(remaining, delimiters) - if pos==-1: - tree += [remaining, ''] - return normalize_wordtree(tree) - tree.append(remaining[:pos]) - remaining = remaining[pos:] - - try: - result, remaining = WordLexer(heredoc = here_document).add(remaining, True) - except NeedMore: - raise ShellSyntaxError('Invalid token "%s"') - tree.append(result) - - -def wordtree_as_string(wtree): - """Rewrite an expression tree generated by make_wordtree as string.""" - def visit(node, output): - for child in node: - if isinstance(child, list): - visit(child, output) - else: - output.append(child) - - output = [] - visit(wtree, output) - return ''.join(output) - - -def unquote_wordtree(wtree): - """Fold the word tree while removing quotes everywhere. Other expansion - sequences are joined as such. - """ - def unquote(wtree): - unquoted = [] - if wtree[0] in ('', "'", '"', '\\'): - wtree = wtree[1:-1] - - for part in wtree: - if isinstance(part, list): - part = unquote(part) - unquoted.append(part) - return ''.join(unquoted) - - return unquote(wtree) - - -class HereDocLexer: - """HereDocLexer delimits whatever comes from the here-document starting newline - not included to the closing delimiter line included. - """ - def __init__(self, op, delim): - assert op in ('<<', '<<-') - if not delim: - raise ShellSyntaxError('invalid here document delimiter %s' % str(delim)) - - self._op = op - self._delim = delim - self._buffer = [] - self._token = [] - - def add(self, data, eof): - """If the here-document was delimited, return a tuple (content, remaining). - Raise NeedMore() otherwise. - """ - self._buffer += list(data) - self._parse(eof) - token = ''.join(self._token) - remaining = ''.join(self._buffer) - self._token, self._remaining = [], [] - return token, remaining - - def _parse(self, eof): - while 1: - #Look for first unescaped newline. 
Quotes may be ignored - escaped = False - for i,c in enumerate(self._buffer): - if escaped: - escaped = False - elif c=='\\': - escaped = True - elif c=='\n': - break - else: - i = -1 - - if i==-1 or self._buffer[i]!='\n': - if not eof: - raise NeedMore() - #No more data, maybe the last line is closing delimiter - line = ''.join(self._buffer) - eol = '' - self._buffer[:] = [] - else: - line = ''.join(self._buffer[:i]) - eol = self._buffer[i] - self._buffer[:i+1] = [] - - if self._op=='<<-': - line = line.lstrip('\t') - - if line==self._delim: - break - - self._token += [line, eol] - if i==-1: - break - -class Token: - #TODO: check this is still in use - OPERATOR = 'OPERATOR' - WORD = 'WORD' - - def __init__(self): - self.value = '' - self.type = None - - def __getitem__(self, key): - #Behave like a two elements tuple - if key==0: - return self.type - if key==1: - return self.value - raise IndexError(key) - - -class HereDoc: - def __init__(self, op, name=None): - self.op = op - self.name = name - self.pendings = [] - -TK_COMMA = 'COMMA' -TK_AMPERSAND = 'AMP' -TK_OP = 'OP' -TK_TOKEN = 'TOKEN' -TK_COMMENT = 'COMMENT' -TK_NEWLINE = 'NEWLINE' -TK_IONUMBER = 'IO_NUMBER' -TK_ASSIGNMENT = 'ASSIGNMENT_WORD' -TK_HERENAME = 'HERENAME' - -class Lexer: - """Main lexer. - - Call add() until the script AST is returned. - """ - # Here-document handling makes the whole thing more complex because they basically - # force tokens to be reordered: here-content must come right after the operator - # and the here-document name, while some other tokens might be following the - # here-document expression on the same line. - # - # So, here-doc states are basically: - # *self._state==ST_NORMAL - # - self._heredoc.op is None: no here-document - # - self._heredoc.op is not None but name is: here-document operator matched, - # waiting for the document name/delimiter - # - self._heredoc.op and name are not None: here-document is ready, following - # tokens are being stored and will be pushed again when the document is - # completely parsed. - # *self._state==ST_HEREDOC - # - The here-document is being delimited by self._herelexer. Once it is done - # the content is pushed in front of the pending token list then all these - # tokens are pushed once again. 
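For orientation, the ST_OP state below leans on the _OPERATORS/_PARTIAL_OPERATORS tables defined earlier: the lexer keeps consuming characters while the accumulated string is still a prefix of some operator, then emits the longest complete match. A self-contained sketch of that technique (table abbreviated, names ours, not part of the deleted file):

    OPERATORS = {'&&': 'AND_IF', '||': 'OR_IF', '>>': 'DGREAT',
                 '>': 'GREATER', '<': 'LESS', '&': 'AMP'}
    # Every non-empty prefix of an operator, mirroring _PARTIAL_OPERATORS
    PARTIALS = {op[:i] for op in OPERATORS for i in range(1, len(op) + 1)}

    def scan_operator(s, pos):
        # Consume characters while the accumulated string could still grow
        # into an operator, then emit the completed (longest) match.
        tok = ''
        while pos < len(s) and (tok + s[pos]) in PARTIALS:
            tok += s[pos]
            pos += 1
        return OPERATORS.get(tok), pos

    print(scan_operator('>>log', 0))   # ('DGREAT', 2)
    print(scan_operator('>log', 0))    # ('GREATER', 1)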
- ST_NORMAL = 'ST_NORMAL' - ST_OP = 'ST_OP' - ST_BACKSLASH = 'ST_BACKSLASH' - ST_QUOTED = 'ST_QUOTED' - ST_COMMENT = 'ST_COMMENT' - ST_HEREDOC = 'ST_HEREDOC' - - #Match end of backquote strings - RE_BACKQUOTE_END = re.compile(r'(?=len(self._input): - if not eof: - raise NeedMore() - elif self._state not in (self.ST_OP, self.ST_QUOTED, self.ST_HEREDOC): - #Delimit the current token and leave cleanly - self._push_token('') - break - else: - #Let the sublexer handle the eof themselves - pass - - if self._state==self.ST_NORMAL: - self._parse_normal() - elif self._state==self.ST_COMMENT: - self._parse_comment() - elif self._state==self.ST_OP: - self._parse_op(eof) - elif self._state==self.ST_QUOTED: - self._parse_quoted(eof) - elif self._state==self.ST_HEREDOC: - self._parse_heredoc(eof) - else: - assert False, "Unknown state " + str(self._state) - - if self._heredoc.op is not None: - raise ShellSyntaxError('missing here-document delimiter') - - def _parse_normal(self): - c = self._input[self._pos] - if c=='\n': - self._push_token(c) - self._token = c - self._type = TK_NEWLINE - self._push_token('') - self._pos += 1 - elif c in ('\\', '\'', '"', '`', '$'): - self._state = self.ST_QUOTED - elif is_partial_op(c): - self._push_token(c) - - self._type = TK_OP - self._token += c - self._pos += 1 - self._state = self.ST_OP - elif is_blank(c): - self._push_token(c) - - #Discard blanks - self._pos += 1 - elif self._token: - self._token += c - self._pos += 1 - elif c=='#': - self._state = self.ST_COMMENT - self._type = TK_COMMENT - self._pos += 1 - else: - self._pos += 1 - self._token += c - - def _parse_op(self, eof): - assert self._token - - while 1: - if self._pos>=len(self._input): - if not eof: - raise NeedMore() - c = '' - else: - c = self._input[self._pos] - - op = self._token + c - if c and is_partial_op(op): - #Still parsing an operator - self._token = op - self._pos += 1 - else: - #End of operator - self._push_token(c) - self._state = self.ST_NORMAL - break - - def _parse_comment(self): - while 1: - if self._pos>=len(self._input): - raise NeedMore() - - c = self._input[self._pos] - if c=='\n': - #End of comment, do not consume the end of line - self._state = self.ST_NORMAL - break - else: - self._token += c - self._pos += 1 - - def _parse_quoted(self, eof): - """Precondition: the starting backquote/dollar is still in the input queue.""" - if not self._wordlexer: - self._wordlexer = WordLexer() - - if self._pos'): - #Detect IO_NUMBER - self._type = TK_IONUMBER - elif self._token==';': - self._type = TK_COMMA - elif self._token=='&': - self._type = TK_AMPERSAND - elif self._type==TK_COMMENT: - #Comments are not part of sh grammar, ignore them - self._token = '' - self._type = TK_TOKEN - return 0 - - if self._for_count is not None: - #Track token count in 'For' expression to detect 'In' reserved words. 
- #Can only be in third position, no need to go beyond - self._for_count += 1 - if self._for_count==3: - self._for_count = None - - self.on_token((self._token, self._type)) - self._token = '' - self._type = TK_TOKEN - return 1 - - def on_token(self, token): - raise NotImplementedError - - -tokens = [ - TK_TOKEN, -# To silence yacc unused token warnings -# TK_COMMENT, - TK_NEWLINE, - TK_IONUMBER, - TK_ASSIGNMENT, - TK_HERENAME, -] - -#Add specific operators -tokens += _OPERATORS.values() -#Add reserved words -tokens += _RESERVEDS.values() - -class PLYLexer(Lexer): - """Bridge Lexer and PLY lexer interface.""" - def __init__(self): - Lexer.__init__(self) - self._tokens = [] - self._current = 0 - self.lineno = 0 - - def on_token(self, token): - value, type = token - - self.lineno = 0 - t = lex.LexToken() - t.value = value - t.type = type - t.lexer = self - t.lexpos = 0 - t.lineno = 0 - - self._tokens.append(t) - - def is_empty(self): - return not bool(self._tokens) - - #PLY compliant interface - def token(self): - if self._current>=len(self._tokens): - return None - t = self._tokens[self._current] - self._current += 1 - return t - - -def get_tokens(s): - """Parse the input string and return a tuple (tokens, unprocessed) where - tokens is a list of parsed tokens and unprocessed is the part of the input - string left untouched by the lexer. - """ - lexer = PLYLexer() - untouched = lexer.add(s, True) - tokens = [] - while 1: - token = lexer.token() - if token is None: - break - tokens.append(token) - - tokens = [(t.value, t.type) for t in tokens] - return tokens, untouched diff --git a/bitbake/lib/bb/pysh/pyshyacc.py b/bitbake/lib/bb/pysh/pyshyacc.py deleted file mode 100644 index 924860a6f3..0000000000 --- a/bitbake/lib/bb/pysh/pyshyacc.py +++ /dev/null @@ -1,783 +0,0 @@ -# pyshyacc.py - PLY grammar definition for pysh -# -# Copyright 2007 Patrick Mezard -# -# This software may be used and distributed according to the terms -# of the GNU General Public License, incorporated herein by reference. - -"""PLY grammar file. -""" -import os.path -import sys - -import bb.pysh.pyshlex as pyshlex -tokens = pyshlex.tokens - -from ply import yacc -import bb.pysh.sherrors as sherrors - -class IORedirect: - def __init__(self, op, filename, io_number=None): - self.op = op - self.filename = filename - self.io_number = io_number - -class HereDocument: - def __init__(self, op, name, content, io_number=None): - self.op = op - self.name = name - self.content = content - self.io_number = io_number - -def make_io_redirect(p): - """Make an IORedirect instance from the input 'io_redirect' production.""" - name, io_number, io_target = p - assert name=='io_redirect' - - if io_target[0]=='io_file': - io_type, io_op, io_file = io_target - return IORedirect(io_op, io_file, io_number) - elif io_target[0]=='io_here': - io_type, io_op, io_name, io_content = io_target - return HereDocument(io_op, io_name, io_content, io_number) - else: - assert False, "Invalid IO redirection token %s" % repr(io_type) - -class SimpleCommand: - """ - assigns contains (name, value) pairs. 
- """ - def __init__(self, words, redirs, assigns): - self.words = list(words) - self.redirs = list(redirs) - self.assigns = list(assigns) - -class Pipeline: - def __init__(self, commands, reverse_status=False): - self.commands = list(commands) - assert self.commands #Grammar forbids this - self.reverse_status = reverse_status - -class AndOr: - def __init__(self, op, left, right): - self.op = str(op) - self.left = left - self.right = right - -class ForLoop: - def __init__(self, name, items, cmds): - self.name = str(name) - self.items = list(items) - self.cmds = list(cmds) - -class WhileLoop: - def __init__(self, condition, cmds): - self.condition = list(condition) - self.cmds = list(cmds) - -class UntilLoop: - def __init__(self, condition, cmds): - self.condition = list(condition) - self.cmds = list(cmds) - -class FunDef: - def __init__(self, name, body): - self.name = str(name) - self.body = body - -class BraceGroup: - def __init__(self, cmds): - self.cmds = list(cmds) - -class IfCond: - def __init__(self, cond, if_cmds, else_cmds): - self.cond = list(cond) - self.if_cmds = if_cmds - self.else_cmds = else_cmds - -class Case: - def __init__(self, name, items): - self.name = name - self.items = items - -class SubShell: - def __init__(self, cmds): - self.cmds = cmds - -class RedirectList: - def __init__(self, cmd, redirs): - self.cmd = cmd - self.redirs = list(redirs) - -def get_production(productions, ptype): - """productions must be a list of production tuples like (name, obj) where - name is the production string identifier. - Return the first production named 'ptype'. Raise KeyError if None can be - found. - """ - for production in productions: - if production is not None and production[0]==ptype: - return production - raise KeyError(ptype) - -#------------------------------------------------------------------------------- -# PLY grammar definition -#------------------------------------------------------------------------------- - -def p_multiple_commands(p): - """multiple_commands : newline_sequence - | complete_command - | multiple_commands complete_command""" - if len(p)==2: - if p[1] is not None: - p[0] = [p[1]] - else: - p[0] = [] - else: - p[0] = p[1] + [p[2]] - -def p_complete_command(p): - """complete_command : list separator - | list""" - if len(p)==3 and p[2] and p[2][1] == '&': - p[0] = ('async', p[1]) - else: - p[0] = p[1] - -def p_list(p): - """list : list separator_op and_or - | and_or""" - if len(p)==2: - p[0] = [p[1]] - else: - #if p[2]!=';': - # raise NotImplementedError('AND-OR list asynchronous execution is not implemented') - p[0] = p[1] + [p[3]] - -def p_and_or(p): - """and_or : pipeline - | and_or AND_IF linebreak pipeline - | and_or OR_IF linebreak pipeline""" - if len(p)==2: - p[0] = p[1] - else: - p[0] = ('and_or', AndOr(p[2], p[1], p[4])) - -def p_maybe_bang_word(p): - """maybe_bang_word : Bang""" - p[0] = ('maybe_bang_word', p[1]) - -def p_pipeline(p): - """pipeline : pipe_sequence - | bang_word pipe_sequence""" - if len(p)==3: - p[0] = ('pipeline', Pipeline(p[2][1:], True)) - else: - p[0] = ('pipeline', Pipeline(p[1][1:])) - -def p_pipe_sequence(p): - """pipe_sequence : command - | pipe_sequence PIPE linebreak command""" - if len(p)==2: - p[0] = ['pipe_sequence', p[1]] - else: - p[0] = p[1] + [p[4]] - -def p_command(p): - """command : simple_command - | compound_command - | compound_command redirect_list - | function_definition""" - - if p[1][0] in ( 'simple_command', - 'for_clause', - 'while_clause', - 'until_clause', - 'case_clause', - 'if_clause', - 
'function_definition', - 'subshell', - 'brace_group',): - if len(p) == 2: - p[0] = p[1] - else: - p[0] = ('redirect_list', RedirectList(p[1], p[2][1:])) - else: - raise NotImplementedError('%s command is not implemented' % repr(p[1][0])) - -def p_compound_command(p): - """compound_command : brace_group - | subshell - | for_clause - | case_clause - | if_clause - | while_clause - | until_clause""" - p[0] = p[1] - -def p_subshell(p): - """subshell : LPARENS compound_list RPARENS""" - p[0] = ('subshell', SubShell(p[2][1:])) - -def p_compound_list(p): - """compound_list : term - | newline_list term - | term separator - | newline_list term separator""" - productions = p[1:] - try: - sep = get_production(productions, 'separator') - if sep[1]!=';': - raise NotImplementedError() - except KeyError: - pass - term = get_production(productions, 'term') - p[0] = ['compound_list'] + term[1:] - -def p_term(p): - """term : term separator and_or - | and_or""" - if len(p)==2: - p[0] = ['term', p[1]] - else: - if p[2] is not None and p[2][1] == '&': - p[0] = ['term', ('async', p[1][1:])] + [p[3]] - else: - p[0] = p[1] + [p[3]] - -def p_maybe_for_word(p): - # Rearrange 'For' priority wrt TOKEN. See p_for_word - """maybe_for_word : For""" - p[0] = ('maybe_for_word', p[1]) - -def p_for_clause(p): - """for_clause : for_word name linebreak do_group - | for_word name linebreak in sequential_sep do_group - | for_word name linebreak in wordlist sequential_sep do_group""" - productions = p[1:] - do_group = get_production(productions, 'do_group') - try: - items = get_production(productions, 'in')[1:] - except KeyError: - raise NotImplementedError('"in" omission is not implemented') - - try: - items = get_production(productions, 'wordlist')[1:] - except KeyError: - items = [] - - name = p[2] - p[0] = ('for_clause', ForLoop(name, items, do_group[1:])) - -def p_name(p): - """name : token""" #Was NAME instead of token - p[0] = p[1] - -def p_in(p): - """in : In""" - p[0] = ('in', p[1]) - -def p_wordlist(p): - """wordlist : wordlist token - | token""" - if len(p)==2: - p[0] = ['wordlist', ('TOKEN', p[1])] - else: - p[0] = p[1] + [('TOKEN', p[2])] - -def p_case_clause(p): - """case_clause : Case token linebreak in linebreak case_list Esac - | Case token linebreak in linebreak case_list_ns Esac - | Case token linebreak in linebreak Esac""" - if len(p) < 8: - items = [] - else: - items = p[6][1:] - name = p[2] - p[0] = ('case_clause', Case(name, [c[1] for c in items])) - -def p_case_list_ns(p): - """case_list_ns : case_list case_item_ns - | case_item_ns""" - p_case_list(p) - -def p_case_list(p): - """case_list : case_list case_item - | case_item""" - if len(p)==2: - p[0] = ['case_list', p[1]] - else: - p[0] = p[1] + [p[2]] - -def p_case_item_ns(p): - """case_item_ns : pattern RPARENS linebreak - | pattern RPARENS compound_list linebreak - | LPARENS pattern RPARENS linebreak - | LPARENS pattern RPARENS compound_list linebreak""" - p_case_item(p) - -def p_case_item(p): - """case_item : pattern RPARENS linebreak DSEMI linebreak - | pattern RPARENS compound_list DSEMI linebreak - | LPARENS pattern RPARENS linebreak DSEMI linebreak - | LPARENS pattern RPARENS compound_list DSEMI linebreak""" - if len(p) < 7: - name = p[1][1:] - else: - name = p[2][1:] - - try: - cmds = get_production(p[1:], "compound_list")[1:] - except KeyError: - cmds = [] - - p[0] = ('case_item', (name, cmds)) - -def p_pattern(p): - """pattern : token - | pattern PIPE token""" - if len(p)==2: - p[0] = ['pattern', ('TOKEN', p[1])] - else: - p[0] = p[1] + 
[('TOKEN', p[2])] - -def p_maybe_if_word(p): - # Rearrange 'If' priority wrt TOKEN. See p_if_word - """maybe_if_word : If""" - p[0] = ('maybe_if_word', p[1]) - -def p_maybe_then_word(p): - # Rearrange 'Then' priority wrt TOKEN. See p_then_word - """maybe_then_word : Then""" - p[0] = ('maybe_then_word', p[1]) - -def p_if_clause(p): - """if_clause : if_word compound_list then_word compound_list else_part Fi - | if_word compound_list then_word compound_list Fi""" - else_part = [] - if len(p)==7: - else_part = p[5] - p[0] = ('if_clause', IfCond(p[2][1:], p[4][1:], else_part)) - -def p_else_part(p): - """else_part : Elif compound_list then_word compound_list else_part - | Elif compound_list then_word compound_list - | Else compound_list""" - if len(p)==3: - p[0] = p[2][1:] - else: - else_part = [] - if len(p)==6: - else_part = p[5] - p[0] = ('elif', IfCond(p[2][1:], p[4][1:], else_part)) - -def p_while_clause(p): - """while_clause : While compound_list do_group""" - p[0] = ('while_clause', WhileLoop(p[2][1:], p[3][1:])) - -def p_maybe_until_word(p): - # Rearrange 'Until' priority wrt TOKEN. See p_until_word - """maybe_until_word : Until""" - p[0] = ('maybe_until_word', p[1]) - -def p_until_clause(p): - """until_clause : until_word compound_list do_group""" - p[0] = ('until_clause', UntilLoop(p[2][1:], p[3][1:])) - -def p_function_definition(p): - """function_definition : fname LPARENS RPARENS linebreak function_body""" - p[0] = ('function_definition', FunDef(p[1], p[5])) - -def p_function_body(p): - """function_body : compound_command - | compound_command redirect_list""" - if len(p)!=2: - raise NotImplementedError('functions redirections lists are not implemented') - p[0] = p[1] - -def p_fname(p): - """fname : TOKEN""" #Was NAME instead of token - p[0] = p[1] - -def p_brace_group(p): - """brace_group : Lbrace compound_list Rbrace""" - p[0] = ('brace_group', BraceGroup(p[2][1:])) - -def p_maybe_done_word(p): - #See p_assignment_word for details. - """maybe_done_word : Done""" - p[0] = ('maybe_done_word', p[1]) - -def p_maybe_do_word(p): - """maybe_do_word : Do""" - p[0] = ('maybe_do_word', p[1]) - -def p_do_group(p): - """do_group : do_word compound_list done_word""" - #Do group contains a list of AndOr - p[0] = ['do_group'] + p[2][1:] - -def p_simple_command(p): - """simple_command : cmd_prefix cmd_word cmd_suffix - | cmd_prefix cmd_word - | cmd_prefix - | cmd_name cmd_suffix - | cmd_name""" - words, redirs, assigns = [], [], [] - for e in p[1:]: - name = e[0] - if name in ('cmd_prefix', 'cmd_suffix'): - for sube in e[1:]: - subname = sube[0] - if subname=='io_redirect': - redirs.append(make_io_redirect(sube)) - elif subname=='ASSIGNMENT_WORD': - assigns.append(sube) - else: - words.append(sube) - elif name in ('cmd_word', 'cmd_name'): - words.append(e) - - cmd = SimpleCommand(words, redirs, assigns) - p[0] = ('simple_command', cmd) - -def p_cmd_name(p): - """cmd_name : TOKEN""" - p[0] = ('cmd_name', p[1]) - -def p_cmd_word(p): - """cmd_word : token""" - p[0] = ('cmd_word', p[1]) - -def p_maybe_assignment_word(p): - #See p_assignment_word for details. 
- """maybe_assignment_word : ASSIGNMENT_WORD""" - p[0] = ('maybe_assignment_word', p[1]) - -def p_cmd_prefix(p): - """cmd_prefix : io_redirect - | cmd_prefix io_redirect - | assignment_word - | cmd_prefix assignment_word""" - try: - prefix = get_production(p[1:], 'cmd_prefix') - except KeyError: - prefix = ['cmd_prefix'] - - try: - value = get_production(p[1:], 'assignment_word')[1] - value = ('ASSIGNMENT_WORD', value.split('=', 1)) - except KeyError: - value = get_production(p[1:], 'io_redirect') - p[0] = prefix + [value] - -def p_cmd_suffix(p): - """cmd_suffix : io_redirect - | cmd_suffix io_redirect - | token - | cmd_suffix token - | maybe_for_word - | cmd_suffix maybe_for_word - | maybe_done_word - | cmd_suffix maybe_done_word - | maybe_do_word - | cmd_suffix maybe_do_word - | maybe_until_word - | cmd_suffix maybe_until_word - | maybe_assignment_word - | cmd_suffix maybe_assignment_word - | maybe_if_word - | cmd_suffix maybe_if_word - | maybe_then_word - | cmd_suffix maybe_then_word - | maybe_bang_word - | cmd_suffix maybe_bang_word""" - try: - suffix = get_production(p[1:], 'cmd_suffix') - token = p[2] - except KeyError: - suffix = ['cmd_suffix'] - token = p[1] - - if isinstance(token, tuple): - if token[0]=='io_redirect': - p[0] = suffix + [token] - else: - #Convert maybe_* to TOKEN if necessary - p[0] = suffix + [('TOKEN', token[1])] - else: - p[0] = suffix + [('TOKEN', token)] - -def p_redirect_list(p): - """redirect_list : io_redirect - | redirect_list io_redirect""" - if len(p) == 2: - p[0] = ['redirect_list', make_io_redirect(p[1])] - else: - p[0] = p[1] + [make_io_redirect(p[2])] - -def p_io_redirect(p): - """io_redirect : io_file - | IO_NUMBER io_file - | io_here - | IO_NUMBER io_here""" - if len(p)==3: - p[0] = ('io_redirect', p[1], p[2]) - else: - p[0] = ('io_redirect', None, p[1]) - -def p_io_file(p): - #Return the tuple (operator, filename) - """io_file : LESS filename - | LESSAND filename - | GREATER filename - | GREATAND filename - | DGREAT filename - | LESSGREAT filename - | CLOBBER filename""" - #Extract the filename from the file - p[0] = ('io_file', p[1], p[2][1]) - -def p_filename(p): - #Return the filename - """filename : TOKEN""" - p[0] = ('filename', p[1]) - -def p_io_here(p): - """io_here : DLESS here_end - | DLESSDASH here_end""" - p[0] = ('io_here', p[1], p[2][1], p[2][2]) - -def p_here_end(p): - """here_end : HERENAME TOKEN""" - p[0] = ('here_document', p[1], p[2]) - -def p_newline_sequence(p): - # Nothing in the grammar can handle leading NEWLINE productions, so add - # this one with the lowest possible priority relatively to newline_list. - """newline_sequence : newline_list""" - p[0] = None - -def p_newline_list(p): - """newline_list : NEWLINE - | newline_list NEWLINE""" - p[0] = None - -def p_linebreak(p): - """linebreak : newline_list - | empty""" - p[0] = None - -def p_separator_op(p): - """separator_op : COMMA - | COMMA COMMA - | AMP""" - p[0] = p[1] - -def p_separator(p): - """separator : separator_op linebreak - | newline_list""" - if len(p)==2: - #Ignore newlines - p[0] = None - else: - #Keep the separator operator - p[0] = ('separator', p[1]) - -def p_sequential_sep(p): - """sequential_sep : COMMA linebreak - | newline_list""" - p[0] = None - -# Low priority TOKEN => for_word conversion. -# Let maybe_for_word be used as a token when necessary in higher priority -# rules. 
-def p_for_word(p): - """for_word : maybe_for_word""" - p[0] = p[1] - -def p_if_word(p): - """if_word : maybe_if_word""" - p[0] = p[1] - -def p_then_word(p): - """then_word : maybe_then_word""" - p[0] = p[1] - -def p_done_word(p): - """done_word : maybe_done_word""" - p[0] = p[1] - -def p_do_word(p): - """do_word : maybe_do_word""" - p[0] = p[1] - -def p_until_word(p): - """until_word : maybe_until_word""" - p[0] = p[1] - -def p_assignment_word(p): - """assignment_word : maybe_assignment_word""" - p[0] = ('assignment_word', p[1][1]) - -def p_bang_word(p): - """bang_word : maybe_bang_word""" - p[0] = ('bang_word', p[1][1]) - -def p_token(p): - """token : TOKEN - | Fi""" - p[0] = p[1] - -def p_empty(p): - 'empty :' - p[0] = None - -# Error rule for syntax errors -def p_error(p): - msg = [] - w = msg.append - if p: - w('%r\n' % p) - w('followed by:\n') - for i in range(5): - n = yacc.token() - if not n: - break - w(' %r\n' % n) - else: - w('Unexpected EOF') - raise sherrors.ShellSyntaxError(''.join(msg)) - -# Build the parser -try: - import pyshtables -except ImportError: - outputdir = os.path.dirname(__file__) - if not os.access(outputdir, os.W_OK): - outputdir = '' - yacc.yacc(tabmodule = 'pyshtables', outputdir = outputdir, debug = 0) -else: - yacc.yacc(tabmodule = 'pysh.pyshtables', write_tables = 0, debug = 0) - - -def parse(input, eof=False, debug=False): - """Parse a whole script at once and return the generated AST and unconsumed - data in a tuple. - - NOTE: eof is probably meaningless for now, the parser being unable to work - in pull mode. It should be set to True. - """ - lexer = pyshlex.PLYLexer() - remaining = lexer.add(input, eof) - if lexer.is_empty(): - return [], remaining - if debug: - debug = 2 - return yacc.parse(lexer=lexer, debug=debug), remaining - -#------------------------------------------------------------------------------- -# AST rendering helpers -#------------------------------------------------------------------------------- - -def format_commands(v): - """Return a tree made of strings and lists. Make command trees easier to - display. - """ - if isinstance(v, list): - return [format_commands(c) for c in v] - if isinstance(v, tuple): - if len(v)==2 and isinstance(v[0], str) and not isinstance(v[1], str): - if v[0] == 'async': - return ['AsyncList', map(format_commands, v[1])] - else: - #Avoid decomposing tuples like ('pipeline', Pipeline(...)) - return format_commands(v[1]) - return format_commands(list(v)) - elif isinstance(v, IfCond): - name = ['IfCond'] - name += ['if', map(format_commands, v.cond)] - name += ['then', map(format_commands, v.if_cmds)] - name += ['else', map(format_commands, v.else_cmds)] - return name - elif isinstance(v, ForLoop): - name = ['ForLoop'] - name += [repr(v.name)+' in ', map(str, v.items)] - name += ['commands', map(format_commands, v.cmds)] - return name - elif isinstance(v, AndOr): - return [v.op, format_commands(v.left), format_commands(v.right)] - elif isinstance(v, Pipeline): - name = 'Pipeline' - if v.reverse_status: - name = '!' 
+ name - return [name, format_commands(v.commands)] - elif isinstance(v, Case): - name = ['Case'] - name += [v.name, format_commands(v.items)] - elif isinstance(v, SimpleCommand): - name = ['SimpleCommand'] - if v.words: - name += ['words', map(str, v.words)] - if v.assigns: - assigns = [tuple(a[1]) for a in v.assigns] - name += ['assigns', map(str, assigns)] - if v.redirs: - name += ['redirs', map(format_commands, v.redirs)] - return name - elif isinstance(v, RedirectList): - name = ['RedirectList'] - if v.redirs: - name += ['redirs', map(format_commands, v.redirs)] - name += ['command', format_commands(v.cmd)] - return name - elif isinstance(v, IORedirect): - return ' '.join(map(str, (v.io_number, v.op, v.filename))) - elif isinstance(v, HereDocument): - return ' '.join(map(str, (v.io_number, v.op, repr(v.name), repr(v.content)))) - elif isinstance(v, SubShell): - return ['SubShell', map(format_commands, v.cmds)] - else: - return repr(v) - -def print_commands(cmds, output=sys.stdout): - """Pretty print a command tree.""" - def print_tree(cmd, spaces, output): - if isinstance(cmd, list): - for c in cmd: - print_tree(c, spaces + 3, output) - else: - print >>output, ' '*spaces + str(cmd) - - formatted = format_commands(cmds) - print_tree(formatted, 0, output) - - -def stringify_commands(cmds): - """Serialize a command tree as a string. - - Returned string is not pretty and is currently used for unit tests only. - """ - def stringify(value): - output = [] - if isinstance(value, list): - formatted = [] - for v in value: - formatted.append(stringify(v)) - formatted = ' '.join(formatted) - output.append(''.join(['<', formatted, '>'])) - else: - output.append(value) - return ' '.join(output) - - return stringify(format_commands(cmds)) - - -def visit_commands(cmds, callable): - """Visit the command tree and execute callable on every Pipeline and - SimpleCommand instances. - """ - if isinstance(cmds, (tuple, list)): - map(lambda c: visit_commands(c,callable), cmds) - elif isinstance(cmds, (Pipeline, SimpleCommand)): - callable(cmds) diff --git a/bitbake/lib/bb/pysh/sherrors.py b/bitbake/lib/bb/pysh/sherrors.py deleted file mode 100644 index 3fe8e47b2c..0000000000 --- a/bitbake/lib/bb/pysh/sherrors.py +++ /dev/null @@ -1,15 +0,0 @@ -# sherrors.py - shell errors and signals -# -# Copyright 2007 Patrick Mezard -# -# This software may be used and distributed according to the terms -# of the GNU General Public License, incorporated herein by reference. - -"""Define shell exceptions and error codes. 
-""" - -class ShellError(Exception): - pass - -class ShellSyntaxError(ShellError): - pass diff --git a/bitbake/lib/bb/remotedata.py b/bitbake/lib/bb/remotedata.py deleted file mode 100644 index 6c9864dd6b..0000000000 --- a/bitbake/lib/bb/remotedata.py +++ /dev/null @@ -1,65 +0,0 @@ -""" -BitBake 'remotedata' module - -Provides support for using a datastore from the bitbake client -""" - -# Copyright (C) 2016 Intel Corporation -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import bb.data - -class RemoteDatastores: - """Used on the server side to manage references to server-side datastores""" - def __init__(self, cooker): - self.cooker = cooker - self.datastores = {} - self.locked = [] - self.datastores[0] = self.cooker.data - self.nextindex = 1 - - def __len__(self): - return len(self.datastores) - - def __getitem__(self, key): - # Cooker could have changed its datastore from under us - self.datastores[0] = self.cooker.data - return self.datastores[key] - - def items(self): - return self.datastores.items() - - def store(self, d, locked=False): - """ - Put a datastore into the collection. If locked=True then the datastore - is understood to be managed externally and cannot be released by calling - release(). - """ - idx = self.nextindex - self.datastores[idx] = d - if locked: - self.locked.append(idx) - self.nextindex += 1 - return idx - - def check_store(self, d, locked=False): - """ - Put a datastore into the collection if it's not already in there; - in either case return the index - """ - for key, val in self.datastores.items(): - if val is d: - idx = key - break - else: - idx = self.store(d, locked) - return idx - - def release(self, idx): - """Discard a datastore in the collection""" - if idx in self.locked: - raise Exception('Tried to release locked datastore %d' % idx) - del self.datastores[idx] - diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py deleted file mode 100644 index 63d4edd892..0000000000 --- a/bitbake/lib/bb/runqueue.py +++ /dev/null @@ -1,3404 +0,0 @@ -""" -BitBake 'RunQueue' implementation - -Handles preparation and execution of a queue of tasks -""" - -# Copyright (C) 2006-2007 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import copy -import enum -import os -import sys -import stat -import errno -import itertools -import logging -import re -import bb -from bb import msg, event -from bb import monitordisk -import subprocess -import pickle -from multiprocessing import Process -import shlex -import pprint -import time - -bblogger = logging.getLogger("BitBake") -logger = logging.getLogger("BitBake.RunQueue") -hashequiv_logger = logging.getLogger("BitBake.RunQueue.HashEquiv") - -__find_sha256__ = re.compile( r'(?i)(?= 2: - return tid.split(':')[1] - return "" - -def split_tid(tid): - (mc, fn, taskname, _) = split_tid_mcfn(tid) - return (mc, fn, taskname) - -def split_mc(n): - if n.startswith("mc:") and n.count(':') >= 2: - _, mc, n = n.split(":", 2) - return (mc, n) - return ('', n) - -def split_tid_mcfn(tid): - if tid.startswith('mc:') and tid.count(':') >= 2: - elems = tid.split(':') - mc = elems[1] - fn = ":".join(elems[2:-1]) - taskname = elems[-1] - mcfn = "mc:" + mc + ":" + fn - else: - tid = tid.rsplit(":", 1) - mc = "" - fn = tid[0] - taskname = tid[1] - mcfn = fn - - return (mc, fn, taskname, mcfn) - -def build_tid(mc, fn, taskname): - if mc: - return "mc:" + mc + ":" + fn + ":" + taskname - return fn + ":" + taskname - -# Index used to pair up potentially matching multiconfig tasks -# We match on PN, taskname and hash 
being equal -def pending_hash_index(tid, rqdata): - (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) - pn = rqdata.dataCaches[mc].pkg_fn[taskfn] - h = rqdata.runtaskentries[tid].unihash - return pn + ":" + "taskname" + h - -class RunQueueStats: - """ - Holds statistics on the tasks handled by the associated runQueue - """ - def __init__(self, total, setscene_total): - self.completed = 0 - self.skipped = 0 - self.failed = 0 - self.active = 0 - self.setscene_active = 0 - self.setscene_covered = 0 - self.setscene_notcovered = 0 - self.setscene_total = setscene_total - self.total = total - - def copy(self): - obj = self.__class__(self.total, self.setscene_total) - obj.__dict__.update(self.__dict__) - return obj - - def taskFailed(self): - self.active = self.active - 1 - self.failed = self.failed + 1 - - def taskCompleted(self): - self.active = self.active - 1 - self.completed = self.completed + 1 - - def taskSkipped(self): - self.active = self.active + 1 - self.skipped = self.skipped + 1 - - def taskActive(self): - self.active = self.active + 1 - - def updateCovered(self, covered, notcovered): - self.setscene_covered = covered - self.setscene_notcovered = notcovered - - def updateActiveSetscene(self, active): - self.setscene_active = active - - -# Indicates the next step due to run in the runQueue state machine -class RunQueueState(enum.Enum): - PREPARE = 0 - SCENE_INIT = 1 - DUMP_SIGS = 2 - RUNNING = 3 - FAILED = 4 - CLEAN_UP = 5 - COMPLETE = 6 - -class RunQueueScheduler(object): - """ - Control the order tasks are scheduled in. - """ - name = "basic" - - def __init__(self, runqueue, rqdata): - """ - The default scheduler just returns the first buildable task (the - priority map is sorted by task number) - """ - self.rq = runqueue - self.rqdata = rqdata - self.numTasks = len(self.rqdata.runtaskentries) - - self.prio_map = [self.rqdata.runtaskentries.keys()] - - self.buildable = set() - self.skip_maxthread = {} - self.stamps = {} - for tid in self.rqdata.runtaskentries: - (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) - self.stamps[tid] = bb.parse.siggen.stampfile_mcfn(taskname, taskfn, extrainfo=False) - if tid in self.rq.runq_buildable: - self.buildable.add(tid) - - self.rev_prio_map = None - self.is_pressure_usable() - - def is_pressure_usable(self): - """ - If monitoring pressure, return True if pressure files can be open and read. For example - openSUSE /proc/pressure/* files have readable file permissions but when read the error EOPNOTSUPP (Operation not supported) - is returned. - """ - if self.rq.max_cpu_pressure or self.rq.max_io_pressure or self.rq.max_memory_pressure: - try: - with open("/proc/pressure/cpu") as cpu_pressure_fds, \ - open("/proc/pressure/io") as io_pressure_fds, \ - open("/proc/pressure/memory") as memory_pressure_fds: - - self.prev_cpu_pressure = cpu_pressure_fds.readline().split()[4].split("=")[1] - self.prev_io_pressure = io_pressure_fds.readline().split()[4].split("=")[1] - self.prev_memory_pressure = memory_pressure_fds.readline().split()[4].split("=")[1] - self.prev_pressure_time = time.time() - self.check_pressure = True - except: - bb.note("The /proc/pressure files can't be read. Continuing build without monitoring pressure") - self.check_pressure = False - else: - self.check_pressure = False - - def exceeds_max_pressure(self): - """ - Monitor the difference in total pressure at least once per second, if - BB_PRESSURE_MAX_{CPU|IO|MEMORY} are set, return True if above threshold. 
- """ - if self.check_pressure: - with open("/proc/pressure/cpu") as cpu_pressure_fds, \ - open("/proc/pressure/io") as io_pressure_fds, \ - open("/proc/pressure/memory") as memory_pressure_fds: - # extract "total" from /proc/pressure/{cpu|io} - curr_cpu_pressure = cpu_pressure_fds.readline().split()[4].split("=")[1] - curr_io_pressure = io_pressure_fds.readline().split()[4].split("=")[1] - curr_memory_pressure = memory_pressure_fds.readline().split()[4].split("=")[1] - now = time.time() - tdiff = now - self.prev_pressure_time - psi_accumulation_interval = 1.0 - cpu_pressure = (float(curr_cpu_pressure) - float(self.prev_cpu_pressure)) / tdiff - io_pressure = (float(curr_io_pressure) - float(self.prev_io_pressure)) / tdiff - memory_pressure = (float(curr_memory_pressure) - float(self.prev_memory_pressure)) / tdiff - exceeds_cpu_pressure = self.rq.max_cpu_pressure and cpu_pressure > self.rq.max_cpu_pressure - exceeds_io_pressure = self.rq.max_io_pressure and io_pressure > self.rq.max_io_pressure - exceeds_memory_pressure = self.rq.max_memory_pressure and memory_pressure > self.rq.max_memory_pressure - - if tdiff > psi_accumulation_interval: - self.prev_cpu_pressure = curr_cpu_pressure - self.prev_io_pressure = curr_io_pressure - self.prev_memory_pressure = curr_memory_pressure - self.prev_pressure_time = now - - pressure_state = (exceeds_cpu_pressure, exceeds_io_pressure, exceeds_memory_pressure) - pressure_values = (round(cpu_pressure,1), self.rq.max_cpu_pressure, round(io_pressure,1), self.rq.max_io_pressure, round(memory_pressure,1), self.rq.max_memory_pressure) - if hasattr(self, "pressure_state") and pressure_state != self.pressure_state: - bb.note("Pressure status changed to CPU: %s, IO: %s, Mem: %s (CPU: %s/%s, IO: %s/%s, Mem: %s/%s) - using %s/%s bitbake threads" % (pressure_state + pressure_values + (len(self.rq.runq_running.difference(self.rq.runq_complete)), self.rq.number_tasks))) - self.pressure_state = pressure_state - return (exceeds_cpu_pressure or exceeds_io_pressure or exceeds_memory_pressure) - elif self.rq.max_loadfactor: - limit = False - loadfactor = float(os.getloadavg()[0]) / os.cpu_count() - # bb.warn("Comparing %s to %s" % (loadfactor, self.rq.max_loadfactor)) - if loadfactor > self.rq.max_loadfactor: - limit = True - if hasattr(self, "loadfactor_limit") and limit != self.loadfactor_limit: - bb.note("Load average limiting set to %s as load average: %s - using %s/%s bitbake threads" % (limit, loadfactor, len(self.rq.runq_running.difference(self.rq.runq_complete)), self.rq.number_tasks)) - self.loadfactor_limit = limit - return limit - return False - - def next_buildable_task(self): - """ - Return the id of the first task we find that is buildable - """ - # Once tasks are running we don't need to worry about them again - self.buildable.difference_update(self.rq.runq_running) - buildable = set(self.buildable) - buildable.difference_update(self.rq.holdoff_tasks) - buildable.intersection_update(self.rq.tasks_covered | self.rq.tasks_notcovered) - if not buildable: - return None - - # Bitbake requires that at least one task be active. Only check for pressure if - # this is the case, otherwise the pressure limitation could result in no tasks - # being active and no new tasks started thereby, at times, breaking the scheduler. 
- if self.rq.stats.active and self.exceeds_max_pressure(): - return None - - # Filter out tasks that have a max number of threads that have been exceeded - skip_buildable = {} - for running in self.rq.runq_running.difference(self.rq.runq_complete): - rtaskname = taskname_from_tid(running) - if rtaskname not in self.skip_maxthread: - self.skip_maxthread[rtaskname] = self.rq.cfgData.getVarFlag(rtaskname, "number_threads") - if not self.skip_maxthread[rtaskname]: - continue - if rtaskname in skip_buildable: - skip_buildable[rtaskname] += 1 - else: - skip_buildable[rtaskname] = 1 - - if len(buildable) == 1: - tid = buildable.pop() - taskname = taskname_from_tid(tid) - if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]): - return None - stamp = self.stamps[tid] - if stamp not in self.rq.build_stamps.values(): - return tid - - if not self.rev_prio_map: - self.rev_prio_map = {} - for tid in self.rqdata.runtaskentries: - self.rev_prio_map[tid] = self.prio_map.index(tid) - - best = None - bestprio = None - for tid in buildable: - prio = self.rev_prio_map[tid] - if bestprio is None or bestprio > prio: - taskname = taskname_from_tid(tid) - if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]): - continue - stamp = self.stamps[tid] - if stamp in self.rq.build_stamps.values(): - continue - bestprio = prio - best = tid - - return best - - def next(self): - """ - Return the id of the task we should build next - """ - if self.rq.can_start_task(): - return self.next_buildable_task() - - def newbuildable(self, task): - self.buildable.add(task) - - def removebuildable(self, task): - self.buildable.remove(task) - - def describe_task(self, taskid): - result = 'ID %s' % taskid - if self.rev_prio_map: - result = result + (' pri %d' % self.rev_prio_map[taskid]) - return result - - def dump_prio(self, comment): - bb.debug(3, '%s (most important first):\n%s' % - (comment, - '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for - index, taskid in enumerate(self.prio_map)]))) - -class RunQueueSchedulerSpeed(RunQueueScheduler): - """ - A scheduler optimised for speed. The priority map is sorted by task weight, - heavier weighted tasks (tasks needed by the most other tasks) are run first. - """ - name = "speed" - - def __init__(self, runqueue, rqdata): - """ - The priority map is sorted by task weight. - """ - RunQueueScheduler.__init__(self, runqueue, rqdata) - - weights = {} - for tid in self.rqdata.runtaskentries: - weight = self.rqdata.runtaskentries[tid].weight - if not weight in weights: - weights[weight] = [] - weights[weight].append(tid) - - self.prio_map = [] - for weight in sorted(weights): - for w in weights[weight]: - self.prio_map.append(w) - - self.prio_map.reverse() - -class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed): - """ - A scheduler optimised to complete .bb files as quickly as possible. The - priority map is sorted by task weight, but then reordered so once a given - .bb file starts to build, it's completed as quickly as possible by - running all tasks related to the same .bb file one after the after. - This works well where disk space is at a premium and classes like OE's - rm_work are in force. - """ - name = "completion" - - def __init__(self, runqueue, rqdata): - super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata) - - # Extract list of tasks for each recipe, with tasks sorted - # ascending from "must run first" (typically do_fetch) to - # "runs last" (do_build). 
-        # tasks that must run first before the ones that run later;
-        # this is what we depend on here.
-        task_lists = {}
-        for taskid in self.prio_map:
-            fn, taskname = taskid.rsplit(':', 1)
-            task_lists.setdefault(fn, []).append(taskname)
-
-        # Now unify the different task lists. The strategy is that
-        # common tasks get skipped and new ones get inserted after the
-        # preceding common one(s) as they are found. Because task
-        # lists should differ only by their number of tasks, but not
-        # the ordering of the common tasks, this should result in a
-        # deterministic result that is a superset of the individual
-        # task orderings.
-        all_tasks = []
-        for recipe, new_tasks in task_lists.items():
-            index = 0
-            old_task = all_tasks[index] if index < len(all_tasks) else None
-            for new_task in new_tasks:
-                if old_task == new_task:
-                    # Common task, skip it. This is the fast-path which
-                    # avoids a full search.
-                    index += 1
-                    old_task = all_tasks[index] if index < len(all_tasks) else None
-                else:
-                    try:
-                        index = all_tasks.index(new_task)
-                        # Already present, just not at the current
-                        # place. We re-synchronize by changing the
-                        # index so that it matches again. Now
-                        # move on to the next existing task.
-                        index += 1
-                        old_task = all_tasks[index] if index < len(all_tasks) else None
-                    except ValueError:
-                        # Not present. Insert before old_task, which
-                        # remains the same (but gets shifted back).
-                        all_tasks.insert(index, new_task)
-                        index += 1
-        bb.debug(3, 'merged task list: %s' % all_tasks)
-
-        # Now reverse the order so that tasks that finish the work on one
-        # recipe are considered more important (= come first). The ordering
-        # is now such that do_build is the most important task.
-        all_tasks.reverse()
-
-        # Group tasks of the same kind before tasks of less important
-        # kinds at the head of the queue (because earlier = lower
-        # priority number = runs earlier), while preserving the
-        # ordering by recipe. If recipe foo is more important than
-        # bar, then the goal is to work on foo's do_populate_sysroot
-        # before bar's do_populate_sysroot and on the more important
-        # tasks of foo before any of the less important tasks in any
-        # other recipe (if those other recipes are more important than
-        # foo).
-        #
-        # All of this only applies when tasks are runnable. Explicit
-        # dependencies still override this ordering by priority.
-        #
-        # Here's an example of why this priority re-ordering helps with
-        # minimizing disk usage. Consider a recipe foo with a higher
-        # priority than bar where foo DEPENDS on bar. Then the
-        # implicit rule (from base.bbclass) is that foo's do_configure
-        # depends on bar's do_populate_sysroot. This ensures that
-        # bar's do_populate_sysroot gets done first. Normally the
-        # tasks from foo would continue to run once that is done, and
-        # bar only gets completed and cleaned up later. By ordering
-        # bar's tasks that depend on bar's do_populate_sysroot before foo's
-        # do_configure, that problem gets avoided.
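The merge a few lines above is easiest to see with concrete data. A simplified, self-contained equivalent of that loop, using invented task names (it tracks the insertion index directly instead of old_task, but yields the same superset ordering):

def merge_task_lists(task_lists):
    all_tasks = []
    for new_tasks in task_lists:
        index = 0
        for new_task in new_tasks:
            if index < len(all_tasks) and all_tasks[index] == new_task:
                index += 1                             # common task, fast path
            elif new_task in all_tasks:
                index = all_tasks.index(new_task) + 1  # re-synchronize
            else:
                all_tasks.insert(index, new_task)      # new task, insert here
                index += 1
    return all_tasks

print(merge_task_lists([["do_fetch", "do_compile", "do_build"],
                        ["do_fetch", "do_patch", "do_compile", "do_build"]]))
# -> ['do_fetch', 'do_patch', 'do_compile', 'do_build']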
- task_index = 0 - self.dump_prio('original priorities') - for task in all_tasks: - for index in range(task_index, self.numTasks): - taskid = self.prio_map[index] - taskname = taskid.rsplit(':', 1)[1] - if taskname == task: - del self.prio_map[index] - self.prio_map.insert(task_index, taskid) - task_index += 1 - self.dump_prio('completion priorities') - -class RunTaskEntry(object): - def __init__(self): - self.depends = set() - self.revdeps = set() - self.hash = None - self.unihash = None - self.task = None - self.weight = 1 - -class RunQueueData: - """ - BitBake Run Queue implementation - """ - def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets): - self.cooker = cooker - self.dataCaches = dataCaches - self.taskData = taskData - self.targets = targets - self.rq = rq - self.warn_multi_bb = False - - self.multi_provider_allowed = (cfgData.getVar("BB_MULTI_PROVIDER_ALLOWED") or "").split() - self.setscene_ignore_tasks = get_setscene_enforce_ignore_tasks(cfgData, targets) - self.setscene_ignore_tasks_checked = False - self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1") - self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter() - - self.reset() - - def reset(self): - self.runtaskentries = {} - - def runq_depends_names(self, ids): - ret = [] - for id in ids: - nam = os.path.basename(id) - nam = re.sub("_[^,]*,", ",", nam) - ret.extend([nam]) - return ret - - def get_task_hash(self, tid): - return self.runtaskentries[tid].hash - - def get_task_unihash(self, tid): - return self.runtaskentries[tid].unihash - - def get_user_idstring(self, tid, task_name_suffix = ""): - return tid + task_name_suffix - - def get_short_user_idstring(self, task, task_name_suffix = ""): - (mc, fn, taskname, taskfn) = split_tid_mcfn(task) - pn = self.dataCaches[mc].pkg_fn[taskfn] - taskname = taskname_from_tid(task) + task_name_suffix - return "%s:%s" % (pn, taskname) - - def circular_depchains_handler(self, tasks): - """ - Some tasks aren't buildable, likely due to circular dependency issues. - Identify the circular dependencies and print them in a user readable format. 
- """ - from copy import deepcopy - - valid_chains = [] - explored_deps = {} - msgs = [] - - class TooManyLoops(Exception): - pass - - def chain_reorder(chain): - """ - Reorder a dependency chain so the lowest task id is first - """ - lowest = 0 - new_chain = [] - for entry in range(len(chain)): - if chain[entry] < chain[lowest]: - lowest = entry - new_chain.extend(chain[lowest:]) - new_chain.extend(chain[:lowest]) - return new_chain - - def chain_compare_equal(chain1, chain2): - """ - Compare two dependency chains and see if they're the same - """ - if len(chain1) != len(chain2): - return False - for index in range(len(chain1)): - if chain1[index] != chain2[index]: - return False - return True - - def chain_array_contains(chain, chain_array): - """ - Return True if chain_array contains chain - """ - for ch in chain_array: - if chain_compare_equal(ch, chain): - return True - return False - - def find_chains(tid, prev_chain): - prev_chain.append(tid) - total_deps = [] - total_deps.extend(self.runtaskentries[tid].revdeps) - for revdep in self.runtaskentries[tid].revdeps: - if revdep in prev_chain: - idx = prev_chain.index(revdep) - # To prevent duplicates, reorder the chain to start with the lowest taskid - # and search through an array of those we've already printed - chain = prev_chain[idx:] - new_chain = chain_reorder(chain) - if not chain_array_contains(new_chain, valid_chains): - valid_chains.append(new_chain) - msgs.append("Dependency loop #%d found:\n" % len(valid_chains)) - for dep in new_chain: - msgs.append(" Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends))) - msgs.append("\n") - if len(valid_chains) > 10: - msgs.append("Halted dependency loops search after 10 matches.\n") - raise TooManyLoops - continue - scan = False - if revdep not in explored_deps: - scan = True - elif revdep in explored_deps[revdep]: - scan = True - else: - for dep in prev_chain: - if dep in explored_deps[revdep]: - scan = True - if scan: - find_chains(revdep, copy.deepcopy(prev_chain)) - for dep in explored_deps[revdep]: - if dep not in total_deps: - total_deps.append(dep) - - explored_deps[tid] = total_deps - - try: - for task in tasks: - find_chains(task, []) - except TooManyLoops: - pass - - return msgs - - def calculate_task_weights(self, endpoints): - """ - Calculate a number representing the "weight" of each task. Heavier weighted tasks - have more dependencies and hence should be executed sooner for maximum speed. - - This function also sanity checks the task list finding tasks that are not - possible to execute due to circular dependencies. 
- """ - - numTasks = len(self.runtaskentries) - weight = {} - deps_left = {} - task_done = {} - - for tid in self.runtaskentries: - task_done[tid] = False - weight[tid] = 1 - deps_left[tid] = len(self.runtaskentries[tid].revdeps) - - for tid in endpoints: - weight[tid] = 10 - task_done[tid] = True - - while True: - next_points = [] - for tid in endpoints: - for revdep in self.runtaskentries[tid].depends: - weight[revdep] = weight[revdep] + weight[tid] - deps_left[revdep] = deps_left[revdep] - 1 - if deps_left[revdep] == 0: - next_points.append(revdep) - task_done[revdep] = True - endpoints = next_points - if not next_points: - break - - # Circular dependency sanity check - problem_tasks = [] - for tid in self.runtaskentries: - if task_done[tid] is False or deps_left[tid] != 0: - problem_tasks.append(tid) - logger.debug2("Task %s is not buildable", tid) - logger.debug2("(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid]) - self.runtaskentries[tid].weight = weight[tid] - - if problem_tasks: - message = "%s unbuildable tasks were found.\n" % len(problem_tasks) - message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n" - message = message + "Identifying dependency loops (this may take a short while)...\n" - logger.error(message) - - msgs = self.circular_depchains_handler(problem_tasks) - - message = "\n" - for msg in msgs: - message = message + msg - bb.msg.fatal("RunQueue", message) - - return weight - - def prepare(self): - """ - Turn a set of taskData into a RunQueue and compute data needed - to optimise the execution order. - """ - - runq_build = {} - recursivetasks = {} - recursiveitasks = {} - recursivetasksselfref = set() - - taskData = self.taskData - - found = False - for mc in self.taskData: - if taskData[mc].taskentries: - found = True - break - if not found: - # Nothing to do - return 0 - - bb.parse.siggen.setup_datacache(self.dataCaches) - - self.init_progress_reporter.start() - self.init_progress_reporter.next_stage() - bb.event.check_for_interrupts() - - # Step A - Work out a list of tasks to run - # - # Taskdata gives us a list of possible providers for every build and run - # target ordered by priority. It also gives information on each of those - # providers. - # - # To create the actual list of tasks to execute we fix the list of - # providers and then resolve the dependencies into task IDs. This - # process is repeated for each type of dependency (tdepends, deptask, - # rdeptast, recrdeptask, idepends). 
- - def add_build_dependencies(depids, tasknames, depends, mc): - for depname in depids: - # Won't be in build_targets if ASSUME_PROVIDED - if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]: - continue - depdata = taskData[mc].build_targets[depname][0] - if depdata is None: - continue - for taskname in tasknames: - t = depdata + ":" + taskname - if t in taskData[mc].taskentries: - depends.add(t) - - def add_runtime_dependencies(depids, tasknames, depends, mc): - for depname in depids: - if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]: - continue - depdata = taskData[mc].run_targets[depname][0] - if depdata is None: - continue - for taskname in tasknames: - t = depdata + ":" + taskname - if t in taskData[mc].taskentries: - depends.add(t) - - def add_mc_dependencies(mc, tid): - mcdeps = taskData[mc].get_mcdepends() - for dep in mcdeps: - mcdependency = dep.split(':') - pn = mcdependency[3] - frommc = mcdependency[1] - mcdep = mcdependency[2] - deptask = mcdependency[4] - if mcdep not in taskData: - bb.fatal("Multiconfig '%s' is referenced in multiconfig dependency '%s' but not enabled in BBMULTICONFIG?" % (mcdep, dep)) - if mc == frommc: - fn = taskData[mcdep].build_targets[pn][0] - newdep = '%s:%s' % (fn,deptask) - if newdep not in taskData[mcdep].taskentries: - bb.fatal("Task mcdepends on non-existent task %s" % (newdep)) - taskData[mc].taskentries[tid].tdepends.append(newdep) - - for mc in taskData: - for tid in taskData[mc].taskentries: - - (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) - #runtid = build_tid(mc, fn, taskname) - - #logger.debug2("Processing %s,%s:%s", mc, fn, taskname) - - depends = set() - task_deps = self.dataCaches[mc].task_deps[taskfn] - - self.runtaskentries[tid] = RunTaskEntry() - - if fn in taskData[mc].failed_fns: - continue - - # We add multiconfig dependencies before processing internal task deps (tdepends) - if 'mcdepends' in task_deps and taskname in task_deps['mcdepends']: - add_mc_dependencies(mc, tid) - - # Resolve task internal dependencies - # - # e.g. addtask before X after Y - for t in taskData[mc].taskentries[tid].tdepends: - (depmc, depfn, deptaskname, _) = split_tid_mcfn(t) - depends.add(build_tid(depmc, depfn, deptaskname)) - - # Resolve 'deptask' dependencies - # - # e.g. do_sometask[deptask] = "do_someothertask" - # (makes sure sometask runs after someothertask of all DEPENDS) - if 'deptask' in task_deps and taskname in task_deps['deptask']: - tasknames = task_deps['deptask'][taskname].split() - add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc) - - # Resolve 'rdeptask' dependencies - # - # e.g. do_sometask[rdeptask] = "do_someothertask" - # (makes sure sometask runs after someothertask of all RDEPENDS) - if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']: - tasknames = task_deps['rdeptask'][taskname].split() - add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc) - - # Resolve inter-task dependencies - # - # e.g. 
do_sometask[depends] = "targetname:do_someothertask"
-                # (makes sure sometask runs after targetname's someothertask)
-                idepends = taskData[mc].taskentries[tid].idepends
-                for (depname, idependtask) in idepends:
-                    if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and depname not in taskData[mc].failed_deps:
-                        # Won't be in build_targets if ASSUME_PROVIDED
-                        depdata = taskData[mc].build_targets[depname][0]
-                        if depdata is not None:
-                            t = depdata + ":" + idependtask
-                            depends.add(t)
-                            if t not in taskData[mc].taskentries:
-                                bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
-                irdepends = taskData[mc].taskentries[tid].irdepends
-                for (depname, idependtask) in irdepends:
-                    if depname in taskData[mc].run_targets:
-                        # Won't be in run_targets if ASSUME_PROVIDED
-                        if not taskData[mc].run_targets[depname]:
-                            continue
-                        depdata = taskData[mc].run_targets[depname][0]
-                        if depdata is not None:
-                            t = depdata + ":" + idependtask
-                            depends.add(t)
-                            if t not in taskData[mc].taskentries:
-                                bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
-
-                # Resolve recursive 'recrdeptask' dependencies (Part A)
-                #
-                # e.g. do_sometask[recrdeptask] = "do_someothertask"
-                # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
-                # We cover the recursive part of the dependencies below
-                if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']:
-                    tasknames = task_deps['recrdeptask'][taskname].split()
-                    recursivetasks[tid] = tasknames
-                    add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
-                    add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
-                    if taskname in tasknames:
-                        recursivetasksselfref.add(tid)
-
-                if 'recideptask' in task_deps and taskname in task_deps['recideptask']:
-                    recursiveitasks[tid] = []
-                    for t in task_deps['recideptask'][taskname].split():
-                        newdep = build_tid(mc, fn, t)
-                        recursiveitasks[tid].append(newdep)
-
-                self.runtaskentries[tid].depends = depends
-                # Remove all self references
-                self.runtaskentries[tid].depends.discard(tid)
-
-        #self.dump_data()
-
-        self.init_progress_reporter.next_stage()
-        bb.event.check_for_interrupts()
-
-        # Resolve recursive 'recrdeptask' dependencies (Part B)
-        #
-        # e.g. do_sometask[recrdeptask] = "do_someothertask"
-        # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
-        # We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed
-
-        # Generating/iterating recursive lists of dependencies is painful and potentially slow
-        # Precompute recursive task dependencies here by:
-        #     a) create a temp list of reverse dependencies (revdeps)
-        #     b) walk up the ends of the chains (when a given task no longer has dependencies i.e.
len(deps) == 0) - # c) combine the total list of dependencies in cumulativedeps - # d) optimise by pre-truncating 'task' off the items in cumulativedeps (keeps items in sets lower) - - - revdeps = {} - deps = {} - cumulativedeps = {} - for tid in self.runtaskentries: - deps[tid] = set(self.runtaskentries[tid].depends) - revdeps[tid] = set() - cumulativedeps[tid] = set() - # Generate a temp list of reverse dependencies - for tid in self.runtaskentries: - for dep in self.runtaskentries[tid].depends: - revdeps[dep].add(tid) - # Find the dependency chain endpoints - endpoints = set() - for tid in self.runtaskentries: - if not deps[tid]: - endpoints.add(tid) - # Iterate the chains collating dependencies - while endpoints: - next = set() - for tid in endpoints: - for dep in revdeps[tid]: - cumulativedeps[dep].add(fn_from_tid(tid)) - cumulativedeps[dep].update(cumulativedeps[tid]) - if tid in deps[dep]: - deps[dep].remove(tid) - if not deps[dep]: - next.add(dep) - endpoints = next - #for tid in deps: - # if deps[tid]: - # bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid])) - - # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to - # resolve these recursively until we aren't adding any further extra dependencies - extradeps = True - while extradeps: - extradeps = 0 - for tid in recursivetasks: - tasknames = recursivetasks[tid] - - totaldeps = set(self.runtaskentries[tid].depends) - if tid in recursiveitasks: - totaldeps.update(recursiveitasks[tid]) - for dep in recursiveitasks[tid]: - if dep not in self.runtaskentries: - continue - totaldeps.update(self.runtaskentries[dep].depends) - - deps = set() - for dep in totaldeps: - if dep in cumulativedeps: - deps.update(cumulativedeps[dep]) - - for t in deps: - for taskname in tasknames: - newtid = t + ":" + taskname - if newtid == tid: - continue - if newtid in self.runtaskentries and newtid not in self.runtaskentries[tid].depends: - extradeps += 1 - self.runtaskentries[tid].depends.add(newtid) - - # Handle recursive tasks which depend upon other recursive tasks - deps = set() - for dep in self.runtaskentries[tid].depends.intersection(recursivetasks): - deps.update(self.runtaskentries[dep].depends.difference(self.runtaskentries[tid].depends)) - for newtid in deps: - for taskname in tasknames: - if not newtid.endswith(":" + taskname): - continue - if newtid in self.runtaskentries: - extradeps += 1 - self.runtaskentries[tid].depends.add(newtid) - - bb.debug(1, "Added %s recursive dependencies in this loop" % extradeps) - - # Remove recrdeptask circular references so that do_a[recrdeptask] = "do_a do_b" can work - for tid in recursivetasksselfref: - self.runtaskentries[tid].depends.difference_update(recursivetasksselfref) - - self.init_progress_reporter.next_stage() - bb.event.check_for_interrupts() - - #self.dump_data() - - # Step B - Mark all active tasks - # - # Start with the tasks we were asked to run and mark all dependencies - # as active too. If the task is to be 'forced', clear its stamp. Once - # all active tasks are marked, prune the ones we don't need. 
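The endpoint walk above (used to fill cumulativedeps) is worth seeing on a toy graph: start from tasks with no remaining dependencies and push each task's accumulated set up to its reverse dependencies, releasing a task once all of its dependencies have been processed. A small invented example (the real code stores filenames rather than task ids, but the mechanics are the same):

deps = {"a": set(), "b": {"a"}, "c": {"a", "b"}}
revdeps = {"a": {"b", "c"}, "b": {"c"}, "c": set()}
cumulative = {tid: set() for tid in deps}

endpoints = {tid for tid in deps if not deps[tid]}
while endpoints:
    nxt = set()
    for tid in endpoints:
        for dep in revdeps[tid]:
            cumulative[dep].add(tid)                  # direct dependency
            cumulative[dep].update(cumulative[tid])   # plus everything below it
            deps[dep].discard(tid)
            if not deps[dep]:                         # all deps processed, release
                nxt.add(dep)
    endpoints = nxt
print(cumulative)  # {'a': set(), 'b': {'a'}, 'c': {'a', 'b'}}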
-
-        logger.verbose("Marking Active Tasks")
-
-        def mark_active(tid, depth):
-            """
-            Mark an item as active along with its depends
-            (calls itself recursively)
-            """
-
-            if tid in runq_build:
-                return
-
-            runq_build[tid] = 1
-
-            depends = self.runtaskentries[tid].depends
-            for depend in depends:
-                mark_active(depend, depth+1)
-
-        def invalidate_task(tid, error_nostamp):
-            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
-            taskdep = self.dataCaches[mc].task_deps[taskfn]
-            if fn + ":" + taskname not in taskData[mc].taskentries:
-                logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
-            if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
-                if error_nostamp:
-                    bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
-                else:
-                    bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
-            else:
-                logger.verbose("Invalidate task %s, %s", taskname, fn)
-                bb.parse.siggen.invalidate_task(taskname, taskfn)
-
-        self.target_tids = []
-        for (mc, target, task, fn) in self.targets:
-
-            if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]:
-                continue
-
-            if target in taskData[mc].failed_deps:
-                continue
-
-            parents = False
-            if task.endswith('-'):
-                parents = True
-                task = task[:-1]
-
-            if fn in taskData[mc].failed_fns:
-                continue
-
-            # fn already has mc prefix
-            tid = fn + ":" + task
-            self.target_tids.append(tid)
-            if tid not in taskData[mc].taskentries:
-                import difflib
-                tasks = []
-                for x in taskData[mc].taskentries:
-                    if x.startswith(fn + ":"):
-                        tasks.append(taskname_from_tid(x))
-                close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7)
-                if close_matches:
-                    extra = ". Close matches:\n  %s" % "\n  ".join(close_matches)
-                else:
-                    extra = ""
-                bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra))
-
-            # For tasks called "XXXX-", only run their dependencies
-            if parents:
-                for i in self.runtaskentries[tid].depends:
-                    mark_active(i, 1)
-            else:
-                mark_active(tid, 1)
-
-        self.init_progress_reporter.next_stage()
-        bb.event.check_for_interrupts()
-
-        # Step C - Prune all inactive tasks
-        #
-        # Once all active tasks are marked, prune the ones we don't need.
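The "Close matches" hint above comes straight from difflib in the standard library; a quick standalone example with invented task names:

import difflib

tasks = ["do_fetch", "do_unpack", "do_patch", "do_configure", "do_compile"]
print(difflib.get_close_matches("do_complie", tasks, cutoff=0.7))
# -> ['do_compile']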
- - # Handle --runall - if self.cooker.configuration.runall: - # re-run the mark_active and then drop unused tasks from new list - - runall_tids = set() - added = True - while added: - reduced_tasklist = set(self.runtaskentries.keys()) - for tid in list(self.runtaskentries.keys()): - if tid not in runq_build: - reduced_tasklist.remove(tid) - runq_build = {} - - orig = runall_tids - runall_tids = set() - for task in self.cooker.configuration.runall: - if not task.startswith("do_"): - task = "do_{0}".format(task) - for tid in reduced_tasklist: - wanttid = "{0}:{1}".format(fn_from_tid(tid), task) - if wanttid in self.runtaskentries: - runall_tids.add(wanttid) - - for tid in list(runall_tids): - mark_active(tid, 1) - self.target_tids.append(tid) - if self.cooker.configuration.force: - invalidate_task(tid, False) - added = runall_tids - orig - - delcount = set() - for tid in list(self.runtaskentries.keys()): - if tid not in runq_build: - delcount.add(tid) - del self.runtaskentries[tid] - - if self.cooker.configuration.runall: - if not self.runtaskentries: - bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets))) - - self.init_progress_reporter.next_stage() - bb.event.check_for_interrupts() - - # Handle runonly - if self.cooker.configuration.runonly: - # re-run the mark_active and then drop unused tasks from new list - runq_build = {} - - for task in self.cooker.configuration.runonly: - if not task.startswith("do_"): - task = "do_{0}".format(task) - runonly_tids = [k for k in self.runtaskentries.keys() if taskname_from_tid(k) == task] - - for tid in runonly_tids: - mark_active(tid, 1) - if self.cooker.configuration.force: - invalidate_task(tid, False) - - for tid in list(self.runtaskentries.keys()): - if tid not in runq_build: - delcount.add(tid) - del self.runtaskentries[tid] - - if not self.runtaskentries: - bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets))) - - # - # Step D - Sanity checks and computation - # - - # Check to make sure we still have tasks to run - if not self.runtaskentries: - if not taskData[''].halt: - bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.") - else: - bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! 
Please report this bug.") - - logger.verbose("Pruned %s inactive tasks, %s left", len(delcount), len(self.runtaskentries)) - - logger.verbose("Assign Weightings") - - self.init_progress_reporter.next_stage() - bb.event.check_for_interrupts() - - # Generate a list of reverse dependencies to ease future calculations - for tid in self.runtaskentries: - for dep in self.runtaskentries[tid].depends: - self.runtaskentries[dep].revdeps.add(tid) - - self.init_progress_reporter.next_stage() - bb.event.check_for_interrupts() - - # Identify tasks at the end of dependency chains - # Error on circular dependency loops (length two) - endpoints = [] - for tid in self.runtaskentries: - revdeps = self.runtaskentries[tid].revdeps - if not revdeps: - endpoints.append(tid) - for dep in revdeps: - if dep in self.runtaskentries[tid].depends: - bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep)) - - - logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints)) - - self.init_progress_reporter.next_stage() - bb.event.check_for_interrupts() - - # Calculate task weights - # Check of higher length circular dependencies - self.runq_weight = self.calculate_task_weights(endpoints) - - self.init_progress_reporter.next_stage() - bb.event.check_for_interrupts() - - # Sanity Check - Check for multiple tasks building the same provider - for mc in self.dataCaches: - prov_list = {} - seen_fn = [] - for tid in self.runtaskentries: - (tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid) - if taskfn in seen_fn: - continue - if mc != tidmc: - continue - seen_fn.append(taskfn) - for prov in self.dataCaches[mc].fn_provides[taskfn]: - if prov not in prov_list: - prov_list[prov] = [taskfn] - elif taskfn not in prov_list[prov]: - prov_list[prov].append(taskfn) - for prov in prov_list: - if len(prov_list[prov]) < 2: - continue - if prov in self.multi_provider_allowed: - continue - seen_pn = [] - # If two versions of the same PN are being built its fatal, we don't support it. - for fn in prov_list[prov]: - pn = self.dataCaches[mc].pkg_fn[fn] - if pn not in seen_pn: - seen_pn.append(pn) - else: - bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." 
% (pn, " ".join(prov_list[prov]), pn)) - msgs = ["Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))] - # - # Construct a list of things which uniquely depend on each provider - # since this may help the user figure out which dependency is triggering this warning - # - msgs.append("\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from.") - deplist = {} - commondeps = None - for provfn in prov_list[prov]: - deps = set() - for tid in self.runtaskentries: - fn = fn_from_tid(tid) - if fn != provfn: - continue - for dep in self.runtaskentries[tid].revdeps: - fn = fn_from_tid(dep) - if fn == provfn: - continue - deps.add(dep) - if not commondeps: - commondeps = set(deps) - else: - commondeps &= deps - deplist[provfn] = deps - for provfn in deplist: - msgs.append("\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))) - # - # Construct a list of provides and runtime providers for each recipe - # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC) - # - msgs.append("\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful.") - provide_results = {} - rprovide_results = {} - commonprovs = None - commonrprovs = None - for provfn in prov_list[prov]: - provides = set(self.dataCaches[mc].fn_provides[provfn]) - rprovides = set() - for rprovide in self.dataCaches[mc].rproviders: - if provfn in self.dataCaches[mc].rproviders[rprovide]: - rprovides.add(rprovide) - for package in self.dataCaches[mc].packages: - if provfn in self.dataCaches[mc].packages[package]: - rprovides.add(package) - for package in self.dataCaches[mc].packages_dynamic: - if provfn in self.dataCaches[mc].packages_dynamic[package]: - rprovides.add(package) - if not commonprovs: - commonprovs = set(provides) - else: - commonprovs &= provides - provide_results[provfn] = provides - if not commonrprovs: - commonrprovs = set(rprovides) - else: - commonrprovs &= rprovides - rprovide_results[provfn] = rprovides - #msgs.append("\nCommon provides:\n %s" % ("\n ".join(commonprovs))) - #msgs.append("\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))) - for provfn in prov_list[prov]: - msgs.append("\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))) - msgs.append("\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))) - - if self.warn_multi_bb: - logger.verbnote("".join(msgs)) - else: - logger.error("".join(msgs)) - - self.init_progress_reporter.next_stage() - self.init_progress_reporter.next_stage() - bb.event.check_for_interrupts() - - # Iterate over the task list looking for tasks with a 'setscene' function - self.runq_setscene_tids = set() - if not self.cooker.configuration.nosetscene: - for tid in self.runtaskentries: - (mc, fn, taskname, _) = split_tid_mcfn(tid) - setscenetid = tid + "_setscene" - if setscenetid not in taskData[mc].taskentries: - continue - self.runq_setscene_tids.add(tid) - - self.init_progress_reporter.next_stage() - bb.event.check_for_interrupts() - - # Invalidate task if force mode active - if self.cooker.configuration.force: - for tid in self.target_tids: - invalidate_task(tid, False) - - # Invalidate task if invalidate mode active - if self.cooker.configuration.invalidate_stamp: - for tid in self.target_tids: - fn = fn_from_tid(tid) - for st in 
self.cooker.configuration.invalidate_stamp.split(','): - if not st.startswith("do_"): - st = "do_%s" % st - invalidate_task(fn + ":" + st, True) - - self.init_progress_reporter.next_stage() - bb.event.check_for_interrupts() - - # Create and print to the logs a virtual/xxxx -> PN (fn) table - for mc in taskData: - virtmap = taskData[mc].get_providermap(prefix="virtual/") - virtpnmap = {} - for v in virtmap: - virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]] - bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v])) - if hasattr(bb.parse.siggen, "tasks_resolved"): - bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc]) - - self.init_progress_reporter.next_stage() - bb.event.check_for_interrupts() - - bb.parse.siggen.set_setscene_tasks(self.runq_setscene_tids) - - starttime = time.time() - lasttime = starttime - - # Iterate over the task list and call into the siggen code - dealtwith = set() - todeal = set(self.runtaskentries) - while todeal: - ready = set() - for tid in todeal.copy(): - if not (self.runtaskentries[tid].depends - dealtwith): - self.runtaskentries[tid].taskhash_deps = bb.parse.siggen.prep_taskhash(tid, self.runtaskentries[tid].depends, self.dataCaches) - # get_taskhash for a given tid *must* be called before get_unihash* below - self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, self.runtaskentries[tid].depends, self.dataCaches) - ready.add(tid) - unihashes = bb.parse.siggen.get_unihashes(ready) - for tid in ready: - dealtwith.add(tid) - todeal.remove(tid) - self.runtaskentries[tid].unihash = unihashes[tid] - - bb.event.check_for_interrupts() - - if time.time() > (lasttime + 30): - lasttime = time.time() - hashequiv_logger.verbose("Initial setup loop progress: %s of %s in %s" % (len(todeal), len(self.runtaskentries), lasttime - starttime)) - - endtime = time.time() - if (endtime-starttime > 60): - hashequiv_logger.verbose("Initial setup loop took: %s" % (endtime-starttime)) - - bb.parse.siggen.writeout_file_checksum_cache() - - #self.dump_data() - return len(self.runtaskentries) - - def dump_data(self): - """ - Dump some debug information on the internal data structures - """ - logger.debug3("run_tasks:") - for tid in self.runtaskentries: - logger.debug3(" %s: %s Deps %s RevDeps %s", tid, - self.runtaskentries[tid].weight, - self.runtaskentries[tid].depends, - self.runtaskentries[tid].revdeps) - -class RunQueueWorker(): - def __init__(self, process, pipe): - self.process = process - self.pipe = pipe - -class RunQueue: - def __init__(self, cooker, cfgData, dataCaches, taskData, targets): - - self.cooker = cooker - self.cfgData = cfgData - self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets) - - self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None - self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None - - self.state = RunQueueState.PREPARE - - # For disk space monitor - # Invoked at regular time intervals via the bitbake heartbeat event - # while the build is running. We generate a unique name for the handler - # here, just in case that there ever is more than one RunQueue instance, - # start the handler when reaching RunQueueState.SCENE_INIT, and stop it when - # done with the build. 
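The taskhash loop above releases work in waves: a task becomes "ready" once every task it depends on has been dealt with, and each wave's unihashes are then fetched in one batch. A compact model of the same pattern, with an invented graph and sha256 standing in for the signature generator:

import hashlib

depends = {"a": set(), "b": {"a"}, "c": {"a", "b"}}
hashes = {}
todeal = set(depends)
while todeal:
    # ready = tasks whose dependencies have all been hashed already
    ready = {tid for tid in todeal if not (depends[tid] - hashes.keys())}
    for tid in sorted(ready):
        data = tid + "".join(hashes[d] for d in sorted(depends[tid]))
        hashes[tid] = hashlib.sha256(data.encode()).hexdigest()
    todeal -= ready
print(hashes["c"][:16])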
-        self.dm = monitordisk.diskMonitor(cfgData)
-        self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
-        self.dm_event_handler_registered = False
-        self.rqexe = None
-        self.worker = {}
-        self.fakeworker = {}
-
-    @staticmethod
-    def send_pickled_data(worker, data, name):
-        msg = bytearray()
-        msg.extend(b"<" + name.encode() + b">")
-        pickled_data = pickle.dumps(data)
-        msg.extend(len(pickled_data).to_bytes(4, 'big'))
-        msg.extend(pickled_data)
-        msg.extend(b"</" + name.encode() + b">")
-        worker.stdin.write(msg)
-
-    def _start_worker(self, mc, fakeroot = False, rqexec = None):
-        logger.debug("Starting bitbake-worker")
-        magic = "decafbad"
-        if self.cooker.configuration.profile:
-            magic = "decafbadbad"
-        fakerootlogs = None
-
-        workerscript = os.path.realpath(os.path.dirname(__file__) + "/../../bin/bitbake-worker")
-        if fakeroot:
-            magic = magic + "beef"
-            mcdata = self.cooker.databuilder.mcdata[mc]
-            fakerootcmd = shlex.split(mcdata.getVar("FAKEROOTCMD"))
-            fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split()
-            env = os.environ.copy()
-            for key, value in (var.split('=',1) for var in fakerootenv):
-                env[key] = value
-            worker = subprocess.Popen(fakerootcmd + [sys.executable, workerscript, magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
-            fakerootlogs = self.rqdata.dataCaches[mc].fakerootlogs
-        else:
-            worker = subprocess.Popen([sys.executable, workerscript, magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
-        bb.utils.nonblockingfd(worker.stdout)
-        workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec, fakerootlogs=fakerootlogs)
-
-        workerdata = {
-            "sigdata" : bb.parse.siggen.get_taskdata(),
-            "logdefaultlevel" : bb.msg.loggerDefaultLogLevel,
-            "build_verbose_shell" : self.cooker.configuration.build_verbose_shell,
-            "build_verbose_stdout" : self.cooker.configuration.build_verbose_stdout,
-            "logdefaultdomain" : bb.msg.loggerDefaultDomains,
-            "prhost" : self.cooker.prhost,
-            "buildname" : self.cfgData.getVar("BUILDNAME"),
-            "date" : self.cfgData.getVar("DATE"),
-            "time" : self.cfgData.getVar("TIME"),
-            "hashservaddr" : self.cooker.hashservaddr,
-            "umask" : self.cfgData.getVar("BB_DEFAULT_UMASK"),
-        }
-
-        RunQueue.send_pickled_data(worker, self.cooker.configuration, "cookerconfig")
-        RunQueue.send_pickled_data(worker, self.cooker.extraconfigdata, "extraconfigdata")
-        RunQueue.send_pickled_data(worker, workerdata, "workerdata")
-        worker.stdin.flush()
-
-        return RunQueueWorker(worker, workerpipe)
-
-    def _teardown_worker(self, worker):
-        if not worker:
-            return
-        logger.debug("Teardown for bitbake-worker")
-        try:
-            RunQueue.send_pickled_data(worker.process, b"", "quit")
-            worker.process.stdin.flush()
-            worker.process.stdin.close()
-        except IOError:
-            pass
-        while worker.process.returncode is None:
-            worker.pipe.read()
-            worker.process.poll()
-        while worker.pipe.read():
-            continue
-        worker.pipe.close()
-
-    def start_worker(self, rqexec):
-        if self.worker:
-            self.teardown_workers()
-        self.teardown = False
-        for mc in self.rqdata.dataCaches:
-            self.worker[mc] = self._start_worker(mc, False, rqexec)
-
-    def start_fakeworker(self, rqexec, mc):
-        if mc not in self.fakeworker:
-            self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
-
-    def teardown_workers(self):
-        self.teardown = True
-        for mc in self.worker:
-            self._teardown_worker(self.worker[mc])
-        self.worker = {}
-        for mc in self.fakeworker:
-            self._teardown_worker(self.fakeworker[mc])
-        self.fakeworker = {}
-
-    def read_workers(self):
-        for mc in self.worker:
-            self.worker[mc].pipe.read()
-        for mc in
self.fakeworker: - self.fakeworker[mc].pipe.read() - - def active_fds(self): - fds = [] - for mc in self.worker: - fds.append(self.worker[mc].pipe.input) - for mc in self.fakeworker: - fds.append(self.fakeworker[mc].pipe.input) - return fds - - def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None): - def get_timestamp(f): - try: - if not os.access(f, os.F_OK): - return None - return os.stat(f)[stat.ST_MTIME] - except: - return None - - (mc, fn, tn, taskfn) = split_tid_mcfn(tid) - if taskname is None: - taskname = tn - - stampfile = bb.parse.siggen.stampfile_mcfn(taskname, taskfn) - - # If the stamp is missing, it's not current - if not os.access(stampfile, os.F_OK): - logger.debug2("Stampfile %s not available", stampfile) - return False - # If it's a 'nostamp' task, it's not current - taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] - if 'nostamp' in taskdep and taskname in taskdep['nostamp']: - logger.debug2("%s.%s is nostamp\n", fn, taskname) - return False - - if taskname.endswith("_setscene"): - return True - - if cache is None: - cache = {} - - iscurrent = True - t1 = get_timestamp(stampfile) - for dep in self.rqdata.runtaskentries[tid].depends: - if iscurrent: - (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep) - stampfile2 = bb.parse.siggen.stampfile_mcfn(taskname2, taskfn2) - stampfile3 = bb.parse.siggen.stampfile_mcfn(taskname2 + "_setscene", taskfn2) - t2 = get_timestamp(stampfile2) - t3 = get_timestamp(stampfile3) - if t3 and not t2: - continue - if t3 and t3 > t2: - continue - if fn == fn2: - if not t2: - logger.debug2('Stampfile %s does not exist', stampfile2) - iscurrent = False - break - if t1 < t2: - logger.debug2('Stampfile %s < %s', stampfile, stampfile2) - iscurrent = False - break - if recurse and iscurrent: - if dep in cache: - iscurrent = cache[dep] - if not iscurrent: - logger.debug2('Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2)) - else: - iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache) - cache[dep] = iscurrent - if recurse: - cache[tid] = iscurrent - return iscurrent - - def validate_hashes(self, tocheck, data, currentcount=0, siginfo=False, summary=True): - valid = set() - if self.hashvalidate: - sq_data = {} - sq_data['hash'] = {} - sq_data['hashfn'] = {} - sq_data['unihash'] = {} - for tid in tocheck: - (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) - sq_data['hash'][tid] = self.rqdata.runtaskentries[tid].hash - sq_data['hashfn'][tid] = self.rqdata.dataCaches[mc].hashfn[taskfn] - sq_data['unihash'][tid] = self.rqdata.runtaskentries[tid].unihash - - valid = self.validate_hash(sq_data, data, siginfo, currentcount, summary) - - return valid - - def validate_hash(self, sq_data, d, siginfo, currentcount, summary): - locs = {"sq_data" : sq_data, "d" : d, "siginfo" : siginfo, "currentcount" : currentcount, "summary" : summary} - - # Metadata has **kwargs so args can be added, sq_data can also gain new fields - call = self.hashvalidate + "(sq_data, d, siginfo=siginfo, currentcount=currentcount, summary=summary)" - - return bb.utils.better_eval(call, locs) - - def _execute_runqueue(self): - """ - Run the tasks in a queue prepared by rqdata.prepare() - Upon failure, optionally try to recover the build using any alternate providers - (if the halt on failure configuration option isn't set) - """ - - retval = True - bb.event.check_for_interrupts() - - if self.state == RunQueueState.PREPARE: - # NOTE: if you add, remove or significantly refactor the stages of this - # process then you should 
recalculate the weightings here. This is quite - # easy to do - just change the next line temporarily to pass debug=True as - # the last parameter and you'll get a printout of the weightings as well - # as a map to the lines where next_stage() was called. Of course this isn't - # critical, but it helps to keep the progress reporting accurate. - self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.data, - "Initialising tasks", - [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244]) - if self.rqdata.prepare() == 0: - self.state = RunQueueState.COMPLETE - else: - self.state = RunQueueState.SCENE_INIT - bb.parse.siggen.save_unitaskhashes() - - if self.state == RunQueueState.SCENE_INIT: - self.rqdata.init_progress_reporter.next_stage() - - # we are ready to run, emit dependency info to any UI or class which - # needs it - depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData) - self.rqdata.init_progress_reporter.next_stage() - bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data) - - if not self.dm_event_handler_registered: - res = bb.event.register(self.dm_event_handler_name, - lambda x, y: self.dm.check(self) if self.state in [RunQueueState.RUNNING, RunQueueState.CLEAN_UP] else False, - ('bb.event.HeartbeatEvent',), data=self.cfgData) - self.dm_event_handler_registered = True - - self.rqdata.init_progress_reporter.next_stage() - self.rqexe = RunQueueExecute(self) - - dumpsigs = self.cooker.configuration.dump_signatures - if dumpsigs: - self.rqdata.init_progress_reporter.finish() - if 'printdiff' in dumpsigs: - self.invalidtasks_dump = self.print_diffscenetasks() - self.state = RunQueueState.DUMP_SIGS - - if self.state == RunQueueState.DUMP_SIGS: - dumpsigs = self.cooker.configuration.dump_signatures - retval = self.dump_signatures(dumpsigs) - if retval is False: - if 'printdiff' in dumpsigs: - self.write_diffscenetasks(self.invalidtasks_dump) - self.state = RunQueueState.COMPLETE - - if self.state == RunQueueState.SCENE_INIT: - self.start_worker(self.rqexe) - self.rqdata.init_progress_reporter.finish() - - # If we don't have any setscene functions, skip execution - if not self.rqdata.runq_setscene_tids: - logger.info('No setscene tasks') - for tid in self.rqdata.runtaskentries: - if not self.rqdata.runtaskentries[tid].depends: - self.rqexe.setbuildable(tid) - self.rqexe.tasks_notcovered.add(tid) - self.rqexe.sqdone = True - logger.info('Executing Tasks') - self.state = RunQueueState.RUNNING - - if self.state == RunQueueState.RUNNING: - retval = self.rqexe.execute() - - if self.state == RunQueueState.CLEAN_UP: - retval = self.rqexe.finish() - - build_done = self.state in [RunQueueState.COMPLETE, RunQueueState.FAILED] - - if build_done and self.dm_event_handler_registered: - bb.event.remove(self.dm_event_handler_name, None, data=self.cfgData) - self.dm_event_handler_registered = False - - if build_done and self.rqexe: - bb.parse.siggen.save_unitaskhashes() - self.teardown_workers() - if self.rqexe: - if self.rqexe.stats.failed: - logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed) - else: - # Let's avoid the word "failed" if nothing actually did - logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped) - - if self.state == 
RunQueueState.FAILED:
-            raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)
-
-        if self.state == RunQueueState.COMPLETE:
-            # All done
-            return False
-
-        # Loop
-        return retval
-
-    def execute_runqueue(self):
-        # Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
-        try:
-            return self._execute_runqueue()
-        except bb.runqueue.TaskFailure:
-            raise
-        except SystemExit:
-            raise
-        except bb.BBHandledException:
-            try:
-                self.teardown_workers()
-            except:
-                pass
-            self.state = RunQueueState.COMPLETE
-            raise
-        except Exception as err:
-            logger.exception("An uncaught exception occurred in runqueue")
-            try:
-                self.teardown_workers()
-            except:
-                pass
-            self.state = RunQueueState.COMPLETE
-            raise
-
-    def finish_runqueue(self, now = False):
-        if not self.rqexe:
-            self.state = RunQueueState.COMPLETE
-            return
-
-        if now:
-            self.rqexe.finish_now()
-        else:
-            self.rqexe.finish()
-
-    def _rq_dump_sigtid(self, tids):
-        for tid in tids:
-            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
-            dataCaches = self.rqdata.dataCaches
-            bb.parse.siggen.dump_sigtask(taskfn, taskname, dataCaches[mc].stamp[taskfn], True)
-
-    def dump_signatures(self, options):
-        if not hasattr(self, "dumpsigs_launched"):
-            if bb.cooker.CookerFeatures.RECIPE_SIGGEN_INFO not in self.cooker.featureset:
-                bb.fatal("The dump signatures functionality needs the RECIPE_SIGGEN_INFO feature enabled")
-
-            bb.note("Writing task signature files")
-
-            max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
-            def chunkify(l, n):
-                return [l[i::n] for i in range(n)]
-            dumpsigs_tids = chunkify(list(self.rqdata.runtaskentries), max_process)
-
-            # We cannot use the real multiprocessing.Pool easily due to some local data
-            # that can't be pickled. This is a cheap multi-process solution.
-            self.dumpsigs_launched = []
-
-            for tids in dumpsigs_tids:
-                p = Process(target=self._rq_dump_sigtid, args=(tids, ))
-                p.start()
-                self.dumpsigs_launched.append(p)
-
-            return 1.0
-
-        # Iterate over a copy since finished processes are removed as we go
-        for q in list(self.dumpsigs_launched):
-            # The finished processes are joined when calling is_alive()
-            if not q.is_alive():
-                self.dumpsigs_launched.remove(q)
-
-        if self.dumpsigs_launched:
-            return 1.0
-
-        for p in self.dumpsigs_launched:
-            p.join()
-
-        bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
-
-        return False
-
-    def print_diffscenetasks(self):
-        def get_root_invalid_tasks(task, taskdepends, valid, noexec, visited_invalid):
-            invalidtasks = []
-            for t in taskdepends[task].depends:
-                if t not in valid and t not in visited_invalid:
-                    invalidtasks.extend(get_root_invalid_tasks(t, taskdepends, valid, noexec, visited_invalid))
-                visited_invalid.add(t)
-
-            direct_invalid = [t for t in taskdepends[task].depends if t not in valid]
-            if not direct_invalid and task not in noexec:
-                invalidtasks = [task]
-            return invalidtasks
-
-        noexec = []
-        tocheck = set()
-
-        for tid in self.rqdata.runtaskentries:
-            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
-            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
-
-            if 'noexec' in taskdep and taskname in taskdep['noexec']:
-                noexec.append(tid)
-                continue
-
-            tocheck.add(tid)
-
-        valid_new = self.validate_hashes(tocheck, self.cooker.data, 0, True, summary=False)
-
-        # Tasks which are both setscene and noexec never care about dependencies
-        # We therefore find tasks which are setscene and noexec and mark their
-        # unique dependencies as valid.
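Back in dump_signatures() above, the chunkify() helper deals work out round-robin using extended slices; for instance:

def chunkify(l, n):
    return [l[i::n] for i in range(n)]

print(chunkify(list(range(10)), 3))
# -> [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]]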
- for tid in noexec: - if tid not in self.rqdata.runq_setscene_tids: - continue - for dep in self.rqdata.runtaskentries[tid].depends: - hasnoexecparents = True - for dep2 in self.rqdata.runtaskentries[dep].revdeps: - if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec: - continue - hasnoexecparents = False - break - if hasnoexecparents: - valid_new.add(dep) - - invalidtasks = set() - - toptasks = set(["{}:{}".format(t[3], t[2]) for t in self.rqdata.targets]) - for tid in toptasks: - toprocess = set([tid]) - while toprocess: - next = set() - visited_invalid = set() - for t in toprocess: - if t not in valid_new and t not in noexec: - invalidtasks.update(get_root_invalid_tasks(t, self.rqdata.runtaskentries, valid_new, noexec, visited_invalid)) - continue - if t in self.rqdata.runq_setscene_tids: - for dep in self.rqexe.sqdata.sq_deps[t]: - next.add(dep) - continue - - for dep in self.rqdata.runtaskentries[t].depends: - next.add(dep) - - toprocess = next - - tasklist = [] - for tid in invalidtasks: - tasklist.append(tid) - - if tasklist: - bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist)) - - return invalidtasks - - def write_diffscenetasks(self, invalidtasks): - bb.siggen.check_siggen_version(bb.siggen) - - # Define recursion callback - def recursecb(key, hash1, hash2): - hashes = [hash1, hash2] - bb.debug(1, "Recursively looking for recipe {} hashes {}".format(key, hashes)) - hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData) - bb.debug(1, "Found hashfiles:\n{}".format(hashfiles)) - - recout = [] - if len(hashfiles) == 2: - out2 = bb.siggen.compare_sigfiles(hashfiles[hash1]['path'], hashfiles[hash2]['path'], recursecb) - recout.extend(list(' ' + l for l in out2)) - else: - recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2)) - - return recout - - - for tid in invalidtasks: - (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) - pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] - h = self.rqdata.runtaskentries[tid].unihash - bb.debug(1, "Looking for recipe {} task {}".format(pn, taskname)) - matches = bb.siggen.find_siginfo(pn, taskname, [], self.cooker.databuilder.mcdata[mc]) - bb.debug(1, "Found hashfiles:\n{}".format(matches)) - match = None - for m in matches.values(): - if h in m['path']: - match = m['path'] - if match is None: - bb.fatal("Can't find a task we're supposed to have written out? (hash: %s tid: %s)?" 
                     % (h, tid))
-            matches = {k : v for k, v in iter(matches.items()) if h not in k}
-            matches_local = {k : v for k, v in iter(matches.items()) if h not in k and not v['sstate']}
-            if matches_local:
-                matches = matches_local
-            if matches:
-                latestmatch = matches[sorted(matches.keys(), key=lambda h: matches[h]['time'])[-1]]['path']
-                prevh = __find_sha256__.search(latestmatch).group(0)
-                output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
-                bb.plain("\nTask %s:%s couldn't be used from the cache because:\n  We need hash %s, most recent matching task was %s\n  " % (pn, taskname, h, prevh) + '\n  '.join(output))
-
-
-class RunQueueExecute:
-
-    def __init__(self, rq):
-        self.rq = rq
-        self.cooker = rq.cooker
-        self.cfgData = rq.cfgData
-        self.rqdata = rq.rqdata
-
-        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
-        self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"
-        self.max_cpu_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_CPU")
-        self.max_io_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_IO")
-        self.max_memory_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_MEMORY")
-        self.max_loadfactor = self.cfgData.getVar("BB_LOADFACTOR_MAX")
-
-        self.sq_buildable = set()
-        self.sq_running = set()
-        self.sq_live = set()
-
-        self.updated_taskhash_queue = []
-        self.pending_migrations = set()
-
-        self.runq_buildable = set()
-        self.runq_running = set()
-        self.runq_complete = set()
-        self.runq_tasksrun = set()
-
-        self.build_stamps = {}
-        self.build_stamps2 = []
-        self.failed_tids = []
-        self.sq_deferred = {}
-        self.sq_needed_harddeps = set()
-        self.sq_harddep_deferred = set()
-
-        self.stampcache = {}
-
-        self.holdoff_tasks = set()
-        self.holdoff_need_update = True
-        self.sqdone = False
-
-        self.stats = RunQueueStats(len(self.rqdata.runtaskentries), len(self.rqdata.runq_setscene_tids))
-
-        if self.number_tasks <= 0:
-            bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)
-
-        lower_limit = 1.0
-        upper_limit = 1000000.0
-        if self.max_cpu_pressure:
-            self.max_cpu_pressure = float(self.max_cpu_pressure)
-            if self.max_cpu_pressure < lower_limit:
-                bb.fatal("Invalid BB_PRESSURE_MAX_CPU %s, minimum value is %s." % (self.max_cpu_pressure, lower_limit))
-            if self.max_cpu_pressure > upper_limit:
-                bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_CPU is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_cpu_pressure))
-
-        if self.max_io_pressure:
-            self.max_io_pressure = float(self.max_io_pressure)
-            if self.max_io_pressure < lower_limit:
-                bb.fatal("Invalid BB_PRESSURE_MAX_IO %s, minimum value is %s." % (self.max_io_pressure, lower_limit))
-            if self.max_io_pressure > upper_limit:
-                bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_IO is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_io_pressure))
-
-        if self.max_memory_pressure:
-            self.max_memory_pressure = float(self.max_memory_pressure)
-            if self.max_memory_pressure < lower_limit:
-                bb.fatal("Invalid BB_PRESSURE_MAX_MEMORY %s, minimum value is %s." % (self.max_memory_pressure, lower_limit))
-            if self.max_memory_pressure > upper_limit:
-                bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_MEMORY is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_memory_pressure))
-
-        if self.max_loadfactor:
-            self.max_loadfactor = float(self.max_loadfactor)
-            if self.max_loadfactor <= 0:
-                bb.fatal("Invalid BB_LOADFACTOR_MAX %s, needs to be greater than zero."
-                         % (self.max_loadfactor))
-
-        # List of setscene tasks which we've covered
-        self.scenequeue_covered = set()
-        # List of tasks which are covered (including setscene ones)
-        self.tasks_covered = set()
-        self.tasks_scenequeue_done = set()
-        self.scenequeue_notcovered = set()
-        self.tasks_notcovered = set()
-        self.scenequeue_notneeded = set()
-
-        schedulers = self.get_schedulers()
-        for scheduler in schedulers:
-            if self.scheduler == scheduler.name:
-                self.sched = scheduler(self, self.rqdata)
-                logger.debug("Using runqueue scheduler '%s'", scheduler.name)
-                break
-        else:
-            bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
-                     (self.scheduler, ", ".join(obj.name for obj in schedulers)))
-
-        #if self.rqdata.runq_setscene_tids:
-        self.sqdata = SQData()
-        build_scenequeue_data(self.sqdata, self.rqdata, self)
-
-        update_scenequeue_data(self.sqdata.sq_revdeps, self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=True)
-
-        # Compute a list of 'stale' sstate tasks where the current hash does not match the one
-        # in any stamp files. Pass the list out to metadata as an event.
-        found = {}
-        for tid in self.rqdata.runq_setscene_tids:
-            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
-            stamps = bb.build.find_stale_stamps(taskname, taskfn)
-            if stamps:
-                if mc not in found:
-                    found[mc] = {}
-                found[mc][tid] = stamps
-        for mc in found:
-            event = bb.event.StaleSetSceneTasks(found[mc])
-            bb.event.fire(event, self.cooker.databuilder.mcdata[mc])
-
-        self.build_taskdepdata_cache()
-
-    def runqueue_process_waitpid(self, task, status, fakerootlog=None):
-
-        # self.build_stamps[task] may not exist when using a shared work directory.
-        if task in self.build_stamps:
-            self.build_stamps2.remove(self.build_stamps[task])
-            del self.build_stamps[task]
-
-        if task in self.sq_live:
-            if status != 0:
-                self.sq_task_fail(task, status)
-            else:
-                self.sq_task_complete(task)
-            self.sq_live.remove(task)
-            self.stats.updateActiveSetscene(len(self.sq_live))
-        else:
-            if status != 0:
-                self.task_fail(task, status, fakerootlog=fakerootlog)
-            else:
-                self.task_complete(task)
-        return True
-
-    def finish_now(self):
-        for mc in self.rq.worker:
-            try:
-                RunQueue.send_pickled_data(self.rq.worker[mc].process, b"", "finishnow")
-                self.rq.worker[mc].process.stdin.flush()
-            except IOError:
-                # worker must have died?
-                pass
-        for mc in self.rq.fakeworker:
-            try:
-                RunQueue.send_pickled_data(self.rq.fakeworker[mc].process, b"", "finishnow")
-                self.rq.fakeworker[mc].process.stdin.flush()
-            except IOError:
-                # worker must have died?
- pass - - if self.failed_tids: - self.rq.state = RunQueueState.FAILED - return - - self.rq.state = RunQueueState.COMPLETE - return - - def finish(self): - self.rq.state = RunQueueState.CLEAN_UP - - active = self.stats.active + len(self.sq_live) - if active > 0: - bb.event.fire(runQueueExitWait(active), self.cfgData) - self.rq.read_workers() - return self.rq.active_fds() - - if self.failed_tids: - self.rq.state = RunQueueState.FAILED - return True - - self.rq.state = RunQueueState.COMPLETE - return True - - # Used by setscene only - def check_dependencies(self, task, taskdeps): - if not self.rq.depvalidate: - return False - - # Must not edit parent data - taskdeps = set(taskdeps) - - taskdata = {} - taskdeps.add(task) - for dep in taskdeps: - (mc, fn, taskname, taskfn) = split_tid_mcfn(dep) - pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] - taskdata[dep] = [pn, taskname, fn] - call = self.rq.depvalidate + "(task, taskdata, notneeded, d)" - locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data } - valid = bb.utils.better_eval(call, locs) - return valid - - def can_start_task(self): - active = self.stats.active + len(self.sq_live) - can_start = active < self.number_tasks - return can_start - - def get_schedulers(self): - schedulers = set(obj for obj in globals().values() - if type(obj) is type and - issubclass(obj, RunQueueScheduler)) - - user_schedulers = self.cfgData.getVar("BB_SCHEDULERS") - if user_schedulers: - for sched in user_schedulers.split(): - if not "." in sched: - bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched) - continue - - modname, name = sched.rsplit(".", 1) - try: - module = __import__(modname, fromlist=(name,)) - except ImportError as exc: - bb.fatal("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc)) - else: - schedulers.add(getattr(module, name)) - return schedulers - - def setbuildable(self, task): - self.runq_buildable.add(task) - self.sched.newbuildable(task) - - def task_completeoutright(self, task): - """ - Mark a task as completed - Look at the reverse dependencies and mark any task with - completed dependencies as buildable - """ - self.runq_complete.add(task) - for revdep in self.rqdata.runtaskentries[task].revdeps: - if revdep in self.runq_running: - continue - if revdep in self.runq_buildable: - continue - alldeps = True - for dep in self.rqdata.runtaskentries[revdep].depends: - if dep not in self.runq_complete: - alldeps = False - break - if alldeps: - self.setbuildable(revdep) - logger.debug("Marking task %s as buildable", revdep) - - found = None - for t in sorted(self.sq_deferred.copy()): - if self.sq_deferred[t] == task: - # Allow the next deferred task to run. Any other deferred tasks should be deferred after that task. - # We shouldn't allow all to run at once as it is prone to races. 
- if not found: - bb.debug(1, "Deferred task %s now buildable" % t) - del self.sq_deferred[t] - update_scenequeue_data([t], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False) - found = t - else: - bb.debug(1, "Deferring %s after %s" % (t, found)) - self.sq_deferred[t] = found - - def task_complete(self, task): - self.stats.taskCompleted() - bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData) - self.task_completeoutright(task) - self.runq_tasksrun.add(task) - - def task_fail(self, task, exitcode, fakerootlog=None): - """ - Called when a task has failed - Updates the state engine with the failure - """ - self.stats.taskFailed() - self.failed_tids.append(task) - - fakeroot_log = [] - if fakerootlog and os.path.exists(fakerootlog): - with open(fakerootlog) as fakeroot_log_file: - fakeroot_failed = False - for line in reversed(fakeroot_log_file.readlines()): - for fakeroot_error in ['mismatch', 'error', 'fatal']: - if fakeroot_error in line.lower(): - fakeroot_failed = True - if 'doing new pid setup and server start' in line: - break - fakeroot_log.append(line) - - if not fakeroot_failed: - fakeroot_log = [] - - bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq, fakeroot_log=("".join(fakeroot_log) or None)), self.cfgData) - - if self.rqdata.taskData[''].halt: - self.rq.state = RunQueueState.CLEAN_UP - - def task_skip(self, task, reason): - self.runq_running.add(task) - self.setbuildable(task) - bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData) - self.task_completeoutright(task) - self.stats.taskSkipped() - self.stats.taskCompleted() - - def summarise_scenequeue_errors(self): - err = False - if not self.sqdone: - logger.debug('We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered))) - completeevent = sceneQueueComplete(self.stats, self.rq) - bb.event.fire(completeevent, self.cfgData) - if self.sq_deferred: - logger.error("Scenequeue had deferred entries: %s" % pprint.pformat(self.sq_deferred)) - err = True - if self.updated_taskhash_queue: - logger.error("Scenequeue had unprocessed changed taskhash entries: %s" % pprint.pformat(self.updated_taskhash_queue)) - err = True - if self.holdoff_tasks: - logger.error("Scenequeue had holdoff tasks: %s" % pprint.pformat(self.holdoff_tasks)) - err = True - - for tid in self.scenequeue_covered.intersection(self.scenequeue_notcovered): - # No task should end up in both covered and uncovered, that is a bug. - logger.error("Setscene task %s in both covered and notcovered." 
% tid) - - for tid in self.rqdata.runq_setscene_tids: - if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered: - err = True - logger.error("Setscene Task %s was never marked as covered or not covered" % tid) - if tid not in self.sq_buildable: - err = True - logger.error("Setscene Task %s was never marked as buildable" % tid) - if tid not in self.sq_running: - err = True - logger.error("Setscene Task %s was never marked as running" % tid) - - for x in self.rqdata.runtaskentries: - if x not in self.tasks_covered and x not in self.tasks_notcovered: - logger.error("Task %s was never moved from the setscene queue" % x) - err = True - if x not in self.tasks_scenequeue_done: - logger.error("Task %s was never processed by the setscene code" % x) - err = True - if not self.rqdata.runtaskentries[x].depends and x not in self.runq_buildable: - logger.error("Task %s was never marked as buildable by the setscene code" % x) - err = True - return err - - - def execute(self): - """ - Run the tasks in a queue prepared by prepare_runqueue - """ - - self.rq.read_workers() - if self.updated_taskhash_queue or self.pending_migrations: - self.process_possible_migrations() - - if not hasattr(self, "sorted_setscene_tids"): - # Don't want to sort this set every execution - self.sorted_setscene_tids = sorted(self.rqdata.runq_setscene_tids) - # Resume looping where we left off when we returned to feed the mainloop - self.setscene_tids_generator = itertools.cycle(self.rqdata.runq_setscene_tids) - - task = None - if not self.sqdone and self.can_start_task(): - loopcount = 0 - # Find the next setscene to run, exit the loop when we've processed all tids or found something to execute - while loopcount < len(self.rqdata.runq_setscene_tids): - loopcount += 1 - nexttask = next(self.setscene_tids_generator) - if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values() and nexttask not in self.sq_harddep_deferred: - if nexttask in self.sq_deferred and self.sq_deferred[nexttask] not in self.runq_complete: - # Skip deferred tasks quickly before the 'expensive' tests below - this is key to performant multiconfig builds - continue - if nexttask not in self.sqdata.unskippable and self.sqdata.sq_revdeps[nexttask] and \ - nexttask not in self.sq_needed_harddeps and \ - self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and \ - self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]): - if nexttask not in self.rqdata.target_tids: - logger.debug2("Skipping setscene for task %s" % nexttask) - self.sq_task_skip(nexttask) - self.scenequeue_notneeded.add(nexttask) - if nexttask in self.sq_deferred: - del self.sq_deferred[nexttask] - return True - if nexttask in self.sqdata.sq_harddeps_rev and not self.sqdata.sq_harddeps_rev[nexttask].issubset(self.scenequeue_covered | self.scenequeue_notcovered): - logger.debug2("Deferring %s due to hard dependencies" % nexttask) - updated = False - for dep in self.sqdata.sq_harddeps_rev[nexttask]: - if dep not in self.sq_needed_harddeps: - logger.debug2("Enabling task %s as it is a hard dependency" % dep) - self.sq_buildable.add(dep) - self.sq_needed_harddeps.add(dep) - updated = True - self.sq_harddep_deferred.add(nexttask) - if updated: - return True - continue - # If covered tasks are running, need to wait for them to complete - for t in self.sqdata.sq_covered_tasks[nexttask]: - if t in self.runq_running and t not in self.runq_complete: - continue - if nexttask in 
self.sq_deferred: - # Deferred tasks that were still deferred were skipped above so we now need to process - logger.debug("Task %s no longer deferred" % nexttask) - del self.sq_deferred[nexttask] - valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, 0, False, summary=False) - if not valid: - logger.debug("%s didn't become valid, skipping setscene" % nexttask) - self.sq_task_failoutright(nexttask) - return True - if nexttask in self.sqdata.outrightfail: - logger.debug2('No package found, so skipping setscene task %s', nexttask) - self.sq_task_failoutright(nexttask) - return True - if nexttask in self.sqdata.unskippable: - logger.debug2("Setscene task %s is unskippable" % nexttask) - task = nexttask - break - if task is not None: - (mc, fn, taskname, taskfn) = split_tid_mcfn(task) - taskname = taskname + "_setscene" - if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache): - logger.debug2('Stamp for underlying task %s is current, so skipping setscene variant', task) - self.sq_task_failoutright(task) - return True - - if self.cooker.configuration.force: - if task in self.rqdata.target_tids: - self.sq_task_failoutright(task) - return True - - if self.rq.check_stamp_task(task, taskname, cache=self.stampcache): - logger.debug2('Setscene stamp current task %s, so skip it and its dependencies', task) - self.sq_task_skip(task) - return True - - if self.cooker.configuration.skipsetscene: - logger.debug2('No setscene tasks should be executed. Skipping %s', task) - self.sq_task_failoutright(task) - return True - - startevent = sceneQueueTaskStarted(task, self.stats, self.rq) - bb.event.fire(startevent, self.cfgData) - - taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] - realfn = bb.cache.virtualfn2realfn(taskfn)[0] - runtask = { - 'fn' : taskfn, - 'task' : task, - 'taskname' : taskname, - 'taskhash' : self.rqdata.get_task_hash(task), - 'unihash' : self.rqdata.get_task_unihash(task), - 'quieterrors' : True, - 'appends' : self.cooker.collections[mc].get_file_appends(taskfn), - 'layername' : self.cooker.collections[mc].calc_bbfile_priority(realfn)[2], - 'taskdepdata' : self.sq_build_taskdepdata(task), - 'dry_run' : False, - 'taskdep': taskdep, - 'fakerootenv' : self.rqdata.dataCaches[mc].fakerootenv[taskfn], - 'fakerootdirs' : self.rqdata.dataCaches[mc].fakerootdirs[taskfn], - 'fakerootnoenv' : self.rqdata.dataCaches[mc].fakerootnoenv[taskfn] - } - - if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run: - if not mc in self.rq.fakeworker: - self.rq.start_fakeworker(self, mc) - RunQueue.send_pickled_data(self.rq.fakeworker[mc].process, runtask, "runtask") - self.rq.fakeworker[mc].process.stdin.flush() - else: - RunQueue.send_pickled_data(self.rq.worker[mc].process, runtask, "runtask") - self.rq.worker[mc].process.stdin.flush() - - self.build_stamps[task] = bb.parse.siggen.stampfile_mcfn(taskname, taskfn, extrainfo=False) - self.build_stamps2.append(self.build_stamps[task]) - self.sq_running.add(task) - self.sq_live.add(task) - self.stats.updateActiveSetscene(len(self.sq_live)) - if self.can_start_task(): - return True - - self.update_holdofftasks() - - if not self.sq_live and not self.sqdone and not self.sq_deferred and not self.updated_taskhash_queue and not self.holdoff_tasks: - hashequiv_logger.verbose("Setscene tasks completed") - - err = self.summarise_scenequeue_errors() - if err: - self.rq.state = RunQueueState.FAILED - return True - - if self.cooker.configuration.setsceneonly: - 
self.rq.state = RunQueueState.COMPLETE - return True - self.sqdone = True - - if self.stats.total == 0: - # nothing to do - self.rq.state = RunQueueState.COMPLETE - return True - - if self.cooker.configuration.setsceneonly: - task = None - else: - task = self.sched.next() - if task is not None: - (mc, fn, taskname, taskfn) = split_tid_mcfn(task) - - if self.rqdata.setscene_ignore_tasks is not None: - if self.check_setscene_ignore_tasks(task): - self.task_fail(task, "setscene ignore_tasks") - return True - - if task in self.tasks_covered: - logger.debug2("Setscene covered task %s", task) - self.task_skip(task, "covered") - return True - - if self.rq.check_stamp_task(task, taskname, cache=self.stampcache): - logger.debug2("Stamp current task %s", task) - - self.task_skip(task, "existing") - self.runq_tasksrun.add(task) - return True - - taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] - if 'noexec' in taskdep and taskname in taskdep['noexec']: - startevent = runQueueTaskStarted(task, self.stats, self.rq, - noexec=True) - bb.event.fire(startevent, self.cfgData) - self.runq_running.add(task) - self.stats.taskActive() - if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce): - bb.build.make_stamp_mcfn(taskname, taskfn) - self.task_complete(task) - return True - else: - startevent = runQueueTaskStarted(task, self.stats, self.rq) - bb.event.fire(startevent, self.cfgData) - - taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] - realfn = bb.cache.virtualfn2realfn(taskfn)[0] - runtask = { - 'fn' : taskfn, - 'task' : task, - 'taskname' : taskname, - 'taskhash' : self.rqdata.get_task_hash(task), - 'unihash' : self.rqdata.get_task_unihash(task), - 'quieterrors' : False, - 'appends' : self.cooker.collections[mc].get_file_appends(taskfn), - 'layername' : self.cooker.collections[mc].calc_bbfile_priority(realfn)[2], - 'taskdepdata' : self.build_taskdepdata(task), - 'dry_run' : self.rqdata.setscene_enforce, - 'taskdep': taskdep, - 'fakerootenv' : self.rqdata.dataCaches[mc].fakerootenv[taskfn], - 'fakerootdirs' : self.rqdata.dataCaches[mc].fakerootdirs[taskfn], - 'fakerootnoenv' : self.rqdata.dataCaches[mc].fakerootnoenv[taskfn] - } - - if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce): - if not mc in self.rq.fakeworker: - try: - self.rq.start_fakeworker(self, mc) - except OSError as exc: - logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc))) - self.rq.state = RunQueueState.FAILED - self.stats.taskFailed() - return True - RunQueue.send_pickled_data(self.rq.fakeworker[mc].process, runtask, "runtask") - self.rq.fakeworker[mc].process.stdin.flush() - else: - RunQueue.send_pickled_data(self.rq.worker[mc].process, runtask, "runtask") - self.rq.worker[mc].process.stdin.flush() - - self.build_stamps[task] = bb.parse.siggen.stampfile_mcfn(taskname, taskfn, extrainfo=False) - self.build_stamps2.append(self.build_stamps[task]) - self.runq_running.add(task) - self.stats.taskActive() - if self.can_start_task(): - return True - - if self.stats.active > 0 or self.sq_live: - self.rq.read_workers() - return self.rq.active_fds() - - # No more tasks can be run. If we have deferred setscene tasks we should run them. 
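For context, sq_deferred (used just below) maps a deferred setscene tid to the tid it is queued behind, typically two tasks sharing the same sstate hash where one waits to see whether the other's outcome covers it. When nothing else can run but deferrals remain, one entry is forcibly released to break the deadlock. A toy model of that step, with hypothetical tids:

    # sq_deferred maps deferred tid -> tid it waits behind (tids invented).
    sq_deferred = {
        "mc:alt:/recipes/foo.bb:do_populate_sysroot": "/recipes/foo.bb:do_populate_sysroot",
    }
    if sq_deferred:
        deferred_tid = list(sq_deferred.keys())[0]
        blocking_tid = sq_deferred.pop(deferred_tid)
        print("forcing %s (was blocked by %s)" % (deferred_tid, blocking_tid))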
- if self.sq_deferred: - deferred_tid = list(self.sq_deferred.keys())[0] - blocking_tid = self.sq_deferred.pop(deferred_tid) - logger.warning("Runqueue deadlocked on deferred tasks, forcing task %s blocked by %s" % (deferred_tid, blocking_tid)) - return True - - if self.failed_tids: - self.rq.state = RunQueueState.FAILED - return True - - # Sanity Checks - err = self.summarise_scenequeue_errors() - for task in self.rqdata.runtaskentries: - if task not in self.runq_buildable: - logger.error("Task %s never buildable!", task) - err = True - elif task not in self.runq_running: - logger.error("Task %s never ran!", task) - err = True - elif task not in self.runq_complete: - logger.error("Task %s never completed!", task) - err = True - - if err: - self.rq.state = RunQueueState.FAILED - else: - self.rq.state = RunQueueState.COMPLETE - - return True - - def filtermcdeps(self, task, mc, deps): - ret = set() - for dep in deps: - thismc = mc_from_tid(dep) - if thismc != mc: - continue - ret.add(dep) - return ret - - # Build the individual cache entries in advance once to save time - def build_taskdepdata_cache(self): - taskdepdata_cache = {} - for task in self.rqdata.runtaskentries: - (mc, fn, taskname, taskfn) = split_tid_mcfn(task) - taskdepdata_cache[task] = bb.TaskData( - pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn], - taskname = taskname, - fn = fn, - deps = self.filtermcdeps(task, mc, self.rqdata.runtaskentries[task].depends), - provides = self.rqdata.dataCaches[mc].fn_provides[taskfn], - taskhash = self.rqdata.runtaskentries[task].hash, - unihash = self.rqdata.runtaskentries[task].unihash, - hashfn = self.rqdata.dataCaches[mc].hashfn[taskfn], - taskhash_deps = self.rqdata.runtaskentries[task].taskhash_deps, - ) - - self.taskdepdata_cache = taskdepdata_cache - - # We filter out multiconfig dependencies from taskdepdata we pass to the tasks - # as most code can't handle them - def build_taskdepdata(self, task): - taskdepdata = {} - mc = mc_from_tid(task) - next = self.rqdata.runtaskentries[task].depends.copy() - next.add(task) - next = self.filtermcdeps(task, mc, next) - while next: - additional = [] - for revdep in next: - self.taskdepdata_cache[revdep] = self.taskdepdata_cache[revdep]._replace( - unihash=self.rqdata.runtaskentries[revdep].unihash - ) - taskdepdata[revdep] = self.taskdepdata_cache[revdep] - for revdep2 in self.taskdepdata_cache[revdep].deps: - if revdep2 not in taskdepdata: - additional.append(revdep2) - next = additional - - #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n")) - return taskdepdata - - def update_holdofftasks(self): - - if not self.holdoff_need_update: - return - - notcovered = set(self.scenequeue_notcovered) - notcovered |= self.sqdata.cantskip - for tid in self.scenequeue_notcovered: - notcovered |= self.sqdata.sq_covered_tasks[tid] - notcovered |= self.sqdata.unskippable.difference(self.rqdata.runq_setscene_tids) - notcovered.intersection_update(self.tasks_scenequeue_done) - - covered = set(self.scenequeue_covered) - for tid in self.scenequeue_covered: - covered |= self.sqdata.sq_covered_tasks[tid] - covered.difference_update(notcovered) - covered.intersection_update(self.tasks_scenequeue_done) - - for tid in notcovered | covered: - if not self.rqdata.runtaskentries[tid].depends: - self.setbuildable(tid) - elif self.rqdata.runtaskentries[tid].depends.issubset(self.runq_complete): - self.setbuildable(tid) - - self.tasks_covered = covered - self.tasks_notcovered = notcovered - - self.holdoff_tasks = set() - - for tid in 
self.rqdata.runq_setscene_tids:
-            if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
-                self.holdoff_tasks.add(tid)
-
-        for tid in self.holdoff_tasks.copy():
-            for dep in self.sqdata.sq_covered_tasks[tid]:
-                if dep not in self.runq_complete:
-                    self.holdoff_tasks.add(dep)
-
-        self.holdoff_need_update = False
-
-    def process_possible_migrations(self):
-
-        changed = set()
-        toprocess = set()
-        for tid, unihash in self.updated_taskhash_queue.copy():
-            if tid in self.runq_running and tid not in self.runq_complete:
-                continue
-
-            self.updated_taskhash_queue.remove((tid, unihash))
-
-            if unihash != self.rqdata.runtaskentries[tid].unihash:
-                # Make sure we rehash any other tasks with the same task hash that we're deferred against.
-                torehash = [tid]
-                for deftid in self.sq_deferred:
-                    if self.sq_deferred[deftid] == tid:
-                        torehash.append(deftid)
-                for hashtid in torehash:
-                    hashequiv_logger.verbose("Task %s unihash changed to %s" % (hashtid, unihash))
-                    self.rqdata.runtaskentries[hashtid].unihash = unihash
-                    bb.parse.siggen.set_unihash(hashtid, unihash)
-                    toprocess.add(hashtid)
-
-        # Work out all tasks which depend upon these
-        total = set()
-        next = set()
-        for p in toprocess:
-            next |= self.rqdata.runtaskentries[p].revdeps
-        while next:
-            current = next.copy()
-            total = total | next
-            next = set()
-            for ntid in current:
-                next |= self.rqdata.runtaskentries[ntid].revdeps
-                next.difference_update(total)
-
-        # Now iterate those tasks in dependency order to regenerate their taskhash/unihash
-        next = set()
-        for p in total:
-            if not self.rqdata.runtaskentries[p].depends:
-                next.add(p)
-            elif self.rqdata.runtaskentries[p].depends.isdisjoint(total):
-                next.add(p)
-
-        starttime = time.time()
-        lasttime = starttime
-
-        # When an item doesn't have dependencies in total, we can process it. Drop items from total when handled
-        while next:
-            current = next.copy()
-            next = set()
-            ready = {}
-            for tid in current:
-                if self.rqdata.runtaskentries[tid].depends and not self.rqdata.runtaskentries[tid].depends.isdisjoint(total):
-                    continue
-                # get_taskhash for a given tid *must* be called before get_unihash* below
-                ready[tid] = bb.parse.siggen.get_taskhash(tid, self.rqdata.runtaskentries[tid].depends, self.rqdata.dataCaches)
-
-            unihashes = bb.parse.siggen.get_unihashes(ready.keys())
-
-            for tid in ready:
-                orighash = self.rqdata.runtaskentries[tid].hash
-                newhash = ready[tid]
-                origuni = self.rqdata.runtaskentries[tid].unihash
-                newuni = unihashes[tid]
-
-                # FIXME, need to check it can come from sstate at all for determinism?
-                remapped = False
-                if newuni == origuni:
-                    # Nothing to do, we match, skip code below
-                    remapped = True
-                elif tid in self.scenequeue_covered or tid in self.sq_live:
-                    # Already ran this setscene task or it is running.
Report the new taskhash - bb.parse.siggen.report_unihash_equiv(tid, newhash, origuni, newuni, self.rqdata.dataCaches) - hashequiv_logger.verbose("Already covered setscene for %s so ignoring rehash (remap)" % (tid)) - remapped = True - - if not remapped: - #logger.debug("Task %s hash changes: %s->%s %s->%s" % (tid, orighash, newhash, origuni, newuni)) - self.rqdata.runtaskentries[tid].hash = newhash - self.rqdata.runtaskentries[tid].unihash = newuni - changed.add(tid) - - next |= self.rqdata.runtaskentries[tid].revdeps - total.remove(tid) - next.intersection_update(total) - bb.event.check_for_interrupts() - - if time.time() > (lasttime + 30): - lasttime = time.time() - hashequiv_logger.verbose("Rehash loop slow progress: %s in %s" % (len(total), lasttime - starttime)) - - endtime = time.time() - if (endtime-starttime > 60): - hashequiv_logger.verbose("Rehash loop took more than 60s: %s" % (endtime-starttime)) - - if changed: - for mc in self.rq.worker: - RunQueue.send_pickled_data(self.rq.worker[mc].process, bb.parse.siggen.get_taskhashes(), "newtaskhashes") - for mc in self.rq.fakeworker: - RunQueue.send_pickled_data(self.rq.fakeworker[mc].process, bb.parse.siggen.get_taskhashes(), "newtaskhashes") - - hashequiv_logger.debug(pprint.pformat("Tasks changed:\n%s" % (changed))) - - for tid in changed: - if tid not in self.rqdata.runq_setscene_tids: - continue - if tid not in self.pending_migrations: - self.pending_migrations.add(tid) - - update_tasks = [] - for tid in self.pending_migrations.copy(): - if tid in self.runq_running or tid in self.sq_live: - # Too late, task already running, not much we can do now - self.pending_migrations.remove(tid) - continue - - valid = True - # Check no tasks this covers are running - for dep in self.sqdata.sq_covered_tasks[tid]: - if dep in self.runq_running and dep not in self.runq_complete: - hashequiv_logger.debug2("Task %s is running which blocks setscene for %s from running" % (dep, tid)) - valid = False - break - if not valid: - continue - - self.pending_migrations.remove(tid) - changed = True - - if tid in self.tasks_scenequeue_done: - self.tasks_scenequeue_done.remove(tid) - for dep in self.sqdata.sq_covered_tasks[tid]: - if dep in self.runq_complete and dep not in self.runq_tasksrun: - bb.error("Task %s marked as completed but now needing to rerun? Halting build." 
% dep) - self.failed_tids.append(tid) - self.rq.state = RunQueueState.CLEAN_UP - return - - if dep not in self.runq_complete: - if dep in self.tasks_scenequeue_done and dep not in self.sqdata.unskippable: - self.tasks_scenequeue_done.remove(dep) - - if tid in self.sq_buildable: - self.sq_buildable.remove(tid) - if tid in self.sq_running: - self.sq_running.remove(tid) - if tid in self.sqdata.outrightfail: - self.sqdata.outrightfail.remove(tid) - if tid in self.scenequeue_notcovered: - self.scenequeue_notcovered.remove(tid) - if tid in self.scenequeue_covered: - self.scenequeue_covered.remove(tid) - if tid in self.scenequeue_notneeded: - self.scenequeue_notneeded.remove(tid) - - (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) - self.sqdata.stamps[tid] = bb.parse.siggen.stampfile_mcfn(taskname, taskfn, extrainfo=False) - - if tid in self.stampcache: - del self.stampcache[tid] - - if tid in self.build_stamps: - del self.build_stamps[tid] - - update_tasks.append(tid) - - update_tasks2 = [] - for tid in update_tasks: - harddepfail = False - for t in self.sqdata.sq_harddeps_rev[tid]: - if t in self.scenequeue_notcovered: - harddepfail = True - break - if not harddepfail and self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered): - if tid not in self.sq_buildable: - self.sq_buildable.add(tid) - if not self.sqdata.sq_revdeps[tid]: - self.sq_buildable.add(tid) - - update_tasks2.append((tid, harddepfail, tid in self.sqdata.valid)) - - if update_tasks2: - self.sqdone = False - for mc in sorted(self.sqdata.multiconfigs): - for tid in sorted([t[0] for t in update_tasks2]): - if mc_from_tid(tid) != mc: - continue - h = pending_hash_index(tid, self.rqdata) - if h in self.sqdata.hashes and tid != self.sqdata.hashes[h]: - self.sq_deferred[tid] = self.sqdata.hashes[h] - bb.note("Deferring %s after %s" % (tid, self.sqdata.hashes[h])) - update_scenequeue_data([t[0] for t in update_tasks2], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False) - - for (tid, harddepfail, origvalid) in update_tasks2: - if tid in self.sqdata.valid and not origvalid: - hashequiv_logger.verbose("Setscene task %s became valid" % tid) - if harddepfail: - logger.debug2("%s has an unavailable hard dependency so skipping" % (tid)) - self.sq_task_failoutright(tid) - - if changed: - self.stats.updateCovered(len(self.scenequeue_covered), len(self.scenequeue_notcovered)) - self.sq_needed_harddeps = set() - self.sq_harddep_deferred = set() - self.holdoff_need_update = True - - def scenequeue_updatecounters(self, task, fail=False): - - if fail and task in self.sqdata.sq_harddeps: - for dep in sorted(self.sqdata.sq_harddeps[task]): - if dep in self.scenequeue_covered or dep in self.scenequeue_notcovered: - # dependency could be already processed, e.g. 
noexec setscene task - continue - noexec, stamppresent = check_setscene_stamps(dep, self.rqdata, self.rq, self.stampcache) - if noexec or stamppresent: - continue - logger.debug2("%s was unavailable and is a hard dependency of %s so skipping" % (task, dep)) - self.sq_task_failoutright(dep) - continue - - # For performance, only compute allcovered once if needed - if self.sqdata.sq_deps[task]: - allcovered = self.scenequeue_covered | self.scenequeue_notcovered - for dep in sorted(self.sqdata.sq_deps[task]): - if self.sqdata.sq_revdeps[dep].issubset(allcovered): - if dep not in self.sq_buildable: - self.sq_buildable.add(dep) - - next = set([task]) - while next: - new = set() - for t in sorted(next): - self.tasks_scenequeue_done.add(t) - # Look down the dependency chain for non-setscene things which this task depends on - # and mark as 'done' - for dep in self.rqdata.runtaskentries[t].depends: - if dep in self.rqdata.runq_setscene_tids or dep in self.tasks_scenequeue_done: - continue - if self.rqdata.runtaskentries[dep].revdeps.issubset(self.tasks_scenequeue_done): - new.add(dep) - next = new - - # If this task was one which other setscene tasks have a hard dependency upon, we need - # to walk through the hard dependencies and allow execution of those which have completed dependencies. - if task in self.sqdata.sq_harddeps: - for dep in self.sq_harddep_deferred.copy(): - if self.sqdata.sq_harddeps_rev[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered): - self.sq_harddep_deferred.remove(dep) - - self.stats.updateCovered(len(self.scenequeue_covered), len(self.scenequeue_notcovered)) - self.holdoff_need_update = True - - def sq_task_completeoutright(self, task): - """ - Mark a task as completed - Look at the reverse dependencies and mark any task with - completed dependencies as buildable - """ - - logger.debug('Found task %s which could be accelerated', task) - self.scenequeue_covered.add(task) - self.scenequeue_updatecounters(task) - - def sq_check_taskfail(self, task): - if self.rqdata.setscene_ignore_tasks is not None: - realtask = task.split('_setscene')[0] - (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask) - pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] - if not check_setscene_enforce_ignore_tasks(pn, taskname, self.rqdata.setscene_ignore_tasks): - logger.error('Task %s.%s failed' % (pn, taskname + "_setscene")) - self.rq.state = RunQueueState.CLEAN_UP - - def sq_task_complete(self, task): - bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData) - self.sq_task_completeoutright(task) - - def sq_task_fail(self, task, result): - bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData) - self.scenequeue_notcovered.add(task) - self.scenequeue_updatecounters(task, True) - self.sq_check_taskfail(task) - - def sq_task_failoutright(self, task): - self.sq_running.add(task) - self.sq_buildable.add(task) - self.scenequeue_notcovered.add(task) - self.scenequeue_updatecounters(task, True) - - def sq_task_skip(self, task): - self.sq_running.add(task) - self.sq_buildable.add(task) - self.sq_task_completeoutright(task) - - def sq_build_taskdepdata(self, task): - def getsetscenedeps(tid): - deps = set() - (mc, fn, taskname, _) = split_tid_mcfn(tid) - realtid = tid + "_setscene" - idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends - for (depname, idependtask) in idepends: - if depname not in self.rqdata.taskData[mc].build_targets: - continue - - depfn = self.rqdata.taskData[mc].build_targets[depname][0] - if depfn is None: 
- continue - deptid = depfn + ":" + idependtask.replace("_setscene", "") - deps.add(deptid) - return deps - - taskdepdata = {} - next = getsetscenedeps(task) - next.add(task) - while next: - additional = [] - for revdep in next: - (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep) - deps = getsetscenedeps(revdep) - - taskdepdata[revdep] = bb.TaskData( - pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn], - taskname = taskname, - fn = fn, - deps = deps, - provides = self.rqdata.dataCaches[mc].fn_provides[taskfn], - taskhash = self.rqdata.runtaskentries[revdep].hash, - unihash = self.rqdata.runtaskentries[revdep].unihash, - hashfn = self.rqdata.dataCaches[mc].hashfn[taskfn], - taskhash_deps = self.rqdata.runtaskentries[revdep].taskhash_deps, - ) - for revdep2 in deps: - if revdep2 not in taskdepdata: - additional.append(revdep2) - next = additional - - #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n")) - return taskdepdata - - def check_setscene_ignore_tasks(self, tid): - # Check task that is going to run against the ignore tasks list - (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) - # Ignore covered tasks - if tid in self.tasks_covered: - return False - # Ignore stamped tasks - if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache): - return False - # Ignore noexec tasks - taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] - if 'noexec' in taskdep and taskname in taskdep['noexec']: - return False - - pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] - if not check_setscene_enforce_ignore_tasks(pn, taskname, self.rqdata.setscene_ignore_tasks): - if tid in self.rqdata.runq_setscene_tids: - msg = ['Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname)] - else: - msg = ['Task %s.%s attempted to execute unexpectedly' % (pn, taskname)] - for t in self.scenequeue_notcovered: - msg.append("\nTask %s, unihash %s, taskhash %s" % (t, self.rqdata.runtaskentries[t].unihash, self.rqdata.runtaskentries[t].hash)) - msg.append('\nThis is usually due to missing setscene tasks. Those missing in this build were: %s' % pprint.pformat(self.scenequeue_notcovered)) - logger.error("".join(msg)) - return True - return False - -class SQData(object): - def __init__(self): - # SceneQueue dependencies - self.sq_deps = {} - # SceneQueue reverse dependencies - self.sq_revdeps = {} - # Injected inter-setscene task dependencies - self.sq_harddeps = {} - self.sq_harddeps_rev = {} - # Cache of stamp files so duplicates can't run in parallel - self.stamps = {} - # Setscene tasks directly depended upon by the build - self.unskippable = set() - # List of setscene tasks which aren't present - self.outrightfail = set() - # A list of normal tasks a setscene task covers - self.sq_covered_tasks = {} - -def build_scenequeue_data(sqdata, rqdata, sqrq): - - sq_revdeps = {} - sq_revdeps_squash = {} - sq_collated_deps = {} - - # We can't skip specified target tasks which aren't setscene tasks - sqdata.cantskip = set(rqdata.target_tids) - sqdata.cantskip.difference_update(rqdata.runq_setscene_tids) - sqdata.cantskip.intersection_update(rqdata.runtaskentries) - - # We need to construct a dependency graph for the setscene functions. Intermediate - # dependencies between the setscene tasks only complicate the code. This code - # therefore aims to collapse the huge runqueue dependency tree into a smaller one - # only containing the setscene functions. - - rqdata.init_progress_reporter.next_stage() - - # First process the chains up to the first setscene task. 
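build_scenequeue_data() collapses the full runqueue DAG into a graph over setscene tasks only: ordinary tasks along each chain are folded into sq_covered_tasks, and sq_revdeps records, for each setscene task, the setscene tasks that (transitively) depend on it, so the scenequeue is tried from the build targets downwards. A toy illustration of the collapsed structures, with hypothetical tids:

    # Chain: foo.bb:do_fetch -> foo.bb:do_install
    #        -> foo.bb:do_populate_sysroot (setscene)
    #        -> bar.bb:do_configure -> bar.bb:do_populate_sysroot (setscene target)
    sq_revdeps = {
        # setscene tasks depending on this one (none for the target)
        "foo.bb:do_populate_sysroot": {"bar.bb:do_populate_sysroot"},
        "bar.bb:do_populate_sysroot": set(),
    }
    sq_covered_tasks = {
        # ordinary tasks each setscene task "covers" if it succeeds
        "foo.bb:do_populate_sysroot": {"foo.bb:do_fetch", "foo.bb:do_install"},
        "bar.bb:do_populate_sysroot": {"bar.bb:do_configure"},
    }
    # Setscene tasks with no setscene reverse dependencies are tried first:
    initially_buildable = {t for t, rd in sq_revdeps.items() if not rd}
    print(initially_buildable)   # -> {'bar.bb:do_populate_sysroot'}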
-    endpoints = {}
-    for tid in rqdata.runtaskentries:
-        sq_revdeps[tid] = copy.copy(rqdata.runtaskentries[tid].revdeps)
-        sq_revdeps_squash[tid] = set()
-        if not sq_revdeps[tid] and tid not in rqdata.runq_setscene_tids:
-            #bb.warn("Added endpoint %s" % (tid))
-            endpoints[tid] = set()
-
-    rqdata.init_progress_reporter.next_stage()
-
-    # Secondly process the chains between setscene tasks.
-    for tid in rqdata.runq_setscene_tids:
-        sq_collated_deps[tid] = set()
-        #bb.warn("Added endpoint 2 %s" % (tid))
-        for dep in rqdata.runtaskentries[tid].depends:
-            if tid in sq_revdeps[dep]:
-                sq_revdeps[dep].remove(tid)
-            if dep not in endpoints:
-                endpoints[dep] = set()
-            #bb.warn("  Added endpoint 3 %s" % (dep))
-            endpoints[dep].add(tid)
-
-    rqdata.init_progress_reporter.next_stage()
-
-    def process_endpoints(endpoints):
-        newendpoints = {}
-        for point, task in endpoints.items():
-            tasks = set()
-            if task:
-                tasks |= task
-            if sq_revdeps_squash[point]:
-                tasks |= sq_revdeps_squash[point]
-            if point not in rqdata.runq_setscene_tids:
-                for t in tasks:
-                    sq_collated_deps[t].add(point)
-            sq_revdeps_squash[point] = set()
-            if point in rqdata.runq_setscene_tids:
-                sq_revdeps_squash[point] = tasks
-                continue
-            for dep in rqdata.runtaskentries[point].depends:
-                if point in sq_revdeps[dep]:
-                    sq_revdeps[dep].remove(point)
-                if tasks:
-                    sq_revdeps_squash[dep] |= tasks
-                if not sq_revdeps[dep] and dep not in rqdata.runq_setscene_tids:
-                    newendpoints[dep] = task
-        if newendpoints:
-            process_endpoints(newendpoints)
-
-    process_endpoints(endpoints)
-
-    rqdata.init_progress_reporter.next_stage()
-
-    # Build a list of tasks which are "unskippable"
-    # These are direct endpoints referenced by the build up to and including setscene tasks
-    # Take the build endpoints (no revdeps) and find the sstate tasks they depend upon
-    new = True
-    for tid in rqdata.runtaskentries:
-        if not rqdata.runtaskentries[tid].revdeps:
-            sqdata.unskippable.add(tid)
-    sqdata.unskippable |= sqdata.cantskip
-    while new:
-        new = False
-        orig = sqdata.unskippable.copy()
-        for tid in sorted(orig, reverse=True):
-            if tid in rqdata.runq_setscene_tids:
-                continue
-            if not rqdata.runtaskentries[tid].depends:
-                # These are tasks which have no setscene tasks in their chain, need to mark as directly buildable
-                sqrq.setbuildable(tid)
-            sqdata.unskippable |= rqdata.runtaskentries[tid].depends
-        if sqdata.unskippable != orig:
-            new = True
-
-    sqrq.tasks_scenequeue_done |= sqdata.unskippable.difference(rqdata.runq_setscene_tids)
-
-    rqdata.init_progress_reporter.next_stage(len(rqdata.runtaskentries))
-
-    # Sanity check all dependencies could be changed to setscene task references
-    for tid in rqdata.runtaskentries:
-        if tid in rqdata.runq_setscene_tids:
-            pass
-        elif sq_revdeps_squash[tid]:
-            bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, halting. Please report this problem.")
-        else:
-            del sq_revdeps_squash[tid]
-
-    rqdata.init_progress_reporter.next_stage()
-
-    # Resolve setscene inter-task dependencies
-    # e.g.
do_sometask_setscene[depends] = "targetname:do_someothertask_setscene" - # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies - for tid in rqdata.runq_setscene_tids: - (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) - realtid = tid + "_setscene" - idepends = rqdata.taskData[mc].taskentries[realtid].idepends - sqdata.stamps[tid] = bb.parse.siggen.stampfile_mcfn(taskname, taskfn, extrainfo=False) - - sqdata.sq_harddeps_rev[tid] = set() - for (depname, idependtask) in idepends: - - if depname not in rqdata.taskData[mc].build_targets: - continue - - depfn = rqdata.taskData[mc].build_targets[depname][0] - if depfn is None: - continue - deptid = depfn + ":" + idependtask.replace("_setscene", "") - if deptid not in rqdata.runtaskentries: - bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask)) - - logger.debug2("Adding hard setscene dependency %s for %s" % (deptid, tid)) - - if not deptid in sqdata.sq_harddeps: - sqdata.sq_harddeps[deptid] = set() - sqdata.sq_harddeps[deptid].add(tid) - sqdata.sq_harddeps_rev[tid].add(deptid) - - rqdata.init_progress_reporter.next_stage() - - rqdata.init_progress_reporter.next_stage() - - #for tid in sq_revdeps_squash: - # data = "" - # for dep in sq_revdeps_squash[tid]: - # data = data + "\n %s" % dep - # bb.warn("Task %s_setscene: is %s " % (tid, data)) - - sqdata.sq_revdeps = sq_revdeps_squash - sqdata.sq_covered_tasks = sq_collated_deps - - # Build reverse version of revdeps to populate deps structure - for tid in sqdata.sq_revdeps: - sqdata.sq_deps[tid] = set() - for tid in sqdata.sq_revdeps: - for dep in sqdata.sq_revdeps[tid]: - sqdata.sq_deps[dep].add(tid) - - rqdata.init_progress_reporter.next_stage() - - sqdata.multiconfigs = set() - for tid in sqdata.sq_revdeps: - sqdata.multiconfigs.add(mc_from_tid(tid)) - if not sqdata.sq_revdeps[tid]: - sqrq.sq_buildable.add(tid) - - rqdata.init_progress_reporter.next_stage() - - sqdata.noexec = set() - sqdata.stamppresent = set() - sqdata.valid = set() - - sqdata.hashes = {} - sqrq.sq_deferred = {} - for mc in sorted(sqdata.multiconfigs): - for tid in sorted(sqdata.sq_revdeps): - if mc_from_tid(tid) != mc: - continue - h = pending_hash_index(tid, rqdata) - if h not in sqdata.hashes: - sqdata.hashes[h] = tid - else: - sqrq.sq_deferred[tid] = sqdata.hashes[h] - bb.debug(1, "Deferring %s after %s" % (tid, sqdata.hashes[h])) - -def check_setscene_stamps(tid, rqdata, rq, stampcache, noexecstamp=False): - - (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) - - taskdep = rqdata.dataCaches[mc].task_deps[taskfn] - - if 'noexec' in taskdep and taskname in taskdep['noexec']: - bb.build.make_stamp_mcfn(taskname + "_setscene", taskfn) - return True, False - - if rq.check_stamp_task(tid, taskname + "_setscene", cache=stampcache): - logger.debug2('Setscene stamp current for task %s', tid) - return False, True - - if rq.check_stamp_task(tid, taskname, recurse = True, cache=stampcache): - logger.debug2('Normal stamp current for task %s', tid) - return False, True - - return False, False - -def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True): - - tocheck = set() - - for tid in sorted(tids): - if tid in sqdata.stamppresent: - sqdata.stamppresent.remove(tid) - if tid in sqdata.valid: - sqdata.valid.remove(tid) - if tid in sqdata.outrightfail: - sqdata.outrightfail.remove(tid) - - noexec, stamppresent = check_setscene_stamps(tid, rqdata, rq, stampcache, noexecstamp=True) - - if 
noexec: - sqdata.noexec.add(tid) - sqrq.sq_task_skip(tid) - logger.debug2("%s is noexec so skipping setscene" % (tid)) - continue - - if stamppresent: - sqdata.stamppresent.add(tid) - sqrq.sq_task_skip(tid) - logger.debug2("%s has a valid stamp, skipping" % (tid)) - continue - - tocheck.add(tid) - - sqdata.valid |= rq.validate_hashes(tocheck, cooker.data, len(sqdata.stamppresent), False, summary=summary) - - for tid in tids: - if tid in sqdata.stamppresent: - continue - if tid in sqdata.valid: - continue - if tid in sqdata.noexec: - continue - if tid in sqrq.scenequeue_covered: - continue - if tid in sqrq.scenequeue_notcovered: - continue - if tid in sqrq.sq_deferred: - continue - sqdata.outrightfail.add(tid) - logger.debug2("%s already handled (fallthrough), skipping" % (tid)) - -class TaskFailure(Exception): - """ - Exception raised when a task in a runqueue fails - """ - def __init__(self, x): - self.args = x - - -class runQueueExitWait(bb.event.Event): - """ - Event when waiting for task processes to exit - """ - - def __init__(self, remain): - self.remain = remain - self.message = "Waiting for %s active tasks to finish" % remain - bb.event.Event.__init__(self) - -class runQueueEvent(bb.event.Event): - """ - Base runQueue event class - """ - def __init__(self, task, stats, rq): - self.taskid = task - self.taskstring = task - self.taskname = taskname_from_tid(task) - self.taskfile = fn_from_tid(task) - self.taskhash = rq.rqdata.get_task_hash(task) - self.stats = stats.copy() - bb.event.Event.__init__(self) - -class sceneQueueEvent(runQueueEvent): - """ - Base sceneQueue event class - """ - def __init__(self, task, stats, rq, noexec=False): - runQueueEvent.__init__(self, task, stats, rq) - self.taskstring = task + "_setscene" - self.taskname = taskname_from_tid(task) + "_setscene" - self.taskfile = fn_from_tid(task) - self.taskhash = rq.rqdata.get_task_hash(task) - -class runQueueTaskStarted(runQueueEvent): - """ - Event notifying a task was started - """ - def __init__(self, task, stats, rq, noexec=False): - runQueueEvent.__init__(self, task, stats, rq) - self.noexec = noexec - -class sceneQueueTaskStarted(sceneQueueEvent): - """ - Event notifying a setscene task was started - """ - def __init__(self, task, stats, rq, noexec=False): - sceneQueueEvent.__init__(self, task, stats, rq) - self.noexec = noexec - -class runQueueTaskFailed(runQueueEvent): - """ - Event notifying a task failed - """ - def __init__(self, task, stats, exitcode, rq, fakeroot_log=None): - runQueueEvent.__init__(self, task, stats, rq) - self.exitcode = exitcode - self.fakeroot_log = fakeroot_log - - def __str__(self): - if self.fakeroot_log: - return "Task (%s) failed with exit code '%s' \nPseudo log:\n%s" % (self.taskstring, self.exitcode, self.fakeroot_log) - else: - return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode) - -class sceneQueueTaskFailed(sceneQueueEvent): - """ - Event notifying a setscene task failed - """ - def __init__(self, task, stats, exitcode, rq): - sceneQueueEvent.__init__(self, task, stats, rq) - self.exitcode = exitcode - - def __str__(self): - return "Setscene task (%s) failed with exit code '%s' - real task will be run instead" % (self.taskstring, self.exitcode) - -class sceneQueueComplete(sceneQueueEvent): - """ - Event when all the sceneQueue tasks are complete - """ - def __init__(self, stats, rq): - self.stats = stats.copy() - bb.event.Event.__init__(self) - -class runQueueTaskCompleted(runQueueEvent): - """ - Event notifying a task completed - """ - -class 
sceneQueueTaskCompleted(sceneQueueEvent):
-    """
-    Event notifying a setscene task completed
-    """
-
-class runQueueTaskSkipped(runQueueEvent):
-    """
-    Event notifying a task was skipped
-    """
-    def __init__(self, task, stats, rq, reason):
-        runQueueEvent.__init__(self, task, stats, rq)
-        self.reason = reason
-
-class taskUniHashUpdate(bb.event.Event):
-    """
-    Event notifying that a task's unihash was updated
-    """
-    def __init__(self, task, unihash):
-        self.taskid = task
-        self.unihash = unihash
-        bb.event.Event.__init__(self)
-
-class runQueuePipe():
-    """
-    Abstraction for a pipe between a worker thread and the server
-    """
-    def __init__(self, pipein, pipeout, d, rq, rqexec, fakerootlogs=None):
-        self.input = pipein
-        if pipeout:
-            pipeout.close()
-        bb.utils.nonblockingfd(self.input)
-        self.queue = bytearray()
-        self.d = d
-        self.rq = rq
-        self.rqexec = rqexec
-        self.fakerootlogs = fakerootlogs
-
-    def read(self):
-        for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
-            for worker in workers.values():
-                worker.process.poll()
-                if worker.process.returncode is not None and not self.rq.teardown:
-                    bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode)))
-                    self.rq.finish_runqueue(True)
-
-        start = len(self.queue)
-        try:
-            self.queue.extend(self.input.read(512 * 1024) or b"")
-        except (OSError, IOError) as e:
-            if e.errno != errno.EAGAIN:
-                raise
-        end = len(self.queue)
-        found = True
-        while found and self.queue:
-            found = False
-            index = self.queue.find(b"</event>")
-            while index != -1 and self.queue.startswith(b"<event>"):
-                try:
-                    event = pickle.loads(self.queue[7:index])
-                except (ValueError, pickle.UnpicklingError, AttributeError, IndexError) as e:
-                    if isinstance(e, pickle.UnpicklingError) and "truncated" in str(e):
-                        # The pickled data could contain "</event>" so search for the next occurrence,
-                        # unpickling again; this should be the only way an unpickle error could occur
-                        index = self.queue.find(b"</event>", index + 1)
-                        continue
-                    bb.msg.fatal("RunQueue", "failed to load pickle '%s': '%s'" % (e, self.queue[7:index]))
-                bb.event.fire_from_worker(event, self.d)
-                if isinstance(event, taskUniHashUpdate):
-                    self.rqexec.updated_taskhash_queue.append((event.taskid, event.unihash))
-                found = True
-                self.queue = self.queue[index+8:]
-                index = self.queue.find(b"</event>")
-            index = self.queue.find(b"</exitcode>")
-            while index != -1 and self.queue.startswith(b"<exitcode>"):
-                try:
-                    task, status = pickle.loads(self.queue[10:index])
-                except (ValueError, pickle.UnpicklingError, AttributeError, IndexError) as e:
-                    bb.msg.fatal("RunQueue", "failed to load pickle '%s': '%s'" % (e, self.queue[10:index]))
-                (_, _, _, taskfn) = split_tid_mcfn(task)
-                fakerootlog = None
-                if self.fakerootlogs and taskfn and taskfn in self.fakerootlogs:
-                    fakerootlog = self.fakerootlogs[taskfn]
-                self.rqexec.runqueue_process_waitpid(task, status, fakerootlog=fakerootlog)
-                found = True
-                self.queue = self.queue[index+11:]
-                index = self.queue.find(b"</exitcode>")
-        return (end > start)
-
-    def close(self):
-        while self.read():
-            continue
-        if self.queue:
-            print("Warning, worker left partial message: %s" % self.queue)
-        self.input.close()
-
-def get_setscene_enforce_ignore_tasks(d, targets):
-    if d.getVar('BB_SETSCENE_ENFORCE') != '1':
-        return None
-    ignore_tasks = (d.getVar("BB_SETSCENE_ENFORCE_IGNORE_TASKS") or "").split()
-    outlist = []
-    for item in ignore_tasks[:]:
-        if item.startswith('%:'):
-            for (mc, target, task, fn) in targets:
-                outlist.append(target + ':' + item.split(':')[1])
-        else:
-            outlist.append(item)
-    return outlist
-
-def check_setscene_enforce_ignore_tasks(pn, taskname, ignore_tasks):
-    import fnmatch
-    if ignore_tasks is not None:
-        item = '%s:%s' % (pn, taskname)
-        for pattern in ignore_tasks:
-            if fnmatch.fnmatch(item, pattern):
-                return True
-        return False
-    return True
diff --git a/bitbake/lib/bb/server/__init__.py b/bitbake/lib/bb/server/__init__.py
deleted file mode 100644
index b6f7513181..0000000000
--- a/bitbake/lib/bb/server/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# BitBake Base Server Code
-#
-# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer
-# Copyright (C) 2006 - 2008 Richard Purdie
-# Copyright (C) 2013 Alexandru Damian
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
diff --git a/bitbake/lib/bb/server/process.py b/bitbake/lib/bb/server/process.py
deleted file mode 100644
index d0f73590cc..0000000000
--- a/bitbake/lib/bb/server/process.py
+++ /dev/null
@@ -1,854 +0,0 @@
-#
-# BitBake Process based server.
-#
-# Copyright (C) 2010 Bob Foerster
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-
-"""
-    This module implements a multiprocessing.Process based server for bitbake.
-"""
-
-import bb
-import bb.event
-import logging
-from bb import multiprocessing
-import threading
-import array
-import os
-import sys
-import time
-import select
-import socket
-import subprocess
-import errno
-import re
-import datetime
-import pickle
-import traceback
-import gc
-import stat
-import bb.server.xmlrpcserver
-from bb import daemonize
-from multiprocessing import queues
-
-logger = logging.getLogger('BitBake')
-
-class ProcessTimeout(SystemExit):
-    pass
-
-def currenttime():
-    return datetime.datetime.now().strftime('%H:%M:%S.%f')
-
-def serverlog(msg):
-    print(str(os.getpid()) + " " + currenttime() + " " + msg)
-    # Seems a flush here triggers filesystem sync like behaviour and long hangs in the server
-    #sys.stdout.flush()
-
-#
-# When we have lockfile issues, try and find information about which process is
-# using the lockfile
-#
-def get_lockfile_process_msg(lockfile):
-    # Some systems may not have lsof available
-    procs = None
-    try:
-        procs = subprocess.check_output(["lsof", '-w', lockfile], stderr=subprocess.STDOUT)
-    except subprocess.CalledProcessError:
-        # File was deleted?
-        pass
-    except OSError as e:
-        if e.errno != errno.ENOENT:
-            raise
-    if procs is None:
-        # Fall back to fuser if lsof is unavailable
-        try:
-            procs = subprocess.check_output(["fuser", '-v', lockfile], stderr=subprocess.STDOUT)
-        except subprocess.CalledProcessError:
-            # File was deleted?
-            pass
-        except OSError as e:
-            if e.errno != errno.ENOENT:
-                raise
-    if procs:
-        return procs.decode("utf-8")
-    return None
-
-class idleFinish():
-    def __init__(self, msg):
-        self.msg = msg
-
-class ProcessServer():
-    def __init__(self, lock, lockname, sock, sockname, server_timeout, xmlrpcinterface):
-        self.command_channel = False
-        self.command_channel_reply = False
-        self.quit = False
-        self.heartbeat_seconds = 1 # default, BB_HEARTBEAT_EVENT will be checked once we have a datastore.
-        self.next_heartbeat = time.time()
-
-        self.event_handle = None
-        self.hadanyui = False
-        self.haveui = False
-        self.maxuiwait = 30
-        self.xmlrpc = False
-
-        self.idle = None
-        # Need a lock for _idlefuns changes
-        self._idlefuns = {}
-        self._idlefuncsLock = threading.Lock()
-        self.idle_cond = threading.Condition(self._idlefuncsLock)
-
-        self.bitbake_lock = lock
-        self.bitbake_lock_name = lockname
-        self.sock = sock
-        self.sockname = sockname
-        # It is possible the directory may be renamed.
Cache the inode of the socket file - # so we can tell if things changed. - self.sockinode = os.stat(self.sockname)[stat.ST_INO] - - self.server_timeout = server_timeout - self.timeout = self.server_timeout - self.xmlrpcinterface = xmlrpcinterface - - def register_idle_function(self, function, data): - """Register a function to be called while the server is idle""" - assert hasattr(function, '__call__') - with bb.utils.lock_timeout(self._idlefuncsLock): - self._idlefuns[function] = data - serverlog("Registering idle function %s" % str(function)) - - def run(self): - - if self.xmlrpcinterface[0]: - self.xmlrpc = bb.server.xmlrpcserver.BitBakeXMLRPCServer(self.xmlrpcinterface, self.cooker, self) - - serverlog("Bitbake XMLRPC server address: %s, server port: %s" % (self.xmlrpc.host, self.xmlrpc.port)) - - try: - self.bitbake_lock.seek(0) - self.bitbake_lock.truncate() - if self.xmlrpc: - self.bitbake_lock.write("%s %s:%s\n" % (os.getpid(), self.xmlrpc.host, self.xmlrpc.port)) - else: - self.bitbake_lock.write("%s\n" % (os.getpid())) - self.bitbake_lock.flush() - except Exception as e: - serverlog("Error writing to lock file: %s" % str(e)) - pass - - return bb.utils.profile_function("main" in self.cooker.configuration.profile, self.main, "profile-mainloop.log") - - def _idle_check(self): - return len(self._idlefuns) == 0 and self.cooker.command.currentAsyncCommand is None - - def wait_for_idle(self, timeout=30): - # Wait for the idle loop to have cleared - with bb.utils.lock_timeout(self._idlefuncsLock): - return self.idle_cond.wait_for(self._idle_check, timeout) is not False - - def set_async_cmd(self, cmd): - with bb.utils.lock_timeout(self._idlefuncsLock): - ret = self.idle_cond.wait_for(self._idle_check, 30) - if ret is False: - return False - self.cooker.command.currentAsyncCommand = cmd - return True - - def clear_async_cmd(self): - with bb.utils.lock_timeout(self._idlefuncsLock): - self.cooker.command.currentAsyncCommand = None - self.idle_cond.notify_all() - - def get_async_cmd(self): - with bb.utils.lock_timeout(self._idlefuncsLock): - return self.cooker.command.currentAsyncCommand - - def main(self): - self.cooker.pre_serve() - - bb.utils.set_process_name("Cooker") - - ready = [] - newconnections = [] - - self.controllersock = False - fds = [self.sock] - if self.xmlrpc: - fds.append(self.xmlrpc) - seendata = False - serverlog("Entering server connection loop") - serverlog("Lockfile is: %s\nSocket is %s (%s)" % (self.bitbake_lock_name, self.sockname, os.path.exists(self.sockname))) - - def disconnect_client(self, fds): - serverlog("Disconnecting Client (socket: %s)" % os.path.exists(self.sockname)) - if self.controllersock: - fds.remove(self.controllersock) - self.controllersock.close() - self.controllersock = False - if self.haveui: - # Wait for the idle loop to have cleared (30s max) - if not self.wait_for_idle(30): - serverlog("Idle loop didn't finish queued commands after 30s, exiting.") - self.quit = True - fds.remove(self.command_channel) - bb.event.unregister_UIHhandler(self.event_handle, True) - self.command_channel_reply.writer.close() - self.event_writer.writer.close() - self.command_channel.close() - self.command_channel = False - del self.event_writer - self.lastui = time.time() - self.cooker.clientComplete() - self.haveui = False - ready = select.select(fds,[],[],0)[0] - if newconnections and not self.quit: - serverlog("Starting new client") - conn = newconnections.pop(-1) - fds.append(conn) - self.controllersock = conn - elif not self.timeout and not ready: - 
serverlog("No timeout, exiting.") - self.quit = True - - self.lastui = time.time() - while not self.quit: - if self.sock in ready: - while select.select([self.sock],[],[],0)[0]: - controllersock, address = self.sock.accept() - if self.controllersock: - serverlog("Queuing %s (%s)" % (str(ready), str(newconnections))) - newconnections.append(controllersock) - else: - serverlog("Accepting %s (%s)" % (str(ready), str(newconnections))) - self.controllersock = controllersock - fds.append(controllersock) - if self.controllersock in ready: - try: - serverlog("Processing Client") - ui_fds = recvfds(self.controllersock, 3) - serverlog("Connecting Client") - - # Where to write events to - writer = ConnectionWriter(ui_fds[0]) - self.event_handle = bb.event.register_UIHhandler(writer, True) - self.event_writer = writer - - # Where to read commands from - reader = ConnectionReader(ui_fds[1]) - fds.append(reader) - self.command_channel = reader - - # Where to send command return values to - writer = ConnectionWriter(ui_fds[2]) - self.command_channel_reply = writer - - self.haveui = True - self.hadanyui = True - - except (EOFError, OSError): - disconnect_client(self, fds) - - if not self.timeout == -1.0 and not self.haveui and self.timeout and \ - (self.lastui + self.timeout) < time.time(): - serverlog("Server timeout, exiting.") - self.quit = True - - # If we don't see a UI connection within maxuiwait, its unlikely we're going to see - # one. We have had issue with processes hanging indefinitely so timing out UI-less - # servers is useful. - if not self.hadanyui and not self.xmlrpc and not self.timeout and (self.lastui + self.maxuiwait) < time.time(): - serverlog("No UI connection within max timeout, exiting to avoid infinite loop.") - self.quit = True - - if self.command_channel in ready: - try: - command = self.command_channel.get() - except EOFError: - # Client connection shutting down - ready = [] - disconnect_client(self, fds) - continue - if command[0] == "terminateServer": - self.quit = True - continue - try: - serverlog("Running command %s" % command) - reply = self.cooker.command.runCommand(command, self) - serverlog("Sending reply %s" % repr(reply)) - self.command_channel_reply.send(reply) - serverlog("Command Completed (socket: %s)" % os.path.exists(self.sockname)) - except Exception as e: - stack = traceback.format_exc() - serverlog('Exception in server main event loop running command %s (%s)' % (command, stack)) - logger.exception('Exception in server main event loop running command %s (%s)' % (command, stack)) - - if self.xmlrpc in ready: - self.xmlrpc.handle_requests() - - if not seendata and hasattr(self.cooker, "data"): - heartbeat_event = self.cooker.data.getVar('BB_HEARTBEAT_EVENT') - if heartbeat_event: - try: - self.heartbeat_seconds = float(heartbeat_event) - except: - bb.warn('Ignoring invalid BB_HEARTBEAT_EVENT=%s, must be a float specifying seconds.' % heartbeat_event) - - self.timeout = self.server_timeout or self.cooker.data.getVar('BB_SERVER_TIMEOUT') - try: - if self.timeout: - self.timeout = float(self.timeout) - except: - bb.warn('Ignoring invalid BB_SERVER_TIMEOUT=%s, must be a float specifying seconds.' 
-                seendata = True
-
-            if not self.idle:
-                self.idle = threading.Thread(target=self.idle_thread)
-                self.idle.start()
-            elif self.idle and not self.idle.is_alive():
-                serverlog("Idle thread terminated, main thread exiting too")
-                bb.error("Idle thread terminated, main thread exiting too")
-                self.quit = True
-
-            nextsleep = 1.0
-            if self.xmlrpc:
-                nextsleep = self.xmlrpc.get_timeout(nextsleep)
-            try:
-                ready = select.select(fds,[],[],nextsleep)[0]
-            except InterruptedError:
-                # Ignore EINTR
-                ready = []
-
-        if self.idle:
-            self.idle.join()
-
-        serverlog("Exiting (socket: %s)" % os.path.exists(self.sockname))
-        # Remove the socket file so we don't get any more connections to avoid races
-        # The build directory could have been renamed so if the file isn't the one we created
-        # we shouldn't delete it.
-        try:
-            sockinode = os.stat(self.sockname)[stat.ST_INO]
-            if sockinode == self.sockinode:
-                os.unlink(self.sockname)
-            else:
-                serverlog("bitbake.sock inode mismatch (%s vs %s), not deleting." % (sockinode, self.sockinode))
-        except Exception as err:
-            serverlog("Removing socket file '%s' failed (%s)" % (self.sockname, err))
-        self.sock.close()
-
-        try:
-            self.cooker.shutdown(True, idle=False)
-            self.cooker.notifier.stop()
-            self.cooker.confignotifier.stop()
-        except:
-            pass
-
-        self.cooker.post_serve()
-
-        if len(threading.enumerate()) != 1:
-            serverlog("More than one thread left?: " + str(threading.enumerate()))
-
-        # Flush logs before we release the lock
-        sys.stdout.flush()
-        sys.stderr.flush()
-
-        # Finally release the lockfile but warn about other processes holding it open
-        lock = self.bitbake_lock
-        lockfile = self.bitbake_lock_name
-
-        def get_lock_contents(lockfile):
-            try:
-                with open(lockfile, "r") as f:
-                    return f.readlines()
-            except FileNotFoundError:
-                return None
-
-        lock.close()
-        lock = None
-
-        while not lock:
-            i = 0
-            lock = None
-            if not os.path.exists(os.path.basename(lockfile)):
-                serverlog("Lockfile directory gone, exiting.")
-                return
-
-            while not lock and i < 30:
-                lock = bb.utils.lockfile(lockfile, shared=False, retry=False, block=False)
-                if not lock:
-                    newlockcontents = get_lock_contents(lockfile)
-                    if newlockcontents and not newlockcontents[0].startswith((f"{os.getpid()}\n", f"{os.getpid()} ")):
-                        # A new server was started, the lockfile contents changed, we can exit
-                        serverlog("Lockfile now contains different contents, exiting: " + str(newlockcontents))
-                        return
-                time.sleep(0.1)
-                i += 1
-            if lock:
-                # We hold the lock so we can remove the file (hide stale pid data)
-                # via unlockfile.
- bb.utils.unlockfile(lock) - serverlog("Exiting as we obtained the lock") - return - - if not lock: - procs = get_lockfile_process_msg(lockfile) - msg = ["Delaying shutdown due to active processes which appear to be holding bitbake.lock"] - if procs: - msg.append(":\n%s" % procs) - serverlog("".join(msg)) - - def idle_thread(self): - bb.utils.profile_function("idle" in self.cooker.configuration.profile, self.idle_thread_internal, "profile-idleloop.log") - - def idle_thread_internal(self): - def remove_idle_func(function): - with bb.utils.lock_timeout(self._idlefuncsLock): - del self._idlefuns[function] - self.idle_cond.notify_all() - - while not self.quit: - nextsleep = 1.0 - fds = [] - - with bb.utils.lock_timeout(self._idlefuncsLock): - items = list(self._idlefuns.items()) - - for function, data in items: - try: - retval = function(self, data, False) - if isinstance(retval, idleFinish): - serverlog("Removing idle function %s at idleFinish" % str(function)) - remove_idle_func(function) - self.cooker.command.finishAsyncCommand(retval.msg) - nextsleep = None - elif retval is False: - serverlog("Removing idle function %s" % str(function)) - remove_idle_func(function) - nextsleep = None - elif retval is True: - nextsleep = None - elif isinstance(retval, float) and nextsleep: - if (retval < nextsleep): - nextsleep = retval - elif nextsleep is None: - continue - else: - fds = fds + retval - except SystemExit: - raise - except Exception as exc: - if not isinstance(exc, bb.BBHandledException): - logger.exception('Running idle function') - remove_idle_func(function) - serverlog("Exception %s broke the idle_thread, exiting" % traceback.format_exc()) - self.quit = True - - # Create new heartbeat event? - now = time.time() - if items and bb.event._heartbeat_enabled and now >= self.next_heartbeat: - # We might have missed heartbeats. Just trigger once in - # that case and continue after the usual delay. - self.next_heartbeat += self.heartbeat_seconds - if self.next_heartbeat <= now: - self.next_heartbeat = now + self.heartbeat_seconds - if hasattr(self.cooker, "data"): - heartbeat = bb.event.HeartbeatEvent(now) - try: - bb.event.fire(heartbeat, self.cooker.data) - except Exception as exc: - if not isinstance(exc, bb.BBHandledException): - logger.exception('Running heartbeat function') - serverlog("Exception %s broke the idle_thread, exiting" % traceback.format_exc()) - self.quit = True - if nextsleep and bb.event._heartbeat_enabled and now + nextsleep > self.next_heartbeat: - # Shorten timeout so that we wake up in time for - # the heartbeat. - nextsleep = self.next_heartbeat - now - - if nextsleep is not None: - select.select(fds,[],[],nextsleep)[0] - -class ServerCommunicator(): - def __init__(self, connection, recv): - self.connection = connection - self.recv = recv - - def runCommand(self, command): - try: - self.connection.send(command) - except BrokenPipeError as e: - raise BrokenPipeError("bitbake-server might have died or been forcibly stopped, i.e. OOM killed") from e - if not self.recv.poll(30): - logger.info("No reply from server in 30s (for command %s at %s)" % (command[0], currenttime())) - if not self.recv.poll(30): - raise ProcessTimeout("Timeout while waiting for a reply from the bitbake server (60s at %s)" % currenttime()) - try: - ret, exc = self.recv.get() - except EOFError as e: - raise EOFError("bitbake-server might have died or been forcibly stopped, i.e. OOM killed") from e - # Should probably turn all exceptions in exc back into exceptions?
- # For now, at least handle BBHandledException - if exc and ("BBHandledException" in exc or "SystemExit" in exc): - raise bb.BBHandledException() - return ret, exc - - def updateFeatureSet(self, featureset): - _, error = self.runCommand(["setFeatures", featureset]) - if error: - logger.error("Unable to set the cooker to the correct featureset: %s" % error) - raise BaseException(error) - - def getEventHandle(self): - handle, error = self.runCommand(["getUIHandlerNum"]) - if error: - logger.error("Unable to get UI Handler Number: %s" % error) - raise BaseException(error) - - return handle - - def terminateServer(self): - self.connection.send(['terminateServer']) - return - -class BitBakeProcessServerConnection(object): - def __init__(self, ui_channel, recv, eq, sock): - self.connection = ServerCommunicator(ui_channel, recv) - self.events = eq - # Save sock so it doesn't get gc'd for the life of our connection - self.socket_connection = sock - - def terminate(self): - self.events.close() - self.socket_connection.close() - self.connection.connection.close() - self.connection.recv.close() - return - -start_log_format = '--- Starting bitbake server pid %s at %s ---' -start_log_datetime_format = '%Y-%m-%d %H:%M:%S.%f' - -class BitBakeServer(object): - - def __init__(self, lock, sockname, featureset, server_timeout, xmlrpcinterface, profile): - - self.server_timeout = server_timeout - self.xmlrpcinterface = xmlrpcinterface - self.featureset = featureset - self.sockname = sockname - self.bitbake_lock = lock - self.profile = profile - self.readypipe, self.readypipein = os.pipe() - - # Place the log in the build directory alongside the lock file - logfile = os.path.join(os.path.dirname(self.bitbake_lock.name), "bitbake-cookerdaemon.log") - self.logfile = logfile - - startdatetime = datetime.datetime.now() - bb.daemonize.createDaemon(self._startServer, logfile) - self.bitbake_lock.close() - os.close(self.readypipein) - - ready = ConnectionReader(self.readypipe) - r = ready.poll(5) - if not r: - bb.note("Bitbake server didn't start within 5 seconds, waiting for 90") - r = ready.poll(90) - if r: - try: - r = ready.get() - except EOFError: - # Trap the child exiting/closing the pipe and error out - r = None - if not r or r[0] != "r": - ready.close() - bb.error("Unable to start bitbake server (%s)" % str(r)) - if os.path.exists(logfile): - logstart_re = re.compile(start_log_format % ('([0-9]+)', '([0-9-]+ [0-9:.]+)')) - started = False - lines = [] - lastlines = [] - with open(logfile, "r") as f: - for line in f: - if started: - lines.append(line) - else: - lastlines.append(line) - res = logstart_re.search(line.rstrip()) - if res: - ldatetime = datetime.datetime.strptime(res.group(2), start_log_datetime_format) - if ldatetime >= startdatetime: - started = True - lines.append(line) - if len(lastlines) > 60: - lastlines = lastlines[-60:] - if lines: - if len(lines) > 60: - bb.error("Last 60 lines of server log for this session (%s):\n%s" % (logfile, "".join(lines[-60:]))) - else: - bb.error("Server log for this session (%s):\n%s" % (logfile, "".join(lines))) - elif lastlines: - bb.error("Server didn't start, last 60 loglines (%s):\n%s" % (logfile, "".join(lastlines))) - else: - bb.error("%s doesn't exist" % logfile) - - raise SystemExit(1) - - ready.close() - - def _startServer(self): - os.close(self.readypipe) - os.set_inheritable(self.bitbake_lock.fileno(), True) - os.set_inheritable(self.readypipein, True) - serverscript = os.path.realpath(os.path.dirname(__file__) + "/../../../bin/bitbake-server") -
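# The daemonized child replaces itself with the dedicated bin/bitbake-server
# script below. The lock and ready-pipe file descriptors were marked
# inheritable above and are passed by number on the command line, along with
# the log file, lock/socket names, server timeout, profile settings and the
# XMLRPC interface. The fixed "decafbad" argument is presumably a sanity
# marker checked by that script (the script itself is not part of this diff).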
os.execl(sys.executable, sys.executable, serverscript, "decafbad", str(self.bitbake_lock.fileno()), str(self.readypipein), self.logfile, self.bitbake_lock.name, self.sockname, str(self.server_timeout or 0), str(list(self.profile)), str(self.xmlrpcinterface[0]), str(self.xmlrpcinterface[1])) - -def execServer(lockfd, readypipeinfd, lockname, sockname, server_timeout, xmlrpcinterface, profile): - - import bb.cookerdata - import bb.cooker - - serverlog(start_log_format % (os.getpid(), datetime.datetime.now().strftime(start_log_datetime_format))) - - try: - bitbake_lock = os.fdopen(lockfd, "w") - - # Create server control socket - if os.path.exists(sockname): - serverlog("WARNING: removing existing socket file '%s'" % sockname) - os.unlink(sockname) - - sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - # AF_UNIX has path length issues so chdir here to work around it - cwd = os.getcwd() - try: - os.chdir(os.path.dirname(sockname)) - sock.bind(os.path.basename(sockname)) - finally: - os.chdir(cwd) - sock.listen(1) - - server = ProcessServer(bitbake_lock, lockname, sock, sockname, server_timeout, xmlrpcinterface) - writer = ConnectionWriter(readypipeinfd) - try: - featureset = [] - cooker = bb.cooker.BBCooker(featureset, server) - cooker.configuration.profile = profile - except bb.BBHandledException: - return None - writer.send("r") - writer.close() - server.cooker = cooker - serverlog("Started bitbake server pid %d" % os.getpid()) - - server.run() - finally: - # Flush any messages/errors to the logfile before exit - sys.stdout.flush() - sys.stderr.flush() - -def connectProcessServer(sockname, featureset): - # Connect to socket - sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - # AF_UNIX has path length issues so chdir here to work around it - cwd = os.getcwd() - - readfd = writefd = readfd1 = writefd1 = readfd2 = writefd2 = None - eq = command_chan_recv = command_chan = None - - sock.settimeout(10) - - try: - try: - os.chdir(os.path.dirname(sockname)) - finished = False - while not finished: - try: - sock.connect(os.path.basename(sockname)) - finished = True - except IOError as e: - if e.errno == errno.EWOULDBLOCK: - pass - raise - finally: - os.chdir(cwd) - - # Send an fd for the remote to write events to - readfd, writefd = os.pipe() - eq = BBUIEventQueue(readfd) - # Send an fd for the remote to receive commands from - readfd1, writefd1 = os.pipe() - command_chan = ConnectionWriter(writefd1) - # Send an fd for the remote to write command results to - readfd2, writefd2 = os.pipe() - command_chan_recv = ConnectionReader(readfd2) - - sendfds(sock, [writefd, readfd1, writefd2]) - - server_connection = BitBakeProcessServerConnection(command_chan, command_chan_recv, eq, sock) - - # Close the ends of the pipes we won't use - for i in [writefd, readfd1, writefd2]: - os.close(i) - - server_connection.connection.updateFeatureSet(featureset) - - except (Exception, SystemExit) as e: - if command_chan_recv: - command_chan_recv.close() - if command_chan: - command_chan.close() - for i in [writefd, readfd1, writefd2]: - try: - if i: - os.close(i) - except OSError: - pass - sock.close() - raise - - return server_connection - -def sendfds(sock, fds): - '''Send an array of fds over an AF_UNIX socket.''' - fds = array.array('i', fds) - msg = bytes([len(fds) % 256]) - sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) - -def recvfds(sock, size): - '''Receive an array of fds over an AF_UNIX socket.''' - a = array.array('i') - bytes_size = a.itemsize * size - msg, ancdata, flags, addr =
sock.recvmsg(1, socket.CMSG_LEN(bytes_size)) - if not msg and not ancdata: - raise EOFError - try: - if len(ancdata) != 1: - raise RuntimeError('received %d items of ancdata' % - len(ancdata)) - cmsg_level, cmsg_type, cmsg_data = ancdata[0] - if (cmsg_level == socket.SOL_SOCKET and - cmsg_type == socket.SCM_RIGHTS): - if len(cmsg_data) % a.itemsize != 0: - raise ValueError - a.frombytes(cmsg_data) - assert len(a) % 256 == msg[0] - return list(a) - except (ValueError, IndexError): - pass - raise RuntimeError('Invalid data received') - -class BBUIEventQueue: - def __init__(self, readfd): - - self.eventQueue = [] - self.eventQueueLock = threading.Lock() - self.eventQueueNotify = threading.Event() - - self.reader = ConnectionReader(readfd) - - self.t = threading.Thread() - self.t.run = self.startCallbackHandler - self.t.start() - - def getEvent(self): - with bb.utils.lock_timeout(self.eventQueueLock): - if len(self.eventQueue) == 0: - return None - - item = self.eventQueue.pop(0) - if len(self.eventQueue) == 0: - self.eventQueueNotify.clear() - - return item - - def waitEvent(self, delay): - self.eventQueueNotify.wait(delay) - return self.getEvent() - - def queue_event(self, event): - with bb.utils.lock_timeout(self.eventQueueLock): - self.eventQueue.append(event) - self.eventQueueNotify.set() - - def send_event(self, event): - self.queue_event(pickle.loads(event)) - - def startCallbackHandler(self): - bb.utils.set_process_name("UIEventQueue") - while True: - try: - ready = self.reader.wait(0.25) - if ready: - event = self.reader.get() - self.queue_event(event) - except (EOFError, OSError, TypeError): - # Easiest way to exit is to close the file descriptor to cause an exit - break - - def close(self): - self.reader.close() - self.t.join() - -class ConnectionReader(object): - - def __init__(self, fd): - self.reader = multiprocessing.connection.Connection(fd, writable=False) - self.rlock = multiprocessing.Lock() - - def wait(self, timeout=None): - return multiprocessing.connection.wait([self.reader], timeout) - - def poll(self, timeout=None): - return self.reader.poll(timeout) - - def get(self): - with bb.utils.lock_timeout(self.rlock): - res = self.reader.recv_bytes() - return multiprocessing.reduction.ForkingPickler.loads(res) - - def fileno(self): - return self.reader.fileno() - - def close(self): - return self.reader.close() - - -class ConnectionWriter(object): - - def __init__(self, fd): - self.writer = multiprocessing.connection.Connection(fd, readable=False) - self.wlock = multiprocessing.Lock() - # Why bb.event needs this I have no idea - self.event = self - - def _send(self, obj): - gc.disable() - with bb.utils.lock_timeout(self.wlock): - self.writer.send_bytes(obj) - gc.enable() - - def send(self, obj): - obj = multiprocessing.reduction.ForkingPickler.dumps(obj) - # See notes/code in CookerParser - # We must not terminate holding this lock else processes will hang. - # For SIGTERM, raising afterwards avoids this. - # For SIGINT, we don't want to have written partial data to the pipe. 
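# Instead, the process-level signal handler cooperates with this writer:
# signals arriving while the write lock is held are queued (queue_signals
# below) and replayed via handle_sig() once the send has completed.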
- # pthread_sigmask block/unblock would be nice but doesn't work, https://bugs.python.org/issue47139 - process = multiprocessing.current_process() - if process and hasattr(process, "queue_signals"): - with bb.utils.lock_timeout(process.signal_threadlock): - process.queue_signals = True - self._send(obj) - process.queue_signals = False - - while len(process.signal_received) > 0: - sig = process.signal_received.pop() - process.handle_sig(sig, None) - else: - self._send(obj) - - def fileno(self): - return self.writer.fileno() - - def close(self): - return self.writer.close() diff --git a/bitbake/lib/bb/server/xmlrpcclient.py b/bitbake/lib/bb/server/xmlrpcclient.py deleted file mode 100644 index 442ea7b264..0000000000 --- a/bitbake/lib/bb/server/xmlrpcclient.py +++ /dev/null @@ -1,141 +0,0 @@ -# -# BitBake XMLRPC Client Interface -# -# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer -# Copyright (C) 2006 - 2008 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import socket -import http.client -import xmlrpc.client - -import bb -from bb.ui import uievent - -class BBTransport(xmlrpc.client.Transport): - def __init__(self, timeout): - self.timeout = timeout - self.connection_token = None - xmlrpc.client.Transport.__init__(self) - - # Modified from default to pass timeout to HTTPConnection - def make_connection(self, host): - # return an existing connection if possible. This allows - # HTTP/1.1 keep-alive. - if self._connection and host == self._connection[0]: - return self._connection[1] - - # create an HTTP connection object from a host descriptor - chost, self._extra_headers, x509 = self.get_host_info(host) - # store the host argument along with the connection object - self._connection = host, http.client.HTTPConnection(chost, timeout=self.timeout) - return self._connection[1] - - def set_connection_token(self, token): - self.connection_token = token - - def send_content(self, h, body): - if self.connection_token: - h.putheader("Bitbake-token", self.connection_token) - xmlrpc.client.Transport.send_content(self, h, body) - -def _create_server(host, port, timeout = 60): - t = BBTransport(timeout) - s = xmlrpc.client.ServerProxy("http://%s:%d/" % (host, port), transport=t, allow_none=True, use_builtin_types=True) - return s, t - -def check_connection(remote, timeout): - try: - host, port = remote.split(":") - port = int(port) - except Exception as e: - bb.warn("Failed to read remote definition (%s)" % str(e)) - raise e - - server, _transport = _create_server(host, port, timeout) - try: - ret, err = server.runCommand(['getVariable', 'TOPDIR']) - if err or not ret: - return False - except ConnectionError: - return False - return True - -class BitBakeXMLRPCServerConnection(object): - def __init__(self, host, port, clientinfo=("localhost", 0), observer_only = False, featureset = None): - self.connection, self.transport = _create_server(host, port) - self.clientinfo = clientinfo - self.observer_only = observer_only - if featureset: - self.featureset = featureset - else: - self.featureset = [] - - self.events = uievent.BBUIEventQueue(self.connection, self.clientinfo) - - _, error = self.connection.runCommand(["setFeatures", self.featureset]) - if error: - # disconnect the client, we can't make the setFeatures command work - self.connection.removeClient() - # no need to log it here, the error will be sent to the client - raise BaseException(error) - - def connect(self, token = None): - if token is None: - if self.observer_only: - token = "observer" - else: - token = self.connection.addClient() - -
if token is None: - return None - - self.transport.set_connection_token(token) - return self - - def removeClient(self): - if not self.observer_only: - self.connection.removeClient() - - def terminate(self): - # Don't wait for server indefinitely - socket.setdefaulttimeout(2) - try: - self.events.system_quit() - except: - pass - try: - self.connection.removeClient() - except: - pass - -def connectXMLRPC(remote, featureset, observer_only = False, token = None): - # The format of "remote" must be "server:port" - try: - [host, port] = remote.split(":") - port = int(port) - except Exception as e: - bb.warn("Failed to parse remote definition %s (%s)" % (remote, str(e))) - raise e - - # We need our IP for the server connection. We get the IP - # by trying to connect to the server - try: - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - s.connect((host, port)) - ip = s.getsockname()[0] - s.close() - except Exception as e: - bb.warn("Could not create socket for %s:%s (%s)" % (host, port, str(e))) - raise e - try: - connection = BitBakeXMLRPCServerConnection(host, port, (ip, 0), observer_only, featureset) - return connection.connect(token) - except Exception as e: - bb.warn("Could not connect to server at %s:%s (%s)" % (host, port, str(e))) - raise e - - - diff --git a/bitbake/lib/bb/server/xmlrpcserver.py b/bitbake/lib/bb/server/xmlrpcserver.py deleted file mode 100644 index ebc271aca4..0000000000 --- a/bitbake/lib/bb/server/xmlrpcserver.py +++ /dev/null @@ -1,149 +0,0 @@ -# -# BitBake XMLRPC Server Interface -# -# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer -# Copyright (C) 2006 - 2008 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import hashlib -import time -import inspect -from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler -import bb.server.xmlrpcclient - -import bb -import bb.cooker -import bb.event - -# This request handler checks if the request has a "Bitbake-token" header -# field (this comes from the client side) and compares it with its internal -# "Bitbake-token" field (this comes from the server). If the two are not -# equal, it is assumed that a client is trying to connect to the server -# while another client is connected to the server. In this case, a 503 error -# ("service unavailable") is returned to the client.
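For illustration only (this sketch is not part of the deleted sources): the token handshake described in the comment above, seen from the client side. The host, port and function name here are hypothetical, and a plain ServerProxy cannot set per-request headers, which is why the real client routes the token through BBTransport.set_connection_token()/send_content() shown earlier.

import xmlrpc.client

def attach_ui(host="localhost", port=8888):
    # Hypothetical helper: ask the server for the single UI slot.
    server = xmlrpc.client.ServerProxy("http://%s:%d/" % (host, port), allow_none=True)
    token = server.addClient()   # None means a UI is already attached
    if token is None:
        raise RuntimeError("another client already holds the UI connection")
    # Every later request must present this value in the "Bitbake-token"
    # header; the special token "observer" bypasses the check but makes the
    # server treat the connection as read-only (see do_POST below).
    return server, token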
-class BitBakeXMLRPCRequestHandler(SimpleXMLRPCRequestHandler): - def __init__(self, request, client_address, server): - self.server = server - SimpleXMLRPCRequestHandler.__init__(self, request, client_address, server) - - def do_POST(self): - try: - remote_token = self.headers["Bitbake-token"] - except: - remote_token = None - if 0 and remote_token != self.server.connection_token and remote_token != "observer": - self.report_503() - else: - if remote_token == "observer": - self.server.readonly = True - else: - self.server.readonly = False - SimpleXMLRPCRequestHandler.do_POST(self) - - def report_503(self): - self.send_response(503) - response = 'No more clients allowed' - self.send_header("Content-type", "text/plain") - self.send_header("Content-length", str(len(response))) - self.end_headers() - self.wfile.write(bytes(response, 'utf-8')) - -class BitBakeXMLRPCServer(SimpleXMLRPCServer): - # remove this when you're done with debugging - # allow_reuse_address = True - - def __init__(self, interface, cooker, parent): - # Use auto port configuration - if interface[1] == -1: - interface = (interface[0], 0) - SimpleXMLRPCServer.__init__(self, interface, - requestHandler=BitBakeXMLRPCRequestHandler, - logRequests=False, allow_none=True) - self.host, self.port = self.socket.getsockname() - self.interface = interface - - self.connection_token = None - self.commands = BitBakeXMLRPCServerCommands(self) - self.register_functions(self.commands, "") - - self.cooker = cooker - self.parent = parent - - - def register_functions(self, context, prefix): - """ - Convenience method for registering all functions in the scope - of this class that start with a common prefix - """ - methodlist = inspect.getmembers(context, inspect.ismethod) - for name, method in methodlist: - if name.startswith(prefix): - self.register_function(method, name[len(prefix):]) - - def get_timeout(self, delay): - socktimeout = self.socket.gettimeout() or delay - return min(socktimeout, delay) - - def handle_requests(self): - self._handle_request_noblock() - -class BitBakeXMLRPCServerCommands: - - def __init__(self, server): - self.server = server - self.has_client = False - self.event_handle = None - - def registerEventHandler(self, host, port): - """ - Register a remote UI Event Handler - """ - s, t = bb.server.xmlrpcclient._create_server(host, port) - - # we don't allow connections if the cooker is running - if self.server.cooker.state in [bb.cooker.State.PARSING, bb.cooker.State.RUNNING]: - return None, f"Cooker is busy: {self.server.cooker.state.name}" - - self.event_handle = bb.event.register_UIHhandler(s, True) - return self.event_handle, 'OK' - - def unregisterEventHandler(self, handlerNum): - """ - Unregister a remote UI Event Handler - """ - ret = bb.event.unregister_UIHhandler(handlerNum, True) - self.event_handle = None - return ret - - def runCommand(self, command): - """ - Run a cooker command on the server - """ - return self.server.cooker.command.runCommand(command, self.server.parent, self.server.readonly) - - def getEventHandle(self): - return self.event_handle - - def terminateServer(self): - """ - Trigger the server to quit - """ - self.server.parent.quit = True - print("XMLRPC Server triggering exit") - return - - def addClient(self): - if self.server.parent.haveui: - return None - token = hashlib.md5(str(time.time()).encode("utf-8")).hexdigest() - self.server.connection_token = token - self.server.parent.haveui = True - return token - - def removeClient(self): - if self.server.parent.haveui: -
self.server.connection_token = None - self.server.parent.haveui = False - diff --git a/bitbake/lib/bb/siggen.py b/bitbake/lib/bb/siggen.py deleted file mode 100644 index 41eb643012..0000000000 --- a/bitbake/lib/bb/siggen.py +++ /dev/null @@ -1,1269 +0,0 @@ -# -# Copyright BitBake Contributors -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import hashlib -import logging -import os -import re -import tempfile -import pickle -import bb.data -import difflib -import simplediff -import json -import types -from contextlib import contextmanager -import bb.compress.zstd -from bb.checksum import FileChecksumCache -from bb import runqueue -import hashserv -import hashserv.client - -logger = logging.getLogger('BitBake.SigGen') -hashequiv_logger = logging.getLogger('BitBake.SigGen.HashEquiv') - -# find_siginfo and find_siginfo_version are set by the metadata siggen -# The minimum version of the find_siginfo function we need -find_siginfo_minversion = 2 - -HASHSERV_ENVVARS = [ - "SSL_CERT_DIR", - "SSL_CERT_FILE", - "NO_PROXY", - "HTTPS_PROXY", - "HTTP_PROXY" -] - -def check_siggen_version(siggen): - if not hasattr(siggen, "find_siginfo_version"): - bb.fatal("Siggen from metadata (OE-Core?) is too old, please update it (no version found)") - if siggen.find_siginfo_version < siggen.find_siginfo_minversion: - bb.fatal("Siggen from metadata (OE-Core?) is too old, please update it (%s vs %s)" % (siggen.find_siginfo_version, siggen.find_siginfo_minversion)) - -class SetEncoder(json.JSONEncoder): - def default(self, obj): - if isinstance(obj, set) or isinstance(obj, frozenset): - return dict(_set_object=list(sorted(obj))) - return json.JSONEncoder.default(self, obj) - -def SetDecoder(dct): - if '_set_object' in dct: - return frozenset(dct['_set_object']) - return dct - -def init(d): - siggens = [obj for obj in globals().values() - if type(obj) is type and issubclass(obj, SignatureGenerator)] - - desired = d.getVar("BB_SIGNATURE_HANDLER") or "noop" - for sg in siggens: - if desired == sg.name: - return sg(d) - else: - logger.error("Invalid signature generator '%s', using default 'noop'\n" - "Available generators: %s", desired, - ', '.join(obj.name for obj in siggens)) - return SignatureGenerator(d) - -class SignatureGenerator(object): - """ - """ - name = "noop" - - def __init__(self, data): - self.basehash = {} - self.taskhash = {} - self.unihash = {} - self.runtaskdeps = {} - self.file_checksum_values = {} - self.taints = {} - self.unitaskhashes = {} - self.tidtopn = {} - self.setscenetasks = set() - - def finalise(self, fn, d, variant): - return - - def postparsing_clean_cache(self): - return - - def setup_datacache(self, datacaches): - self.datacaches = datacaches - - def setup_datacache_from_datastore(self, mcfn, d): - # In task context we have no cache so set up internal data structures - # from the fully parsed data store provided - - mc = d.getVar("__BBMULTICONFIG", False) or "" - tasks = d.getVar('__BBTASKS', False) - - self.datacaches = {} - self.datacaches[mc] = types.SimpleNamespace() - setattr(self.datacaches[mc], "stamp", {}) - self.datacaches[mc].stamp[mcfn] = d.getVar('STAMP') - setattr(self.datacaches[mc], "stamp_extrainfo", {}) - self.datacaches[mc].stamp_extrainfo[mcfn] = {} - for t in tasks: - flag = d.getVarFlag(t, "stamp-extra-info") - if flag: - self.datacaches[mc].stamp_extrainfo[mcfn][t] = flag - - def get_cached_unihash(self, tid): - return None - - def get_unihash(self, tid): - unihash = self.get_cached_unihash(tid) - if unihash: - return unihash - return self.taskhash[tid] - - def
get_unihashes(self, tids): - return {tid: self.get_unihash(tid) for tid in tids} - - def prep_taskhash(self, tid, deps, dataCaches): - return - - def get_taskhash(self, tid, deps, dataCaches): - self.taskhash[tid] = hashlib.sha256(tid.encode("utf-8")).hexdigest() - return self.taskhash[tid] - - def writeout_file_checksum_cache(self): - """Write/update the file checksum cache onto disk""" - return - - def stampfile_base(self, mcfn): - mc = bb.runqueue.mc_from_tid(mcfn) - return self.datacaches[mc].stamp[mcfn] - - def stampfile_mcfn(self, taskname, mcfn, extrainfo=True): - mc = bb.runqueue.mc_from_tid(mcfn) - stamp = self.datacaches[mc].stamp[mcfn] - if not stamp: - return - - stamp_extrainfo = "" - if extrainfo: - taskflagname = taskname - if taskname.endswith("_setscene"): - taskflagname = taskname.replace("_setscene", "") - stamp_extrainfo = self.datacaches[mc].stamp_extrainfo[mcfn].get(taskflagname) or "" - - return self.stampfile(stamp, mcfn, taskname, stamp_extrainfo) - - def stampfile(self, stampbase, file_name, taskname, extrainfo): - return ("%s.%s.%s" % (stampbase, taskname, extrainfo)).rstrip('.') - - def stampcleanmask_mcfn(self, taskname, mcfn): - mc = bb.runqueue.mc_from_tid(mcfn) - stamp = self.datacaches[mc].stamp[mcfn] - if not stamp: - return [] - - taskflagname = taskname - if taskname.endswith("_setscene"): - taskflagname = taskname.replace("_setscene", "") - stamp_extrainfo = self.datacaches[mc].stamp_extrainfo[mcfn].get(taskflagname) or "" - - return self.stampcleanmask(stamp, mcfn, taskname, stamp_extrainfo) - - def stampcleanmask(self, stampbase, file_name, taskname, extrainfo): - return ("%s.%s.%s" % (stampbase, taskname, extrainfo)).rstrip('.') - - def dump_sigtask(self, mcfn, task, stampbase, runtime): - return - - def invalidate_task(self, task, mcfn): - mc = bb.runqueue.mc_from_tid(mcfn) - stamp = self.datacaches[mc].stamp[mcfn] - bb.utils.remove(stamp) - - def dump_sigs(self, dataCache, options): - return - - def get_taskdata(self): - return (self.runtaskdeps, self.taskhash, self.unihash, self.file_checksum_values, self.taints, self.basehash, self.unitaskhashes, self.tidtopn, self.setscenetasks) - - def set_taskdata(self, data): - self.runtaskdeps, self.taskhash, self.unihash, self.file_checksum_values, self.taints, self.basehash, self.unitaskhashes, self.tidtopn, self.setscenetasks = data - - def reset(self, data): - self.__init__(data) - - def get_taskhashes(self): - return self.taskhash, self.unihash, self.unitaskhashes, self.tidtopn - - def set_taskhashes(self, hashes): - self.taskhash, self.unihash, self.unitaskhashes, self.tidtopn = hashes - - def save_unitaskhashes(self): - return - - def set_setscene_tasks(self, setscene_tasks): - return - - def exit(self): - return - -def build_pnid(mc, pn, taskname): - if mc: - return "mc:" + mc + ":" + pn + ":" + taskname - return pn + ":" + taskname - -class SignatureGeneratorBasic(SignatureGenerator): - """ - """ - name = "basic" - - def __init__(self, data): - self.basehash = {} - self.taskhash = {} - self.unihash = {} - self.runtaskdeps = {} - self.file_checksum_values = {} - self.taints = {} - self.setscenetasks = set() - self.basehash_ignore_vars = set((data.getVar("BB_BASEHASH_IGNORE_VARS") or "").split()) - self.taskhash_ignore_tasks = None - self.init_rundepcheck(data) - checksum_cache_file = data.getVar("BB_HASH_CHECKSUM_CACHE_FILE") - if checksum_cache_file: - self.checksum_cache = FileChecksumCache() - self.checksum_cache.init_cache(data, checksum_cache_file) - else: - self.checksum_cache = None - - 
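# Persistent cache of unihashes keyed by "mc:pn:taskname" and mapping each
# task to its (taskhash, unihash) pair (see set_unihash later in this file),
# stored as bb_unihashes.dat; the "3" is the cache format version passed to
# SimpleCache.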
self.unihash_cache = bb.cache.SimpleCache("3") - self.unitaskhashes = self.unihash_cache.init_cache(data, "bb_unihashes.dat", {}) - self.localdirsexclude = (data.getVar("BB_SIGNATURE_LOCAL_DIRS_EXCLUDE") or "CVS .bzr .git .hg .osc .p4 .repo .svn").split() - self.tidtopn = {} - - def init_rundepcheck(self, data): - self.taskhash_ignore_tasks = data.getVar("BB_TASKHASH_IGNORE_TASKS") or None - if self.taskhash_ignore_tasks: - self.twl = re.compile(self.taskhash_ignore_tasks) - else: - self.twl = None - - def _build_data(self, mcfn, d): - - ignore_mismatch = ((d.getVar("BB_HASH_IGNORE_MISMATCH") or '') == '1') - tasklist, gendeps, lookupcache = bb.data.generate_dependencies(d, self.basehash_ignore_vars) - - taskdeps, basehash = bb.data.generate_dependency_hash(tasklist, gendeps, lookupcache, self.basehash_ignore_vars, mcfn) - - for task in tasklist: - tid = mcfn + ":" + task - if not ignore_mismatch and tid in self.basehash and self.basehash[tid] != basehash[tid]: - bb.error("When reparsing %s, the basehash value changed from %s to %s. The metadata is not deterministic and this needs to be fixed." % (tid, self.basehash[tid], basehash[tid])) - bb.error("The following commands may help:") - cmd = "$ bitbake %s -c%s" % (d.getVar('PN'), task) - # Make sure sigdata is dumped before running printdiff - bb.error("%s -Snone" % cmd) - bb.error("Then:") - bb.error("%s -Sprintdiff\n" % cmd) - self.basehash[tid] = basehash[tid] - - return taskdeps, gendeps, lookupcache - - def set_setscene_tasks(self, setscene_tasks): - self.setscenetasks = set(setscene_tasks) - - def finalise(self, fn, d, variant): - - mc = d.getVar("__BBMULTICONFIG", False) or "" - mcfn = fn - if variant or mc: - mcfn = bb.cache.realfn2virtual(fn, variant, mc) - - try: - taskdeps, gendeps, lookupcache = self._build_data(mcfn, d) - except bb.parse.SkipRecipe: - raise - except: - bb.warn("Error during finalise of %s" % mcfn) - raise - - basehashes = {} - for task in taskdeps: - basehashes[task] = self.basehash[mcfn + ":" + task] - - d.setVar("__siggen_basehashes", basehashes) - d.setVar("__siggen_gendeps", gendeps) - d.setVar("__siggen_varvals", lookupcache) - d.setVar("__siggen_taskdeps", taskdeps) - - # Slow but can be useful for debugging mismatched basehashes - #self.setup_datacache_from_datastore(mcfn, d) - #for task in taskdeps: - # self.dump_sigtask(mcfn, task, d.getVar("STAMP"), False) - - def setup_datacache_from_datastore(self, mcfn, d): - super().setup_datacache_from_datastore(mcfn, d) - - mc = bb.runqueue.mc_from_tid(mcfn) - for attr in ["siggen_varvals", "siggen_taskdeps", "siggen_gendeps"]: - if not hasattr(self.datacaches[mc], attr): - setattr(self.datacaches[mc], attr, {}) - self.datacaches[mc].siggen_varvals[mcfn] = d.getVar("__siggen_varvals") - self.datacaches[mc].siggen_taskdeps[mcfn] = d.getVar("__siggen_taskdeps") - self.datacaches[mc].siggen_gendeps[mcfn] = d.getVar("__siggen_gendeps") - - def rundep_check(self, fn, recipename, task, dep, depname, dataCaches): - # Return True if we should keep the dependency, False to drop it - # We only manipulate the dependencies for packages not in the ignore - # list - if self.twl and not self.twl.search(recipename): - # then process the actual dependencies - if self.twl.search(depname): - return False - return True - - def read_taint(self, fn, task, stampbase): - taint = None - try: - with open(stampbase + '.'
+ task + '.taint', 'r') as taintf: - taint = taintf.read() - except IOError: - pass - return taint - - def prep_taskhash(self, tid, deps, dataCaches): - - (mc, _, task, mcfn) = bb.runqueue.split_tid_mcfn(tid) - - self.basehash[tid] = dataCaches[mc].basetaskhash[tid] - self.runtaskdeps[tid] = [] - self.file_checksum_values[tid] = [] - recipename = dataCaches[mc].pkg_fn[mcfn] - - self.tidtopn[tid] = recipename - # save hashfn for deps into siginfo? - for dep in deps: - (depmc, _, deptask, depmcfn) = bb.runqueue.split_tid_mcfn(dep) - dep_pn = dataCaches[depmc].pkg_fn[depmcfn] - - if not self.rundep_check(mcfn, recipename, task, dep, dep_pn, dataCaches): - continue - - if dep not in self.taskhash: - bb.fatal("%s is not in taskhash, caller isn't calling in dependency order?" % dep) - - dep_pnid = build_pnid(depmc, dep_pn, deptask) - self.runtaskdeps[tid].append((dep_pnid, dep)) - - if task in dataCaches[mc].file_checksums[mcfn]: - if self.checksum_cache: - checksums = self.checksum_cache.get_checksums(dataCaches[mc].file_checksums[mcfn][task], recipename, self.localdirsexclude) - else: - checksums = bb.fetch2.get_file_checksums(dataCaches[mc].file_checksums[mcfn][task], recipename, self.localdirsexclude) - for (f,cs) in checksums: - self.file_checksum_values[tid].append((f,cs)) - - taskdep = dataCaches[mc].task_deps[mcfn] - if 'nostamp' in taskdep and task in taskdep['nostamp']: - # Nostamp tasks need an implicit taint so that they force any dependent tasks to run - if tid in self.taints and self.taints[tid].startswith("nostamp:"): - # Don't reset taint value upon every call - pass - else: - import uuid - taint = str(uuid.uuid4()) - self.taints[tid] = "nostamp:" + taint - - taint = self.read_taint(mcfn, task, dataCaches[mc].stamp[mcfn]) - if taint: - self.taints[tid] = taint - logger.warning("%s is tainted from a forced run" % tid) - - return set(dep for _, dep in self.runtaskdeps[tid]) - - def get_taskhash(self, tid, deps, dataCaches): - - data = self.basehash[tid] - for dep in sorted(self.runtaskdeps[tid]): - data += self.get_unihash(dep[1]) - - for (f, cs) in sorted(self.file_checksum_values[tid], key=clean_checksum_file_path): - if cs: - if "/./" in f: - data += "./" + f.split("/./")[1] - data += cs - - if tid in self.taints: - if self.taints[tid].startswith("nostamp:"): - data += self.taints[tid][8:] - else: - data += self.taints[tid] - - h = hashlib.sha256(data.encode("utf-8")).hexdigest() - self.taskhash[tid] = h - #d.setVar("BB_TASKHASH:task-%s" % task, taskhash[task]) - return h - - def writeout_file_checksum_cache(self): - """Write/update the file checksum cache onto disk""" - if self.checksum_cache: - self.checksum_cache.save_extras() - self.checksum_cache.save_merge() - else: - bb.fetch2.fetcher_parse_save() - bb.fetch2.fetcher_parse_done() - - def save_unitaskhashes(self): - self.unihash_cache.save(self.unitaskhashes) - - def dump_sigtask(self, mcfn, task, stampbase, runtime): - tid = mcfn + ":" + task - mc = bb.runqueue.mc_from_tid(mcfn) - referencestamp = stampbase - if isinstance(runtime, str) and runtime.startswith("customfile"): - sigfile = stampbase - referencestamp = runtime[11:] - elif runtime and tid in self.taskhash: - sigfile = stampbase + "." + task + ".sigdata" + "." + self.get_unihash(tid) - else: - sigfile = stampbase + "." + task + ".sigbasedata" + "." 
+ self.basehash[tid] - - with bb.utils.umask(0o002): - bb.utils.mkdirhier(os.path.dirname(sigfile)) - - data = {} - data['task'] = task - data['basehash_ignore_vars'] = self.basehash_ignore_vars - data['taskhash_ignore_tasks'] = self.taskhash_ignore_tasks - data['taskdeps'] = self.datacaches[mc].siggen_taskdeps[mcfn][task] - data['basehash'] = self.basehash[tid] - data['gendeps'] = {} - data['varvals'] = {} - data['varvals'][task] = self.datacaches[mc].siggen_varvals[mcfn][task] - for dep in self.datacaches[mc].siggen_taskdeps[mcfn][task]: - if dep in self.basehash_ignore_vars: - continue - data['gendeps'][dep] = self.datacaches[mc].siggen_gendeps[mcfn][dep] - data['varvals'][dep] = self.datacaches[mc].siggen_varvals[mcfn][dep] - - if runtime and tid in self.taskhash: - data['runtaskdeps'] = [dep[0] for dep in sorted(self.runtaskdeps[tid])] - data['file_checksum_values'] = [] - for f,cs in sorted(self.file_checksum_values[tid], key=clean_checksum_file_path): - if "/./" in f: - data['file_checksum_values'].append(("./" + f.split("/./")[1], cs)) - else: - data['file_checksum_values'].append((os.path.basename(f), cs)) - data['runtaskhashes'] = {} - for dep in self.runtaskdeps[tid]: - data['runtaskhashes'][dep[0]] = self.get_unihash(dep[1]) - data['taskhash'] = self.taskhash[tid] - data['unihash'] = self.get_unihash(tid) - - taint = self.read_taint(mcfn, task, referencestamp) - if taint: - data['taint'] = taint - - if runtime and tid in self.taints: - if 'nostamp:' in self.taints[tid]: - data['taint'] = self.taints[tid] - - computed_basehash = calc_basehash(data) - if computed_basehash != self.basehash[tid]: - bb.error("Basehash mismatch %s versus %s for %s" % (computed_basehash, self.basehash[tid], tid)) - if runtime and tid in self.taskhash: - computed_taskhash = calc_taskhash(data) - if computed_taskhash != self.taskhash[tid]: - bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[tid], tid)) - sigfile = sigfile.replace(self.taskhash[tid], computed_taskhash) - - fd, tmpfile = bb.utils.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.") - try: - with bb.compress.zstd.open(fd, "wt", encoding="utf-8", num_threads=1) as f: - json.dump(data, f, sort_keys=True, separators=(",", ":"), cls=SetEncoder) - f.flush() - os.chmod(tmpfile, 0o664) - bb.utils.rename(tmpfile, sigfile) - except (OSError, IOError) as err: - try: - os.unlink(tmpfile) - except OSError: - pass - raise err - -class SignatureGeneratorBasicHash(SignatureGeneratorBasic): - name = "basichash" - - def get_stampfile_hash(self, tid): - if tid in self.taskhash: - return self.taskhash[tid] - - # If task is not in basehash, then error - return self.basehash[tid] - - def stampfile(self, stampbase, mcfn, taskname, extrainfo, clean=False): - if taskname.endswith("_setscene"): - tid = mcfn + ":" + taskname[:-9] - else: - tid = mcfn + ":" + taskname - if clean: - h = "*" - else: - h = self.get_stampfile_hash(tid) - - return ("%s.%s.%s.%s" % (stampbase, taskname, h, extrainfo)).rstrip('.') - - def stampcleanmask(self, stampbase, mcfn, taskname, extrainfo): - return self.stampfile(stampbase, mcfn, taskname, extrainfo, clean=True) - - def invalidate_task(self, task, mcfn): - bb.note("Tainting hash to force rebuild of task %s, %s" % (mcfn, task)) - - mc = bb.runqueue.mc_from_tid(mcfn) - stamp = self.datacaches[mc].stamp[mcfn] - - taintfn = stamp + '.' 
+ task + '.taint' - - import uuid - bb.utils.mkdirhier(os.path.dirname(taintfn)) - # The specific content of the taint file is not really important, - # we just need it to be random, so a random UUID is used - with open(taintfn, 'w') as taintf: - taintf.write(str(uuid.uuid4())) - -class SignatureGeneratorUniHashMixIn(object): - def __init__(self, data): - self.extramethod = {} - # NOTE: The cache only tracks hashes that exist. Hashes that don't - # exist are always queried from the server since it is possible for - # hashes to appear over time, but much less likely for them to - # disappear - self.unihash_exists_cache = set() - self.username = None - self.password = None - self.env = {} - - origenv = data.getVar("BB_ORIGENV") - for e in HASHSERV_ENVVARS: - value = data.getVar(e) - if not value and origenv: - value = origenv.getVar(e) - if value: - self.env[e] = value - super().__init__(data) - - def get_taskdata(self): - return (self.server, self.method, self.extramethod, self.username, self.password, self.env) + super().get_taskdata() - - def set_taskdata(self, data): - self.server, self.method, self.extramethod, self.username, self.password, self.env = data[:6] - super().set_taskdata(data[6:]) - - def get_hashserv_creds(self): - if self.username and self.password: - return { - "username": self.username, - "password": self.password, - } - - return {} - - @contextmanager - def _client_env(self): - orig_env = os.environ.copy() - try: - for k, v in self.env.items(): - os.environ[k] = v - - yield - finally: - for k, v in self.env.items(): - if k in orig_env: - os.environ[k] = orig_env[k] - else: - del os.environ[k] - - @contextmanager - def client(self): - with self._client_env(): - if getattr(self, '_client', None) is None: - self._client = hashserv.create_client(self.server, **self.get_hashserv_creds()) - yield self._client - - def reset(self, data): - self.__close_clients() - return super().reset(data) - - def exit(self): - self.__close_clients() - return super().exit() - - def __close_clients(self): - with self._client_env(): - if getattr(self, '_client', None) is not None: - self._client.close() - self._client = None - if getattr(self, '_client_pool', None) is not None: - self._client_pool.close() - self._client_pool = None - - def get_stampfile_hash(self, tid): - if tid in self.taskhash: - # If a unique hash is reported, use it as the stampfile hash. This - # ensures that a task won't be re-run if the taskhash changes but - # would still result in the same output hash - unihash = self._get_unihash(tid) - if unihash is not None: - return unihash - - return super().get_stampfile_hash(tid) - - def set_unihash(self, tid, unihash): - (mc, fn, taskname, taskfn) = bb.runqueue.split_tid_mcfn(tid) - key = mc + ":" + self.tidtopn[tid] + ":" + taskname - self.unitaskhashes[key] = (self.taskhash[tid], unihash) - self.unihash[tid] = unihash - - def _get_unihash(self, tid, checkkey=None): - if tid not in self.tidtopn: - return None - (mc, fn, taskname, taskfn) = bb.runqueue.split_tid_mcfn(tid) - key = mc + ":" + self.tidtopn[tid] + ":" + taskname - if key not in self.unitaskhashes: - return None - if not checkkey: - checkkey = self.taskhash[tid] - (key, unihash) = self.unitaskhashes[key] - if key != checkkey: - return None - return unihash - - def get_cached_unihash(self, tid): - taskhash = self.taskhash[tid] - - # If it's not a setscene task we can return - if self.setscenetasks and tid not in self.setscenetasks: - self.unihash[tid] = None - return taskhash - - # TODO: This cache can grow unbounded.
It probably only needs to keep one entry - # for each task - unihash = self._get_unihash(tid) - if unihash is not None: - self.unihash[tid] = unihash - return unihash - - return None - - def _get_method(self, tid): - method = self.method - if tid in self.extramethod: - method = method + self.extramethod[tid] - - return method - - def unihashes_exist(self, query): - if len(query) == 0: - return {} - - query_keys = [] - result = {} - for key, unihash in query.items(): - if unihash in self.unihash_exists_cache: - result[key] = True - else: - query_keys.append(key) - - if query_keys: - with self.client() as client: - query_result = client.unihash_exists_batch(query[k] for k in query_keys) - - for idx, key in enumerate(query_keys): - exists = query_result[idx] - if exists: - self.unihash_exists_cache.add(query[key]) - result[key] = exists - - return result - - def get_unihash(self, tid): - return self.get_unihashes([tid])[tid] - - def get_unihashes(self, tids): - """ - For an iterable of tids, returns a dictionary that maps each tid to a - unihash - """ - result = {} - query_tids = [] - - for tid in tids: - unihash = self.get_cached_unihash(tid) - if unihash: - result[tid] = unihash - else: - query_tids.append(tid) - - if query_tids: - unihashes = [] - try: - with self.client() as client: - unihashes = client.get_unihash_batch((self._get_method(tid), self.taskhash[tid]) for tid in query_tids) - except (ConnectionError, FileNotFoundError, EOFError) as e: - bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e))) - - for idx, tid in enumerate(query_tids): - # In the absence of being able to discover a unique hash from the - # server, make it equivalent to the taskhash. The unique "hash" only - # really needs to be a unique string (not even necessarily a hash), but - # making it match the taskhash has a few advantages: - # - # 1) All of the sstate code that assumes hashes can be the same - # 2) It provides maximal compatibility with builders that don't use - # an equivalency server - # 3) The value is easy for multiple independent builders to derive the - # same unique hash from the same input. This means that if the - # independent builders find the same taskhash, but it isn't reported - # to the server, there is a better chance that they will agree on - # the unique hash. - taskhash = self.taskhash[tid] - - if unihashes and unihashes[idx]: - unihash = unihashes[idx] - # A unique hash equal to the taskhash is not very interesting, - # so it is reported at debug level 2. If they differ, that - # is much more interesting, so it is reported at debug level 1 - hashequiv_logger.bbdebug((1, 2)[unihash == taskhash], 'Found unihash %s in place of %s for %s from %s' % (unihash, taskhash, tid, self.server)) - else: - hashequiv_logger.debug2('No reported unihash for %s:%s from %s' % (tid, taskhash, self.server)) - unihash = taskhash - - self.set_unihash(tid, unihash) - self.unihash[tid] = unihash - result[tid] = unihash - - return result - - def report_unihash(self, path, task, d): - import importlib - - taskhash = d.getVar('BB_TASKHASH') - unihash = d.getVar('BB_UNIHASH') - report_taskdata = d.getVar('SSTATE_HASHEQUIV_REPORT_TASKDATA') == '1' - tempdir = d.getVar('T') - mcfn = d.getVar('BB_FILENAME') - tid = mcfn + ':do_' + task - key = tid + ':' + taskhash - - if self.setscenetasks and tid not in self.setscenetasks: - return - - # This can happen if locked sigs are in action.
Detect and just exit - if taskhash != self.taskhash[tid]: - return - - # Sanity checks - cache_unihash = self._get_unihash(tid, checkkey=taskhash) - if cache_unihash is None: - bb.fatal('%s not in unihash cache. Please report this error' % key) - - if cache_unihash != unihash: - bb.fatal("Cache unihash %s doesn't match BB_UNIHASH %s" % (cache_unihash, unihash)) - - sigfile = None - sigfile_name = "depsig.do_%s.%d" % (task, os.getpid()) - sigfile_link = "depsig.do_%s" % task - - try: - sigfile = open(os.path.join(tempdir, sigfile_name), 'w+b') - - locs = {'path': path, 'sigfile': sigfile, 'task': task, 'd': d} - - if "." in self.method: - (module, method) = self.method.rsplit('.', 1) - locs['method'] = getattr(importlib.import_module(module), method) - outhash = bb.utils.better_eval('method(path, sigfile, task, d)', locs) - else: - outhash = bb.utils.better_eval(self.method + '(path, sigfile, task, d)', locs) - - try: - extra_data = {} - - owner = d.getVar('SSTATE_HASHEQUIV_OWNER') - if owner: - extra_data['owner'] = owner - - if report_taskdata: - sigfile.seek(0) - - extra_data['PN'] = d.getVar('PN') - extra_data['PV'] = d.getVar('PV') - extra_data['PR'] = d.getVar('PR') - extra_data['task'] = task - extra_data['outhash_siginfo'] = sigfile.read().decode('utf-8') - - method = self.method - if tid in self.extramethod: - method = method + self.extramethod[tid] - - with self.client() as client: - data = client.report_unihash(taskhash, method, outhash, unihash, extra_data) - - new_unihash = data['unihash'] - - if new_unihash != unihash: - hashequiv_logger.debug('Task %s unihash changed %s -> %s by server %s' % (taskhash, unihash, new_unihash, self.server)) - bb.event.fire(bb.runqueue.taskUniHashUpdate(mcfn + ':do_' + task, new_unihash), d) - self.set_unihash(tid, new_unihash) - d.setVar('BB_UNIHASH', new_unihash) - else: - hashequiv_logger.debug('Reported task %s as unihash %s to %s' % (taskhash, unihash, self.server)) - except (ConnectionError, FileNotFoundError, EOFError) as e: - bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e))) - finally: - if sigfile: - sigfile.close() - - sigfile_link_path = os.path.join(tempdir, sigfile_link) - bb.utils.remove(sigfile_link_path) - - try: - os.symlink(sigfile_name, sigfile_link_path) - except OSError: - pass - - def report_unihash_equiv(self, tid, taskhash, wanted_unihash, current_unihash, datacaches): - try: - extra_data = {} - method = self.method - if tid in self.extramethod: - method = method + self.extramethod[tid] - - with self.client() as client: - data = client.report_unihash_equiv(taskhash, method, wanted_unihash, extra_data) - - hashequiv_logger.verbose('Reported task %s as unihash %s to %s (%s)' % (tid, wanted_unihash, self.server, str(data))) - - if data is None: - bb.warn("Server unable to handle unihash report") - return False - - finalunihash = data['unihash'] - - if finalunihash == current_unihash: - hashequiv_logger.verbose('Task %s unihash %s unchanged by server' % (tid, finalunihash)) - elif finalunihash == wanted_unihash: - hashequiv_logger.verbose('Task %s unihash changed %s -> %s as wanted' % (tid, current_unihash, finalunihash)) - self.set_unihash(tid, finalunihash) - return True - else: - # TODO: What to do here? 
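# The server returned a hash that is neither the current nor the wanted
# unihash; there is nothing sensible to do with it here, so it is only
# logged and the function falls through to return False below.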
- hashequiv_logger.verbose('Task %s unihash reported as unwanted hash %s' % (tid, finalunihash)) - - except (ConnectionError, FileNotFoundError, EOFError) as e: - bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e))) - - return False - -# -# Dummy class used for bitbake-selftest -# -class SignatureGeneratorTestEquivHash(SignatureGeneratorUniHashMixIn, SignatureGeneratorBasicHash): - name = "TestEquivHash" - def init_rundepcheck(self, data): - super().init_rundepcheck(data) - self.server = data.getVar('BB_HASHSERVE') - self.method = "sstate_output_hash" - -def clean_checksum_file_path(file_checksum_tuple): - f, cs = file_checksum_tuple - if "/./" in f: - return "./" + f.split("/./")[1] - return os.path.basename(f) - -def dump_this_task(outfile, d): - import bb.parse - mcfn = d.getVar("BB_FILENAME") - task = "do_" + d.getVar("BB_CURRENTTASK") - referencestamp = bb.parse.siggen.stampfile_base(mcfn) - bb.parse.siggen.dump_sigtask(mcfn, task, outfile, "customfile:" + referencestamp) - -def init_colors(enable_color): - """Initialise colour dict for passing to compare_sigfiles()""" - # First set up the colours - colors = {'color_title': '\033[1m', - 'color_default': '\033[0m', - 'color_add': '\033[0;32m', - 'color_remove': '\033[0;31m', - } - # Leave all keys present but clear the values - if not enable_color: - for k in colors.keys(): - colors[k] = '' - return colors - -def worddiff_str(oldstr, newstr, colors=None): - if not colors: - colors = init_colors(False) - diff = simplediff.diff(oldstr.split(' '), newstr.split(' ')) - ret = [] - for change, value in diff: - value = ' '.join(value) - if change == '=': - ret.append(value) - elif change == '+': - item = '{color_add}{{+{value}+}}{color_default}'.format(value=value, **colors) - ret.append(item) - elif change == '-': - item = '{color_remove}[-{value}-]{color_default}'.format(value=value, **colors) - ret.append(item) - whitespace_note = '' - if oldstr != newstr and ' '.join(oldstr.split()) == ' '.join(newstr.split()): - whitespace_note = ' (whitespace changed)' - return '"%s"%s' % (' '.join(ret), whitespace_note) - -def list_inline_diff(oldlist, newlist, colors=None): - if not colors: - colors = init_colors(False) - diff = simplediff.diff(oldlist, newlist) - ret = [] - for change, value in diff: - value = ' '.join(value) - if change == '=': - ret.append("'%s'" % value) - elif change == '+': - item = '{color_add}+{value}{color_default}'.format(value=value, **colors) - ret.append(item) - elif change == '-': - item = '{color_remove}-{value}{color_default}'.format(value=value, **colors) - ret.append(item) - return '[%s]' % (', '.join(ret)) - -# Handle renamed fields -def handle_renames(data): - if 'basewhitelist' in data: - data['basehash_ignore_vars'] = data['basewhitelist'] - del data['basewhitelist'] - if 'taskwhitelist' in data: - data['taskhash_ignore_tasks'] = data['taskwhitelist'] - del data['taskwhitelist'] - - -def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False): - output = [] - - colors = init_colors(color) - def color_format(formatstr, **values): - """ - Return colour formatted string.
- NOTE: call with the format string, not an already formatted string - containing values (otherwise you could have trouble with { and } - characters) - """ - if not formatstr.endswith('{color_default}'): - formatstr += '{color_default}' - # In newer python 3 versions you can pass both of these directly, - # but we only require 3.4 at the moment - formatparams = {} - formatparams.update(colors) - formatparams.update(values) - return formatstr.format(**formatparams) - - try: - with bb.compress.zstd.open(a, "rt", encoding="utf-8", num_threads=1) as f: - a_data = json.load(f, object_hook=SetDecoder) - except (TypeError, OSError) as err: - bb.error("Failed to open sigdata file '%s': %s" % (a, str(err))) - raise err - try: - with bb.compress.zstd.open(b, "rt", encoding="utf-8", num_threads=1) as f: - b_data = json.load(f, object_hook=SetDecoder) - except (TypeError, OSError) as err: - bb.error("Failed to open sigdata file '%s': %s" % (b, str(err))) - raise err - - for data in [a_data, b_data]: - handle_renames(data) - - def dict_diff(a, b, ignored_vars=set()): - sa = set(a.keys()) - sb = set(b.keys()) - common = sa & sb - changed = set() - for i in common: - if a[i] != b[i] and i not in ignored_vars: - changed.add(i) - added = sb - sa - removed = sa - sb - return changed, added, removed - - def file_checksums_diff(a, b): - from collections import Counter - - # Convert lists back to tuples - a = [(f[0], f[1]) for f in a] - b = [(f[0], f[1]) for f in b] - - # Compare lists, ensuring we can handle duplicate filenames if they exist - removedcount = Counter(a) - removedcount.subtract(b) - addedcount = Counter(b) - addedcount.subtract(a) - added = [] - for x in b: - if addedcount[x] > 0: - addedcount[x] -= 1 - added.append(x) - removed = [] - changed = [] - for x in a: - if removedcount[x] > 0: - removedcount[x] -= 1 - for y in added: - if y[0] == x[0]: - changed.append((x[0], x[1], y[1])) - added.remove(y) - break - else: - removed.append(x) - added = [x[0] for x in added] - removed = [x[0] for x in removed] - return changed, added, removed - - if 'basehash_ignore_vars' in a_data and a_data['basehash_ignore_vars'] != b_data['basehash_ignore_vars']: - output.append(color_format("{color_title}basehash_ignore_vars changed{color_default} from '%s' to '%s'") % (a_data['basehash_ignore_vars'], b_data['basehash_ignore_vars'])) - if a_data['basehash_ignore_vars'] and b_data['basehash_ignore_vars']: - output.append("changed items: %s" % a_data['basehash_ignore_vars'].symmetric_difference(b_data['basehash_ignore_vars'])) - - if 'taskhash_ignore_tasks' in a_data and a_data['taskhash_ignore_tasks'] != b_data['taskhash_ignore_tasks']: - output.append(color_format("{color_title}taskhash_ignore_tasks changed{color_default} from '%s' to '%s'") % (a_data['taskhash_ignore_tasks'], b_data['taskhash_ignore_tasks'])) - if a_data['taskhash_ignore_tasks'] and b_data['taskhash_ignore_tasks']: - output.append("changed items: %s" % a_data['taskhash_ignore_tasks'].symmetric_difference(b_data['taskhash_ignore_tasks'])) - - if a_data['taskdeps'] != b_data['taskdeps']: - output.append(color_format("{color_title}Task dependencies changed{color_default} from:\n%s\nto:\n%s") % (sorted(a_data['taskdeps']), sorted(b_data['taskdeps']))) - - if a_data['basehash'] != b_data['basehash'] and not collapsed: - output.append(color_format("{color_title}basehash changed{color_default} from %s to %s") % (a_data['basehash'], b_data['basehash'])) - - changed, added, removed = dict_diff(a_data['gendeps'], b_data['gendeps'], 
a_data['basehash_ignore_vars'] & b_data['basehash_ignore_vars']) - if changed: - for dep in sorted(changed): - output.append(color_format("{color_title}List of dependencies for variable %s changed from '{color_default}%s{color_title}' to '{color_default}%s{color_title}'") % (dep, a_data['gendeps'][dep], b_data['gendeps'][dep])) - if a_data['gendeps'][dep] and b_data['gendeps'][dep]: - output.append("changed items: %s" % a_data['gendeps'][dep].symmetric_difference(b_data['gendeps'][dep])) - if added: - for dep in sorted(added): - output.append(color_format("{color_title}Dependency on variable %s was added") % (dep)) - if removed: - for dep in sorted(removed): - output.append(color_format("{color_title}Dependency on Variable %s was removed") % (dep)) - - - changed, added, removed = dict_diff(a_data['varvals'], b_data['varvals']) - if changed: - for dep in sorted(changed): - oldval = a_data['varvals'][dep] - newval = b_data['varvals'][dep] - if newval and oldval and ('\n' in oldval or '\n' in newval): - diff = difflib.unified_diff(oldval.splitlines(), newval.splitlines(), lineterm='') - # Cut off the first two lines, since we aren't interested in - # the old/new filename (they are blank anyway in this case) - difflines = list(diff)[2:] - if color: - # Add colour to diff output - for i, line in enumerate(difflines): - if line.startswith('+'): - line = color_format('{color_add}{line}', line=line) - difflines[i] = line - elif line.startswith('-'): - line = color_format('{color_remove}{line}', line=line) - difflines[i] = line - output.append(color_format("{color_title}Variable {var} value changed:{color_default}\n{diff}", var=dep, diff='\n'.join(difflines))) - elif newval and oldval and (' ' in oldval or ' ' in newval): - output.append(color_format("{color_title}Variable {var} value changed:{color_default}\n{diff}", var=dep, diff=worddiff_str(oldval, newval, colors))) - else: - output.append(color_format("{color_title}Variable {var} value changed from '{color_default}{oldval}{color_title}' to '{color_default}{newval}{color_title}'{color_default}", var=dep, oldval=oldval, newval=newval)) - - if not 'file_checksum_values' in a_data: - a_data['file_checksum_values'] = [] - if not 'file_checksum_values' in b_data: - b_data['file_checksum_values'] = [] - - changed, added, removed = file_checksums_diff(a_data['file_checksum_values'], b_data['file_checksum_values']) - if changed: - for f, old, new in changed: - output.append(color_format("{color_title}Checksum for file %s changed{color_default} from %s to %s") % (f, old, new)) - if added: - for f in added: - output.append(color_format("{color_title}Dependency on checksum of file %s was added") % (f)) - if removed: - for f in removed: - output.append(color_format("{color_title}Dependency on checksum of file %s was removed") % (f)) - - if not 'runtaskdeps' in a_data: - a_data['runtaskdeps'] = {} - if not 'runtaskdeps' in b_data: - b_data['runtaskdeps'] = {} - - if not collapsed: - if len(a_data['runtaskdeps']) != len(b_data['runtaskdeps']): - changed = ["Number of task dependencies changed"] - else: - changed = [] - for idx, task in enumerate(a_data['runtaskdeps']): - a = a_data['runtaskdeps'][idx] - b = b_data['runtaskdeps'][idx] - if a_data['runtaskhashes'][a] != b_data['runtaskhashes'][b] and not collapsed: - changed.append("%s with hash %s\n changed to\n%s with hash %s" % (a, a_data['runtaskhashes'][a], b, b_data['runtaskhashes'][b])) - - if changed: - clean_a = a_data['runtaskdeps'] - clean_b = b_data['runtaskdeps'] - if clean_a != clean_b: - 
output.append(color_format("{color_title}runtaskdeps changed:{color_default}\n%s") % list_inline_diff(clean_a, clean_b, colors)) - else: - output.append(color_format("{color_title}runtaskdeps changed:")) - output.append("\n".join(changed)) - - - if 'runtaskhashes' in a_data and 'runtaskhashes' in b_data: - a = a_data['runtaskhashes'] - b = b_data['runtaskhashes'] - changed, added, removed = dict_diff(a, b) - if added: - for dep in sorted(added): - bdep_found = False - if removed: - for bdep in removed: - if b[dep] == a[bdep]: - #output.append("Dependency on task %s was replaced by %s with same hash" % (dep, bdep)) - bdep_found = True - if not bdep_found: - output.append(color_format("{color_title}Dependency on task %s was added{color_default} with hash %s") % (dep, b[dep])) - if removed: - for dep in sorted(removed): - adep_found = False - if added: - for adep in added: - if b[adep] == a[dep]: - #output.append("Dependency on task %s was replaced by %s with same hash" % (adep, dep)) - adep_found = True - if not adep_found: - output.append(color_format("{color_title}Dependency on task %s was removed{color_default} with hash %s") % (dep, a[dep])) - if changed: - for dep in sorted(changed): - if not collapsed: - output.append(color_format("{color_title}Hash for task dependency %s changed{color_default} from %s to %s") % (dep, a[dep], b[dep])) - if callable(recursecb): - recout = recursecb(dep, a[dep], b[dep]) - if recout: - if collapsed: - output.extend(recout) - else: - # If a dependent hash changed, might as well print the line above and then defer to the changes in - # that hash since in all likelyhood, they're the same changes this task also saw. - output = [output[-1]] + recout - break - - a_taint = a_data.get('taint', None) - b_taint = b_data.get('taint', None) - if a_taint != b_taint: - if a_taint and a_taint.startswith('nostamp:'): - a_taint = a_taint.replace('nostamp:', 'nostamp(uuid4):') - if b_taint and b_taint.startswith('nostamp:'): - b_taint = b_taint.replace('nostamp:', 'nostamp(uuid4):') - output.append(color_format("{color_title}Taint (by forced/invalidated task) changed{color_default} from %s to %s") % (a_taint, b_taint)) - - return output - - -def calc_basehash(sigdata): - task = sigdata['task'] - basedata = sigdata['varvals'][task] - - if basedata is None: - basedata = '' - - alldeps = sigdata['taskdeps'] - for dep in sorted(alldeps): - basedata = basedata + dep - val = sigdata['varvals'][dep] - if val is not None: - basedata = basedata + str(val) - - return hashlib.sha256(basedata.encode("utf-8")).hexdigest() - -def calc_taskhash(sigdata): - data = sigdata['basehash'] - - for dep in sigdata['runtaskdeps']: - data = data + sigdata['runtaskhashes'][dep] - - for c in sigdata['file_checksum_values']: - if c[1]: - if "./" in c[0]: - data = data + c[0] - data = data + c[1] - - if 'taint' in sigdata: - if 'nostamp:' in sigdata['taint']: - data = data + sigdata['taint'][8:] - else: - data = data + sigdata['taint'] - - return hashlib.sha256(data.encode("utf-8")).hexdigest() - - -def dump_sigfile(a): - output = [] - - try: - with bb.compress.zstd.open(a, "rt", encoding="utf-8", num_threads=1) as f: - a_data = json.load(f, object_hook=SetDecoder) - except (TypeError, OSError) as err: - bb.error("Failed to open sigdata file '%s': %s" % (a, str(err))) - raise err - - handle_renames(a_data) - - output.append("basehash_ignore_vars: %s" % (sorted(a_data['basehash_ignore_vars']))) - - output.append("taskhash_ignore_tasks: %s" % (sorted(a_data['taskhash_ignore_tasks'] or []))) - - 
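A note for readers tracing the hashing logic: calc_basehash() above simply concatenates the task's own variable value with every sorted dependency name and value, then takes one SHA-256 digest. A minimal standalone sketch of the same scheme (the sigdata dict here is hand-rolled for illustration, not a real signature dump):

    import hashlib

    def basehash_from_sigdata(sigdata):
        # Same scheme as calc_basehash() above: start from the task's own
        # value, then append each sorted dependency name and its value.
        data = sigdata['varvals'][sigdata['task']] or ''
        for dep in sorted(sigdata['taskdeps']):
            data += dep
            val = sigdata['varvals'][dep]
            if val is not None:
                data += str(val)
        return hashlib.sha256(data.encode("utf-8")).hexdigest()

    sig = {'task': 'do_build',
           'varvals': {'do_build': 'echo hi', 'CC': 'gcc'},
           'taskdeps': ['CC']}
    print(basehash_from_sigdata(sig))  # deterministic 64-character hex digest
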
output.append("Task dependencies: %s" % (sorted(a_data['taskdeps']))) - - output.append("basehash: %s" % (a_data['basehash'])) - - for dep in sorted(a_data['gendeps']): - output.append("List of dependencies for variable %s is %s" % (dep, sorted(a_data['gendeps'][dep]))) - - for dep in sorted(a_data['varvals']): - output.append("Variable %s value is %s" % (dep, a_data['varvals'][dep])) - - if 'runtaskdeps' in a_data: - output.append("Tasks this task depends on: %s" % (sorted(a_data['runtaskdeps']))) - - if 'file_checksum_values' in a_data: - output.append("This task depends on the checksums of files: %s" % (sorted(a_data['file_checksum_values']))) - - if 'runtaskhashes' in a_data: - for dep in sorted(a_data['runtaskhashes']): - output.append("Hash for dependent task %s is %s" % (dep, a_data['runtaskhashes'][dep])) - - if 'taint' in a_data: - if a_data['taint'].startswith('nostamp:'): - msg = a_data['taint'].replace('nostamp:', 'nostamp(uuid4):') - else: - msg = a_data['taint'] - output.append("Tainted (by forced/invalidated task): %s" % msg) - - if 'task' in a_data: - computed_basehash = calc_basehash(a_data) - output.append("Computed base hash is %s and from file %s" % (computed_basehash, a_data['basehash'])) - else: - output.append("Unable to compute base hash") - - computed_taskhash = calc_taskhash(a_data) - output.append("Computed task hash is %s" % computed_taskhash) - - return output diff --git a/bitbake/lib/bb/taskdata.py b/bitbake/lib/bb/taskdata.py deleted file mode 100644 index 66545a65af..0000000000 --- a/bitbake/lib/bb/taskdata.py +++ /dev/null @@ -1,586 +0,0 @@ -""" -BitBake 'TaskData' implementation - -Task data collection and handling - -""" - -# Copyright (C) 2006 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import logging -import re -import bb - -logger = logging.getLogger("BitBake.TaskData") - -def re_match_strings(target, strings): - """ - Whether or not the string 'target' matches - any one string of the strings which can be regular expression string - """ - for name in strings: - if name.startswith("^") or name.endswith("$"): - if re.match(name, target): - return True - elif name == target: - return True - return False - -class TaskEntry: - def __init__(self): - self.tdepends = [] - self.idepends = [] - self.irdepends = [] - -class TaskData: - """ - BitBake Task Data implementation - """ - def __init__(self, halt = True, skiplist = None, allowincomplete = False): - self.build_targets = {} - self.run_targets = {} - - self.external_targets = [] - - self.seenfns = [] - self.taskentries = {} - - self.depids = {} - self.rdepids = {} - - self.consider_msgs_cache = [] - - self.failed_deps = [] - self.failed_rdeps = [] - self.failed_fns = [] - - self.halt = halt - self.allowincomplete = allowincomplete - - self.skiplist = skiplist - - self.mcdepends = [] - - def add_tasks(self, fn, dataCache): - """ - Add tasks for a given fn to the database - """ - - task_deps = dataCache.task_deps[fn] - - if fn in self.failed_fns: - bb.msg.fatal("TaskData", "Trying to re-add a failed file? 
Something is broken...") - - # Check if we've already seen this fn - if fn in self.seenfns: - return - - self.seenfns.append(fn) - - self.add_extra_deps(fn, dataCache) - - def add_mcdepends(task): - for dep in task_deps['mcdepends'][task].split(): - if len(dep.split(':')) != 5: - bb.msg.fatal("TaskData", "Error for %s:%s[%s], multiconfig dependency %s does not contain exactly four ':' characters.\n Task '%s' should be specified in the form 'mc:fromMC:toMC:packagename:task'" % (fn, task, 'mcdepends', dep, 'mcdepends')) - if dep not in self.mcdepends: - self.mcdepends.append(dep) - - # Common code for dep_name/depends = 'depends'/idepends and 'rdepends'/irdepends - def handle_deps(task, dep_name, depends, seen): - if dep_name in task_deps and task in task_deps[dep_name]: - ids = [] - for dep in task_deps[dep_name][task].split(): - if dep: - parts = dep.split(":") - if len(parts) != 2: - bb.msg.fatal("TaskData", "Error for %s:%s[%s], dependency %s in '%s' does not contain exactly one ':' character.\n Task '%s' should be specified in the form 'packagename:task'" % (fn, task, dep_name, dep, task_deps[dep_name][task], dep_name)) - ids.append((parts[0], parts[1])) - seen(parts[0]) - depends.extend(ids) - - for task in task_deps['tasks']: - - tid = "%s:%s" % (fn, task) - self.taskentries[tid] = TaskEntry() - - # Work out task dependencies - parentids = [] - for dep in task_deps['parents'][task]: - if dep not in task_deps['tasks']: - bb.debug(2, "Not adding dependency of %s on %s since %s does not exist" % (task, dep, dep)) - continue - parentid = "%s:%s" % (fn, dep) - parentids.append(parentid) - self.taskentries[tid].tdepends.extend(parentids) - - - # Touch all intertask dependencies - handle_deps(task, 'depends', self.taskentries[tid].idepends, self.seen_build_target) - handle_deps(task, 'rdepends', self.taskentries[tid].irdepends, self.seen_run_target) - - if 'mcdepends' in task_deps and task in task_deps['mcdepends']: - add_mcdepends(task) - - # Work out build dependencies - if not fn in self.depids: - dependids = set() - for depend in dataCache.deps[fn]: - dependids.add(depend) - self.depids[fn] = list(dependids) - logger.debug2("Added dependencies %s for %s", str(dataCache.deps[fn]), fn) - - # Work out runtime dependencies - if not fn in self.rdepids: - rdependids = set() - rdepends = dataCache.rundeps[fn] - rrecs = dataCache.runrecs[fn] - rdependlist = [] - rreclist = [] - for package in rdepends: - for rdepend in rdepends[package]: - rdependlist.append(rdepend) - rdependids.add(rdepend) - for package in rrecs: - for rdepend in rrecs[package]: - rreclist.append(rdepend) - rdependids.add(rdepend) - if rdependlist: - logger.debug2("Added runtime dependencies %s for %s", str(rdependlist), fn) - if rreclist: - logger.debug2("Added runtime recommendations %s for %s", str(rreclist), fn) - self.rdepids[fn] = list(rdependids) - - for dep in self.depids[fn]: - self.seen_build_target(dep) - if dep in self.failed_deps: - self.fail_fn(fn) - return - for dep in self.rdepids[fn]: - self.seen_run_target(dep) - if dep in self.failed_rdeps: - self.fail_fn(fn) - return - - def add_extra_deps(self, fn, dataCache): - func = dataCache.extradepsfunc.get(fn, None) - if func: - bb.providers.buildWorldTargetList(dataCache) - pn = dataCache.pkg_fn[fn] - params = {'deps': dataCache.deps[fn], - 'world_target': dataCache.world_target, - 'pkg_pn': dataCache.pkg_pn, - 'self_pn': pn} - funcname = '_%s_calculate_extra_depends' % pn.replace('-', '_') - paramlist = ','.join(params.keys()) - func = 'def %s(%s):\n%s\n\n%s(%s)' 
% (funcname, paramlist, func, funcname, paramlist)
-            bb.utils.better_exec(func, params)
-
-
-    def have_build_target(self, target):
-        """
-        Have we a build target matching this name?
-        """
-        if target in self.build_targets and self.build_targets[target]:
-            return True
-        return False
-
-    def have_runtime_target(self, target):
-        """
-        Have we a runtime target matching this name?
-        """
-        if target in self.run_targets and self.run_targets[target]:
-            return True
-        return False
-
-    def seen_build_target(self, name):
-        """
-        Maintain a list of build targets
-        """
-        if name not in self.build_targets:
-            self.build_targets[name] = []
-
-    def add_build_target(self, fn, item):
-        """
-        Add a build target.
-        If already present, append the provider fn to the list
-        """
-        if item in self.build_targets:
-            if fn in self.build_targets[item]:
-                return
-            self.build_targets[item].append(fn)
-            return
-        self.build_targets[item] = [fn]
-
-    def seen_run_target(self, name):
-        """
-        Maintain a list of runtime targets
-        """
-        if name not in self.run_targets:
-            self.run_targets[name] = []
-
-    def add_runtime_target(self, fn, item):
-        """
-        Add a runtime target.
-        If already present, append the provider fn to the list
-        """
-        if item in self.run_targets:
-            if fn in self.run_targets[item]:
-                return
-            self.run_targets[item].append(fn)
-            return
-        self.run_targets[item] = [fn]
-
-    def mark_external_target(self, target):
-        """
-        Mark a build target as being externally requested
-        """
-        if target not in self.external_targets:
-            self.external_targets.append(target)
-
-    def get_unresolved_build_targets(self, dataCache):
-        """
-        Return a list of build targets whose providers
-        are unknown.
-        """
-        unresolved = []
-        for target in self.build_targets:
-            if re_match_strings(target, dataCache.ignored_dependencies):
-                continue
-            if target in self.failed_deps:
-                continue
-            if not self.build_targets[target]:
-                unresolved.append(target)
-        return unresolved
-
-    def get_unresolved_run_targets(self, dataCache):
-        """
-        Return a list of runtime targets whose providers
-        are unknown.
- """ - unresolved = [] - for target in self.run_targets: - if re_match_strings(target, dataCache.ignored_dependencies): - continue - if target in self.failed_rdeps: - continue - if not self.run_targets[target]: - unresolved.append(target) - return unresolved - - def get_provider(self, item): - """ - Return a list of providers of item - """ - return self.build_targets[item] - - def get_dependees(self, item): - """ - Return a list of targets which depend on item - """ - dependees = [] - for fn in self.depids: - if item in self.depids[fn]: - dependees.append(fn) - return dependees - - def get_rdependees(self, item): - """ - Return a list of targets which depend on runtime item - """ - dependees = [] - for fn in self.rdepids: - if item in self.rdepids[fn]: - dependees.append(fn) - return dependees - - def get_reasons(self, item, runtime=False): - """ - Get the reason(s) for an item not being provided, if any - """ - reasons = [] - if self.skiplist: - for fn in self.skiplist: - skipitem = self.skiplist[fn] - if skipitem.pn == item: - reasons.append("%s was skipped: %s" % (skipitem.pn, skipitem.skipreason)) - elif runtime and item in skipitem.rprovides: - reasons.append("%s RPROVIDES %s but was skipped: %s" % (skipitem.pn, item, skipitem.skipreason)) - elif not runtime and item in skipitem.provides: - reasons.append("%s PROVIDES %s but was skipped: %s" % (skipitem.pn, item, skipitem.skipreason)) - return reasons - - def get_close_matches(self, item, provider_list): - import difflib - if self.skiplist: - skipped = [] - for fn in self.skiplist: - skipped.append(self.skiplist[fn].pn) - full_list = provider_list + skipped - else: - full_list = provider_list - return difflib.get_close_matches(item, full_list, cutoff=0.7) - - def add_provider(self, cfgData, dataCache, item): - try: - self.add_provider_internal(cfgData, dataCache, item) - except bb.providers.NoProvider: - if self.halt: - raise - self.remove_buildtarget(item) - - self.mark_external_target(item) - - def add_provider_internal(self, cfgData, dataCache, item): - """ - Add the providers of item to the task data - Mark entries were specifically added externally as against dependencies - added internally during dependency resolution - """ - - if re_match_strings(item, dataCache.ignored_dependencies): - return - - if not item in dataCache.providers: - close_matches = self.get_close_matches(item, list(dataCache.providers.keys())) - # Is it in RuntimeProviders ? 
- all_p = bb.providers.getRuntimeProviders(dataCache, item) - for fn in all_p: - new = dataCache.pkg_fn[fn] + " RPROVIDES " + item - if new not in close_matches: - close_matches.append(new) - bb.event.fire(bb.event.NoProvider(item, dependees=self.get_dependees(item), reasons=self.get_reasons(item), close_matches=close_matches), cfgData) - raise bb.providers.NoProvider(item) - - if self.have_build_target(item): - return - - all_p = dataCache.providers[item] - - eligible, foundUnique = bb.providers.filterProviders(all_p, item, cfgData, dataCache) - eligible = [p for p in eligible if not p in self.failed_fns] - - if not eligible: - bb.event.fire(bb.event.NoProvider(item, dependees=self.get_dependees(item), reasons=["No eligible PROVIDERs exist for '%s'" % item]), cfgData) - raise bb.providers.NoProvider(item) - - if len(eligible) > 1 and not foundUnique: - if item not in self.consider_msgs_cache: - providers_list = [] - for fn in eligible: - providers_list.append(dataCache.pkg_fn[fn]) - bb.event.fire(bb.event.MultipleProviders(item, providers_list), cfgData) - self.consider_msgs_cache.append(item) - - for fn in eligible: - if fn in self.failed_fns: - continue - logger.debug2("adding %s to satisfy %s", fn, item) - self.add_build_target(fn, item) - self.add_tasks(fn, dataCache) - - - #item = dataCache.pkg_fn[fn] - - def add_rprovider(self, cfgData, dataCache, item): - """ - Add the runtime providers of item to the task data - (takes item names from RDEPENDS/PACKAGES namespace) - """ - - if re_match_strings(item, dataCache.ignored_dependencies): - return - - if self.have_runtime_target(item): - return - - all_p = bb.providers.getRuntimeProviders(dataCache, item) - - if not all_p: - bb.event.fire(bb.event.NoProvider(item, runtime=True, dependees=self.get_rdependees(item), reasons=self.get_reasons(item, True)), cfgData) - raise bb.providers.NoRProvider(item) - - eligible, numberPreferred = bb.providers.filterProvidersRunTime(all_p, item, cfgData, dataCache) - eligible = [p for p in eligible if not p in self.failed_fns] - - if not eligible: - bb.event.fire(bb.event.NoProvider(item, runtime=True, dependees=self.get_rdependees(item), reasons=["No eligible RPROVIDERs exist for '%s'" % item]), cfgData) - raise bb.providers.NoRProvider(item) - - if len(eligible) > 1 and numberPreferred == 0: - if item not in self.consider_msgs_cache: - providers_list = [] - for fn in eligible: - providers_list.append(dataCache.pkg_fn[fn]) - bb.event.fire(bb.event.MultipleProviders(item, providers_list, runtime=True), cfgData) - self.consider_msgs_cache.append(item) - - if numberPreferred > 1: - if item not in self.consider_msgs_cache: - providers_list = [] - for fn in eligible: - providers_list.append(dataCache.pkg_fn[fn]) - bb.event.fire(bb.event.MultipleProviders(item, providers_list, runtime=True), cfgData) - self.consider_msgs_cache.append(item) - raise bb.providers.MultipleRProvider(item) - - # run through the list until we find one that we can build - for fn in eligible: - if fn in self.failed_fns: - continue - logger.debug2("adding '%s' to satisfy runtime '%s'", fn, item) - self.add_runtime_target(fn, item) - self.add_tasks(fn, dataCache) - - def fail_fn(self, fn, missing_list=None): - """ - Mark a file as failed (unbuildable) - Remove any references from build and runtime provider lists - - missing_list, A list of missing requirements for this target - """ - if fn in self.failed_fns: - return - if not missing_list: - missing_list = [] - logger.debug("File '%s' is unbuildable, removing...", fn) - 
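fail_fn() here and remove_buildtarget()/remove_runtarget() just below call each other recursively: failing a recipe empties provider lists, and any target left with no providers fails every recipe that depended on it, and so on until the failure stops propagating. A stripped-down model of that cascade (recipe and target names are made up, and the real class tracks far more state):

    build_targets = {"libA": ["a_1.0.bb"], "app": ["app_1.0.bb"]}
    depids = {"app_1.0.bb": ["libA"]}   # app's recipe depends on target libA
    failed_fns, failed_deps = set(), set()

    def fail_fn(fn):
        if fn in failed_fns:
            return
        failed_fns.add(fn)
        for target, fns in build_targets.items():
            if fn in fns:
                fns.remove(fn)
                if not fns:                 # last provider gone
                    remove_buildtarget(target)

    def remove_buildtarget(target):
        failed_deps.add(target)
        for fn, deps in depids.items():
            if target in deps:
                fail_fn(fn)

    fail_fn("a_1.0.bb")
    print(sorted(failed_fns))   # ['a_1.0.bb', 'app_1.0.bb']
    print(sorted(failed_deps))  # ['app', 'libA']
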
self.failed_fns.append(fn) - for target in self.build_targets: - if fn in self.build_targets[target]: - self.build_targets[target].remove(fn) - if not self.build_targets[target]: - self.remove_buildtarget(target, missing_list) - for target in self.run_targets: - if fn in self.run_targets[target]: - self.run_targets[target].remove(fn) - if not self.run_targets[target]: - self.remove_runtarget(target, missing_list) - - def remove_buildtarget(self, target, missing_list=None): - """ - Mark a build target as failed (unbuildable) - Trigger removal of any files that have this as a dependency - """ - if not missing_list: - missing_list = [target] - else: - missing_list = [target] + missing_list - logger.verbose("Target '%s' is unbuildable, removing...\nMissing or unbuildable dependency chain was: %s", target, missing_list) - self.failed_deps.append(target) - dependees = self.get_dependees(target) - for fn in dependees: - self.fail_fn(fn, missing_list) - for tid in self.taskentries: - for (idepend, idependtask) in self.taskentries[tid].idepends: - if idepend == target: - fn = tid.rsplit(":",1)[0] - self.fail_fn(fn, missing_list) - - if self.halt and target in self.external_targets: - logger.error("Required build target '%s' has no buildable providers.\nMissing or unbuildable dependency chain was: %s", target, missing_list) - raise bb.providers.NoProvider(target) - - def remove_runtarget(self, target, missing_list=None): - """ - Mark a run target as failed (unbuildable) - Trigger removal of any files that have this as a dependency - """ - if not missing_list: - missing_list = [target] - else: - missing_list = [target] + missing_list - - logger.info("Runtime target '%s' is unbuildable, removing...\nMissing or unbuildable dependency chain was: %s", target, missing_list) - self.failed_rdeps.append(target) - dependees = self.get_rdependees(target) - for fn in dependees: - self.fail_fn(fn, missing_list) - for tid in self.taskentries: - for (idepend, idependtask) in self.taskentries[tid].irdepends: - if idepend == target: - fn = tid.rsplit(":",1)[0] - self.fail_fn(fn, missing_list) - - def add_unresolved(self, cfgData, dataCache): - """ - Resolve all unresolved build and runtime targets - """ - logger.info("Resolving any missing task queue dependencies") - while True: - added = 0 - for target in self.get_unresolved_build_targets(dataCache): - try: - self.add_provider_internal(cfgData, dataCache, target) - added = added + 1 - except bb.providers.NoProvider: - if self.halt and target in self.external_targets and not self.allowincomplete: - raise - if not self.allowincomplete: - self.remove_buildtarget(target) - for target in self.get_unresolved_run_targets(dataCache): - try: - self.add_rprovider(cfgData, dataCache, target) - added = added + 1 - except (bb.providers.NoRProvider, bb.providers.MultipleRProvider): - self.remove_runtarget(target) - logger.debug("Resolved " + str(added) + " extra dependencies") - if added == 0: - break - # self.dump_data() - - def get_providermap(self, prefix=None): - provmap = {} - for name in self.build_targets: - if prefix and not name.startswith(prefix): - continue - if self.have_build_target(name): - provider = self.get_provider(name) - if provider: - provmap[name] = provider[0] - return provmap - - def get_mcdepends(self): - return self.mcdepends - - def dump_data(self): - """ - Dump some debug information on the internal data structures - """ - logger.debug3("build_names:") - logger.debug3(", ".join(self.build_targets)) - - logger.debug3("run_names:") - logger.debug3(", 
".join(self.run_targets)) - - logger.debug3("build_targets:") - for target in self.build_targets: - targets = "None" - if target in self.build_targets: - targets = self.build_targets[target] - logger.debug3(" %s: %s", target, targets) - - logger.debug3("run_targets:") - for target in self.run_targets: - targets = "None" - if target in self.run_targets: - targets = self.run_targets[target] - logger.debug3(" %s: %s", target, targets) - - logger.debug3("tasks:") - for tid in self.taskentries: - logger.debug3(" %s: %s %s %s", - tid, - self.taskentries[tid].idepends, - self.taskentries[tid].irdepends, - self.taskentries[tid].tdepends) - - logger.debug3("dependency ids (per fn):") - for fn in self.depids: - logger.debug3(" %s: %s", fn, self.depids[fn]) - - logger.debug3("runtime dependency ids (per fn):") - for fn in self.rdepids: - logger.debug3(" %s: %s", fn, self.rdepids[fn]) diff --git a/bitbake/lib/bb/tests/__init__.py b/bitbake/lib/bb/tests/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/bitbake/lib/bb/tests/codeparser.py b/bitbake/lib/bb/tests/codeparser.py deleted file mode 100644 index c0d1362a0c..0000000000 --- a/bitbake/lib/bb/tests/codeparser.py +++ /dev/null @@ -1,512 +0,0 @@ -# -# BitBake Test for codeparser.py -# -# Copyright (C) 2010 Chris Larson -# Copyright (C) 2012 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import unittest -import logging -import bb - -logger = logging.getLogger('BitBake.TestCodeParser') - -# bb.data references bb.parse but can't directly import due to circular dependencies. -# Hack around it for now :( -import bb.parse -import bb.data - -class ReferenceTest(unittest.TestCase): - def setUp(self): - self.d = bb.data.init() - - def setEmptyVars(self, varlist): - for k in varlist: - self.d.setVar(k, "") - - def setValues(self, values): - for k, v in values.items(): - self.d.setVar(k, v) - - def assertReferences(self, refs): - self.assertEqual(self.references, refs) - - def assertExecs(self, execs): - self.assertEqual(self.execs, execs) - - def assertContains(self, contains): - self.assertEqual(self.contains, contains) - -class VariableReferenceTest(ReferenceTest): - - def parseExpression(self, exp): - parsedvar = self.d.expandWithRefs(exp, None) - self.references = parsedvar.references - self.execs = parsedvar.execs - - def test_simple_reference(self): - self.setEmptyVars(["FOO"]) - self.parseExpression("${FOO}") - self.assertReferences(set(["FOO"])) - - def test_nested_reference(self): - self.setEmptyVars(["BAR"]) - self.d.setVar("FOO", "BAR") - self.parseExpression("${${FOO}}") - self.assertReferences(set(["FOO", "BAR"])) - - def test_python_reference(self): - self.setEmptyVars(["BAR"]) - self.parseExpression("${@d.getVar('BAR') + 'foo'}") - self.assertReferences(set(["BAR"])) - - def test_python_exec_reference(self): - self.parseExpression("${@eval('3 * 5')}") - self.assertReferences(set()) - self.assertExecs(set(["eval"])) - -class ShellReferenceTest(ReferenceTest): - - def parseExpression(self, exp): - parsedvar = self.d.expandWithRefs(exp, None) - parser = bb.codeparser.ShellParser("ParserTest", logger) - parser.parse_shell(parsedvar.value) - - self.references = parsedvar.references - self.execs = parser.execs - - def test_quotes_inside_assign(self): - self.parseExpression('foo=foo"bar"baz') - self.assertReferences(set([])) - - def test_quotes_inside_arg(self): - self.parseExpression('sed s#"bar baz"#"alpha beta"#g') - self.assertExecs(set(["sed"])) - - def test_arg_continuation(self): - 
self.parseExpression("sed -i -e s,foo,bar,g \\\n *.pc") - self.assertExecs(set(["sed"])) - - def test_dollar_in_quoted(self): - self.parseExpression('sed -i -e "foo$" *.pc') - self.assertExecs(set(["sed"])) - - def test_quotes_inside_arg_continuation(self): - self.setEmptyVars(["bindir", "D", "libdir"]) - self.parseExpression(""" -sed -i -e s#"moc_location=.*$"#"moc_location=${bindir}/moc4"# \\ --e s#"uic_location=.*$"#"uic_location=${bindir}/uic4"# \\ -${D}${libdir}/pkgconfig/*.pc -""") - self.assertReferences(set(["bindir", "D", "libdir"])) - - def test_assign_subshell_expansion(self): - self.parseExpression("foo=$(echo bar)") - self.assertExecs(set(["echo"])) - - def test_assign_subshell_expansion_quotes(self): - self.parseExpression('foo="$(echo bar)"') - self.assertExecs(set(["echo"])) - - def test_assign_subshell_expansion_nested(self): - self.parseExpression('foo="$(func1 "$(func2 bar$(func3))")"') - self.assertExecs(set(["func1", "func2", "func3"])) - - def test_assign_subshell_expansion_multiple(self): - self.parseExpression('foo="$(func1 "$(func2)") $(func3)"') - self.assertExecs(set(["func1", "func2", "func3"])) - - def test_assign_subshell_expansion_escaped_quotes(self): - self.parseExpression('foo="\\"fo\\"o$(func1)"') - self.assertExecs(set(["func1"])) - - def test_assign_subshell_expansion_empty(self): - self.parseExpression('foo="bar$()foo"') - self.assertExecs(set()) - - def test_assign_subshell_backticks(self): - self.parseExpression("foo=`echo bar`") - self.assertExecs(set(["echo"])) - - def test_assign_subshell_backticks_quotes(self): - self.parseExpression('foo="`echo bar`"') - self.assertExecs(set(["echo"])) - - def test_assign_subshell_backticks_multiple(self): - self.parseExpression('foo="`func1 bar` `func2`"') - self.assertExecs(set(["func1", "func2"])) - - def test_assign_subshell_backticks_escaped_quotes(self): - self.parseExpression('foo="\\"fo\\"o`func1`"') - self.assertExecs(set(["func1"])) - - def test_assign_subshell_backticks_empty(self): - self.parseExpression('foo="bar``foo"') - self.assertExecs(set()) - - def test_shell_unexpanded(self): - self.setEmptyVars(["QT_BASE_NAME"]) - self.parseExpression('echo "${QT_BASE_NAME}"') - self.assertExecs(set(["echo"])) - self.assertReferences(set(["QT_BASE_NAME"])) - - def test_incomplete_varexp_single_quotes(self): - self.parseExpression("sed -i -e 's:IP{:I${:g' $pc") - self.assertExecs(set(["sed"])) - - def test_parameter_expansion_modifiers(self): - # -,+ and : are also valid modifiers for parameter expansion, but are - # valid characters in bitbake variable names, so are not included here - for i in ('=', '?', '#', '%', '##', '%%'): - name = "foo%sbar" % i - self.parseExpression("${%s}" % name) - self.assertNotIn(name, self.references) - - def test_until(self): - self.parseExpression("until false; do echo true; done") - self.assertExecs(set(["false", "echo"])) - self.assertReferences(set()) - - def test_case(self): - self.parseExpression(""" -case $foo in -*) -bar -;; -esac -""") - self.assertExecs(set(["bar"])) - self.assertReferences(set()) - - def test_assign_exec(self): - self.parseExpression("a=b c='foo bar' alpha 1 2 3") - self.assertExecs(set(["alpha"])) - - def test_redirect_to_file(self): - self.setEmptyVars(["foo"]) - self.parseExpression("echo foo >${foo}/bar") - self.assertExecs(set(["echo"])) - self.assertReferences(set(["foo"])) - - def test_heredoc(self): - self.setEmptyVars(["theta"]) - self.parseExpression(""" -cat <${B}/cachedpaths -shadow_cv_maildir=${SHADOW_MAILDIR} 
-shadow_cv_mailfile=${SHADOW_MAILFILE} -shadow_cv_utmpdir=${SHADOW_UTMPDIR} -shadow_cv_logdir=${SHADOW_LOGDIR} -shadow_cv_passwd_dir=${bindir} -END -""") - self.assertReferences(set(v)) - self.assertExecs(set(["cat"])) - -# def test_incomplete_command_expansion(self): -# self.assertRaises(reftracker.ShellSyntaxError, reftracker.execs, -# bbvalue.shparse("cp foo`", self.d), self.d) - -# def test_rogue_dollarsign(self): -# self.setValues({"D" : "/tmp"}) -# self.parseExpression("install -d ${D}$") -# self.assertReferences(set(["D"])) -# self.assertExecs(set(["install"])) - - -class PythonReferenceTest(ReferenceTest): - - def setUp(self): - self.d = bb.data.init() - if hasattr(bb.utils, "_context"): - self.context = bb.utils._context - else: - import builtins - self.context = builtins.__dict__ - - def parseExpression(self, exp): - parsedvar = self.d.expandWithRefs(exp, None) - parser = bb.codeparser.PythonParser("ParserTest", logger) - parser.parse_python(parsedvar.value) - - self.references = parsedvar.references | parser.references - self.execs = parser.execs - self.contains = parser.contains - - @staticmethod - def indent(value): - """Python Snippets have to be indented, python values don't have to -be. These unit tests are testing snippets.""" - return " " + value - - def test_getvar_reference(self): - self.parseExpression("d.getVar('foo')") - self.assertReferences(set(["foo"])) - self.assertExecs(set()) - - def test_getvar_computed_reference(self): - self.parseExpression("d.getVar('f' + 'o' + 'o')") - self.assertReferences(set()) - self.assertExecs(set()) - - def test_getvar_exec_reference(self): - self.parseExpression("eval('d.getVar(\"foo\")')") - self.assertReferences(set()) - self.assertExecs(set(["eval"])) - - def test_var_reference(self): - self.context["foo"] = lambda x: x - self.setEmptyVars(["FOO"]) - self.parseExpression("foo('${FOO}')") - self.assertReferences(set(["FOO"])) - self.assertExecs(set(["foo"])) - del self.context["foo"] - - def test_var_exec(self): - for etype in ("func", "task"): - self.d.setVar("do_something", "echo 'hi mom! 
${FOO}'") - self.d.setVarFlag("do_something", etype, True) - self.parseExpression("bb.build.exec_func('do_something', d)") - self.assertReferences(set([])) - self.assertExecs(set(["do_something"])) - - def test_function_reference(self): - self.context["testfunc"] = lambda msg: bb.msg.note(1, None, msg) - self.d.setVar("FOO", "Hello, World!") - self.parseExpression("testfunc('${FOO}')") - self.assertReferences(set(["FOO"])) - self.assertExecs(set(["testfunc"])) - del self.context["testfunc"] - - def test_qualified_function_reference(self): - self.parseExpression("time.time()") - self.assertExecs(set(["time.time"])) - - def test_qualified_function_reference_2(self): - self.parseExpression("os.path.dirname('/foo/bar')") - self.assertExecs(set(["os.path.dirname"])) - - def test_qualified_function_reference_nested(self): - self.parseExpression("time.strftime('%Y%m%d',time.gmtime())") - self.assertExecs(set(["time.strftime", "time.gmtime"])) - - def test_function_reference_chained(self): - self.context["testget"] = lambda: "\tstrip me " - self.parseExpression("testget().strip()") - self.assertExecs(set(["testget"])) - del self.context["testget"] - - def test_contains(self): - self.parseExpression('bb.utils.contains("TESTVAR", "one", "true", "false", d)') - self.assertContains({'TESTVAR': {'one'}}) - - def test_contains_multi(self): - self.parseExpression('bb.utils.contains("TESTVAR", "one two", "true", "false", d)') - self.assertContains({'TESTVAR': {'one two'}}) - - def test_contains_any(self): - self.parseExpression('bb.utils.contains_any("TESTVAR", "hello", "true", "false", d)') - self.assertContains({'TESTVAR': {'hello'}}) - - def test_contains_any_multi(self): - self.parseExpression('bb.utils.contains_any("TESTVAR", "one two three", "true", "false", d)') - self.assertContains({'TESTVAR': {'one', 'two', 'three'}}) - - def test_contains_filter(self): - self.parseExpression('bb.utils.filter("TESTVAR", "hello there world", d)') - self.assertContains({'TESTVAR': {'hello', 'there', 'world'}}) - - -class DependencyReferenceTest(ReferenceTest): - - pydata = """ -d.getVar('somevar') -def test(d): - foo = 'bar %s' % 'foo' -def test2(d): - d.getVar(foo) - d.getVar('bar', False) - test2(d) - -def a(): - \"\"\"some - stuff - \"\"\" - return "heh" - -test(d) - -d.expand(d.getVar("something", False)) -d.expand("${inexpand} somethingelse") -d.getVar(a(), False) -""" - - def test_python(self): - self.d.setVar("FOO", self.pydata) - self.setEmptyVars(["inexpand", "a", "test2", "test"]) - self.d.setVarFlags("FOO", { - "func": True, - "python": True, - "lineno": 1, - "filename": "example.bb", - }) - - deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d) - - self.assertEqual(deps, set(["somevar", "bar", "something", "inexpand", "test", "test2", "a"])) - - - shelldata = """ -foo () { -bar -} -{ -echo baz -$(heh) -eval `moo` -} -a=b -c=d -( -true && false -test -f foo -testval=something -$testval -) || aiee -! 
inverted -echo ${somevar} - -case foo in -bar) -echo bar -;; -baz) -echo baz -;; -foo*) -echo foo -;; -esac -""" - - def test_shell(self): - execs = ["bar", "echo", "heh", "moo", "true", "aiee"] - self.d.setVar("somevar", "heh") - self.d.setVar("inverted", "echo inverted...") - self.d.setVarFlag("inverted", "func", True) - self.d.setVar("FOO", self.shelldata) - self.d.setVarFlags("FOO", {"func": True}) - self.setEmptyVars(execs) - - deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d) - - self.assertEqual(deps, set(["somevar", "inverted"] + execs)) - - - def test_vardeps(self): - self.d.setVar("oe_libinstall", "echo test") - self.d.setVar("FOO", "foo=oe_libinstall; eval $foo") - self.d.setVarFlag("FOO", "vardeps", "oe_libinstall") - - deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d) - - self.assertEqual(deps, set(["oe_libinstall"])) - - def test_vardeps_expand(self): - self.d.setVar("oe_libinstall", "echo test") - self.d.setVar("FOO", "foo=oe_libinstall; eval $foo") - self.d.setVarFlag("FOO", "vardeps", "${@'oe_libinstall'}") - - deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d) - - self.assertEqual(deps, set(["oe_libinstall"])) - - def test_contains_vardeps(self): - expr = '${@bb.utils.filter("TESTVAR", "somevalue anothervalue", d)} \ - ${@bb.utils.contains("TESTVAR", "testval testval2", "yetanothervalue", "", d)} \ - ${@bb.utils.contains("TESTVAR", "testval2 testval3", "blah", "", d)} \ - ${@bb.utils.contains_any("TESTVAR", "testval2 testval3", "lastone", "", d)}' - parsedvar = self.d.expandWithRefs(expr, None) - # Check contains - self.assertEqual(parsedvar.contains, {'TESTVAR': {'testval2 testval3', 'anothervalue', 'somevalue', 'testval testval2', 'testval2', 'testval3'}}) - # Check dependencies - self.d.setVar('ANOTHERVAR', expr) - self.d.setVar('TESTVAR', 'anothervalue testval testval2') - deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d) - self.assertEqual(sorted(values.splitlines()), - sorted([expr, - 'TESTVAR{anothervalue} = Set', - 'TESTVAR{somevalue} = Unset', - 'TESTVAR{testval testval2} = Set', - 'TESTVAR{testval2 testval3} = Unset', - 'TESTVAR{testval2} = Set', - 'TESTVAR{testval3} = Unset' - ])) - # Check final value - self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['anothervalue', 'yetanothervalue', 'lastone']) - - def test_contains_vardeps_excluded(self): - # Check the ignored_vars option to build_dependencies is handled by contains functionality - varval = '${TESTVAR2} ${@bb.utils.filter("TESTVAR", "somevalue anothervalue", d)}' - self.d.setVar('ANOTHERVAR', varval) - self.d.setVar('TESTVAR', 'anothervalue testval testval2') - self.d.setVar('TESTVAR2', 'testval3') - deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), set(["TESTVAR"]), self.d, self.d) - self.assertEqual(sorted(values.splitlines()), sorted([varval])) - self.assertEqual(deps, set(["TESTVAR2"])) - self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['testval3', 'anothervalue']) - - # Check the vardepsexclude flag is handled by contains functionality - self.d.setVarFlag('ANOTHERVAR', 'vardepsexclude', 'TESTVAR') - deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d) - self.assertEqual(sorted(values.splitlines()), sorted([varval])) - 
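The TESTVAR{...} = Set/Unset bookkeeping asserted above follows bb.utils.contains() semantics: the true branch is taken only when every word of the check string appears in the variable's value. A rough standalone equivalent (not the real bb.utils API, which also takes the true/false results and the datastore):

    def contains(value, checkvalues):
        # True only if *all* words in checkvalues occur in value
        return set(checkvalues.split()).issubset(value.split())

    val = "anothervalue testval testval2"
    print(contains(val, "testval testval2"))   # True  -> recorded as "Set"
    print(contains(val, "testval2 testval3"))  # False -> recorded as "Unset"
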
self.assertEqual(deps, set(["TESTVAR2"])) - self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['testval3', 'anothervalue']) - - def test_contains_vardeps_override_operators(self): - # Check override operators handle dependencies correctly with the contains functionality - expr_plain = 'testval' - expr_prepend = '${@bb.utils.filter("TESTVAR1", "testval1", d)} ' - expr_append = ' ${@bb.utils.filter("TESTVAR2", "testval2", d)}' - expr_remove = '${@bb.utils.contains("TESTVAR3", "no-testval", "testval", "", d)}' - # Check dependencies - self.d.setVar('ANOTHERVAR', expr_plain) - self.d.prependVar('ANOTHERVAR', expr_prepend) - self.d.appendVar('ANOTHERVAR', expr_append) - self.d.setVar('ANOTHERVAR:remove', expr_remove) - self.d.setVar('TESTVAR1', 'blah') - self.d.setVar('TESTVAR2', 'testval2') - self.d.setVar('TESTVAR3', 'no-testval') - deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d) - self.assertEqual(sorted(values.splitlines()), - sorted([ - expr_prepend + expr_plain + expr_append, - '_remove of ' + expr_remove, - 'TESTVAR1{testval1} = Unset', - 'TESTVAR2{testval2} = Set', - 'TESTVAR3{no-testval} = Set', - ])) - # Check final value - self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['testval2']) - - #Currently no wildcard support - #def test_vardeps_wildcards(self): - # self.d.setVar("oe_libinstall", "echo test") - # self.d.setVar("FOO", "foo=oe_libinstall; eval $foo") - # self.d.setVarFlag("FOO", "vardeps", "oe_*") - # self.assertEqual(deps, set(["oe_libinstall"])) - - diff --git a/bitbake/lib/bb/tests/color.py b/bitbake/lib/bb/tests/color.py deleted file mode 100644 index bb70cb393d..0000000000 --- a/bitbake/lib/bb/tests/color.py +++ /dev/null @@ -1,95 +0,0 @@ -# -# BitBake Test for ANSI color code filtering -# -# Copyright (C) 2020 Agilent Technologies, Inc. 
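The ShellReferenceTest cases earlier in this file can also be reproduced outside the test harness. Assuming a BitBake checkout on sys.path, the parser is driven directly, and execs picks up command names even inside nested $(...) substitutions (expression and expected result taken from test_assign_subshell_expansion_multiple above):

    import logging
    import bb.codeparser

    parser = bb.codeparser.ShellParser("demo", logging.getLogger("BitBake.demo"))
    parser.parse_shell('foo="$(func1 "$(func2)") $(func3)"')
    print(sorted(parser.execs))  # ['func1', 'func2', 'func3']
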
-# Author: Chris Laplante -# -# SPDX-License-Identifier: MIT -# - -import unittest -import bb.progress -import bb.data -import bb.event -from bb.progress import filter_color, filter_color_n -import io -import re - - -class ProgressWatcher: - def __init__(self): - self._reports = [] - - def handle_event(self, event, d): - self._reports.append((event.progress, event.rate)) - - def reports(self): - return self._reports - - -class ColorCodeTests(unittest.TestCase): - def setUp(self): - self.d = bb.data.init() - self._progress_watcher = ProgressWatcher() - bb.event.register("bb.build.TaskProgress", self._progress_watcher.handle_event, data=self.d) - - def tearDown(self): - bb.event.remove("bb.build.TaskProgress", None) - - def test_filter_color(self): - input_string = "~~~~~~~~~~~~^~~~~~~~" - filtered = filter_color(input_string) - self.assertEqual(filtered, "~~~~~~~~~~~~^~~~~~~~") - - def test_filter_color_n(self): - input_string = "~~~~~~~~~~~~^~~~~~~~" - filtered, code_count = filter_color_n(input_string) - self.assertEqual(filtered, "~~~~~~~~~~~~^~~~~~~~") - self.assertEqual(code_count, 4) - - def test_LineFilterProgressHandler_color_filtering(self): - class CustomProgressHandler(bb.progress.LineFilterProgressHandler): - PROGRESS_REGEX = re.compile(r"Progress: (?P\d+)%") - - def writeline(self, line): - match = self.PROGRESS_REGEX.match(line) - if match: - self.update(int(match.group("progress"))) - return False - return True - - buffer = io.StringIO() - handler = CustomProgressHandler(self.d, buffer) - handler.write("Program output!\n") - handler.write("More output!\n") - handler.write("Progress: 10%\n") # 10% - handler.write("Even more\n") - handler.write("Progress: 50%\n") # 50% - handler.write("Progress: 60%\n") # 60% - handler.write("Progress: 100%\n") # 100% - - expected = [(10, None), (50, None), (60, None), (100, None)] - self.assertEqual(self._progress_watcher.reports(), expected) - - self.assertEqual(buffer.getvalue(), "Program output!\nMore output!\nEven more\n") - - def test_BasicProgressHandler_color_filtering(self): - buffer = io.StringIO() - handler = bb.progress.BasicProgressHandler(self.d, outfile=buffer) - handler.write("1%\n") # 1% - handler.write("2%\n") # 2% - handler.write("10%\n") # 10% - handler.write("100%\n") # 100% - - expected = [(0, None), (1, None), (2, None), (10, None), (100, None)] - self.assertListEqual(self._progress_watcher.reports(), expected) - - def test_OutOfProgressHandler_color_filtering(self): - buffer = io.StringIO() - handler = bb.progress.OutOfProgressHandler(self.d, r'(\d+) of (\d+)', outfile=buffer) - handler.write("Text text 1 of 5") # 1/5 - handler.write("Text text 3 of 5") # 3/5 - handler.write("Text text 5 of 5") # 5/5 - - expected = [(0, None), (20.0, None), (60.0, None), (100.0, None)] - self.assertListEqual(self._progress_watcher.reports(), expected) diff --git a/bitbake/lib/bb/tests/compression.py b/bitbake/lib/bb/tests/compression.py deleted file mode 100644 index 16c297b315..0000000000 --- a/bitbake/lib/bb/tests/compression.py +++ /dev/null @@ -1,100 +0,0 @@ -# -# Copyright BitBake Contributors -# -# SPDX-License-Identifier: GPL-2.0-only -# - -from pathlib import Path -import bb.compress.lz4 -import bb.compress.zstd -import contextlib -import os -import shutil -import tempfile -import unittest -import subprocess - - -class CompressionTests(object): - def setUp(self): - self._t = tempfile.TemporaryDirectory() - self.tmpdir = Path(self._t.name) - self.addCleanup(self._t.cleanup) - - def _file_helper(self, mode_suffix, data): - tmp_file 
= self.tmpdir / "compressed" - - with self.do_open(tmp_file, mode="w" + mode_suffix) as f: - f.write(data) - - with self.do_open(tmp_file, mode="r" + mode_suffix) as f: - read_data = f.read() - - self.assertEqual(read_data, data) - - def test_text_file(self): - self._file_helper("t", "Hello") - - def test_binary_file(self): - self._file_helper("b", "Hello".encode("utf-8")) - - def _pipe_helper(self, mode_suffix, data): - rfd, wfd = os.pipe() - with open(rfd, "rb") as r, open(wfd, "wb") as w: - with self.do_open(r, mode="r" + mode_suffix) as decompress: - with self.do_open(w, mode="w" + mode_suffix) as compress: - compress.write(data) - read_data = decompress.read() - - self.assertEqual(read_data, data) - - def test_text_pipe(self): - self._pipe_helper("t", "Hello") - - def test_binary_pipe(self): - self._pipe_helper("b", "Hello".encode("utf-8")) - - def test_bad_decompress(self): - tmp_file = self.tmpdir / "compressed" - with tmp_file.open("wb") as f: - f.write(b"\x00") - - with self.assertRaises(OSError): - with self.do_open(tmp_file, mode="rb", stderr=subprocess.DEVNULL) as f: - data = f.read() - - -class LZ4Tests(CompressionTests, unittest.TestCase): - def setUp(self): - if shutil.which("lz4") is None: - self.skipTest("'lz4' not found") - super().setUp() - - @contextlib.contextmanager - def do_open(self, *args, **kwargs): - with bb.compress.lz4.open(*args, **kwargs) as f: - yield f - - -class ZStdTests(CompressionTests, unittest.TestCase): - def setUp(self): - if shutil.which("zstd") is None: - self.skipTest("'zstd' not found") - super().setUp() - - @contextlib.contextmanager - def do_open(self, *args, **kwargs): - with bb.compress.zstd.open(*args, **kwargs) as f: - yield f - - -class PZStdTests(CompressionTests, unittest.TestCase): - def setUp(self): - if shutil.which("pzstd") is None: - self.skipTest("'pzstd' not found") - super().setUp() - - @contextlib.contextmanager - def do_open(self, *args, **kwargs): - with bb.compress.zstd.open(*args, num_threads=2, **kwargs) as f: - yield f diff --git a/bitbake/lib/bb/tests/cooker.py b/bitbake/lib/bb/tests/cooker.py deleted file mode 100644 index 9e524ae345..0000000000 --- a/bitbake/lib/bb/tests/cooker.py +++ /dev/null @@ -1,71 +0,0 @@ -# -# BitBake Tests for cooker.py -# -# Copyright BitBake Contributors -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import unittest -import os -import bb, bb.cooker -import re -import logging - -# Cooker tests -class CookerTest(unittest.TestCase): - def setUp(self): - # At least one variable needs to be set - self.d = bb.data.init() - topdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testdata/cooker") - self.d.setVar('TOPDIR', topdir) - - def test_CookerCollectFiles_sublayers(self): - '''Test that a sublayer of an existing layer does not trigger - No bb files matched ...''' - - def append_collection(topdir, path, d): - collection = path.split('/')[-1] - pattern = "^" + topdir + "/" + path + "/" - regex = re.compile(pattern) - priority = 5 - - d.setVar('BBFILE_COLLECTIONS', (d.getVar('BBFILE_COLLECTIONS') or "") + " " + collection) - d.setVar('BBFILE_PATTERN_%s' % (collection), pattern) - d.setVar('BBFILE_PRIORITY_%s' % (collection), priority) - - return (collection, pattern, regex, priority) - - topdir = self.d.getVar("TOPDIR") - - # Priorities: list of (collection, pattern, regex, priority) - bbfile_config_priorities = [] - # Order is important for this test, shortest to longest is typical failure case - bbfile_config_priorities.append( append_collection(topdir, 'first', self.d) ) - 
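Stepping back to the compression tests above: the _file_helper() round-trip is the heart of all three backend classes, and the same check runs against the standard library alone if gzip stands in for the bb.compress wrappers (purely illustrative; gzip is not one of the tested backends):

    import gzip
    import tempfile
    from pathlib import Path

    with tempfile.TemporaryDirectory() as t:
        tmp_file = Path(t) / "compressed"
        data = "Hello"
        with gzip.open(tmp_file, "wt") as f:   # "w" + mode suffix "t", as in _file_helper
            f.write(data)
        with gzip.open(tmp_file, "rt") as f:
            assert f.read() == data
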
bbfile_config_priorities.append( append_collection(topdir, 'second', self.d) ) - bbfile_config_priorities.append( append_collection(topdir, 'second/third', self.d) ) - - pkgfns = [ topdir + '/first/recipes/sample1_1.0.bb', - topdir + '/second/recipes/sample2_1.0.bb', - topdir + '/second/third/recipes/sample3_1.0.bb' ] - - class LogHandler(logging.Handler): - def __init__(self): - logging.Handler.__init__(self) - self.logdata = [] - - def emit(self, record): - self.logdata.append(record.getMessage()) - - # Move cooker to use my special logging - logger = bb.cooker.logger - log_handler = LogHandler() - logger.addHandler(log_handler) - collection = bb.cooker.CookerCollectFiles(bbfile_config_priorities) - collection.collection_priorities(pkgfns, pkgfns, self.d) - logger.removeHandler(log_handler) - - # Should be empty (no generated messages) - expected = [] - - self.assertEqual(log_handler.logdata, expected) diff --git a/bitbake/lib/bb/tests/cow.py b/bitbake/lib/bb/tests/cow.py deleted file mode 100644 index 75142649c4..0000000000 --- a/bitbake/lib/bb/tests/cow.py +++ /dev/null @@ -1,317 +0,0 @@ -# -# BitBake Tests for Copy-on-Write (cow.py) -# -# SPDX-License-Identifier: GPL-2.0-only -# -# Copyright 2006 Holger Freyther -# Copyright (C) 2020 Agilent Technologies, Inc. -# - -import io -import re -import sys -import unittest -import contextlib -import collections - -from bb.COW import COWDictBase, COWSetBase, COWDictMeta, COWSetMeta - - -class COWTestCase(unittest.TestCase): - """ - Test case for the COW module from mithro - """ - - def setUp(self): - self._track_warnings = False - self._warning_file = io.StringIO() - self._unhandled_warnings = collections.deque() - COWDictBase.__warn__ = self._warning_file - - def tearDown(self): - COWDictBase.__warn__ = sys.stderr - if self._track_warnings: - self._checkAllWarningsRead() - - def trackWarnings(self): - self._track_warnings = True - - def _collectWarnings(self): - self._warning_file.seek(0) - for warning in self._warning_file: - self._unhandled_warnings.append(warning.rstrip("\n")) - self._warning_file.truncate(0) - self._warning_file.seek(0) - - def _checkAllWarningsRead(self): - self._collectWarnings() - self.assertSequenceEqual(self._unhandled_warnings, []) - - @contextlib.contextmanager - def checkReportsWarning(self, expected_warning): - self._checkAllWarningsRead() - yield - self._collectWarnings() - warning = self._unhandled_warnings.popleft() - self.assertEqual(warning, expected_warning) - - def checkStrOutput(self, obj, expected_levels, expected_keys): - if obj.__class__ is COWDictMeta: - expected_class_name = "COWDict" - elif obj.__class__ is COWSetMeta: - expected_class_name = "COWSet" - else: - self.fail("obj is of unknown type {0}".format(type(obj))) - s = str(obj) - regex = re.compile(r"<(\w+) Level: (\d+) Current Keys: (\d+)>") - match = regex.match(s) - self.assertIsNotNone(match, "bad str output: '{0}'".format(s)) - class_name = match.group(1) - self.assertEqual(class_name, expected_class_name) - levels = int(match.group(2)) - self.assertEqual(levels, expected_levels, "wrong # levels in str: '{0}'".format(s)) - keys = int(match.group(3)) - self.assertEqual(keys, expected_keys, "wrong # keys in str: '{0}'".format(s)) - - def testGetSet(self): - """ - Test and set - """ - a = COWDictBase.copy() - - self.assertEqual(False, 'a' in a) - - a['a'] = 'a' - a['b'] = 'b' - self.assertEqual(True, 'a' in a) - self.assertEqual(True, 'b' in a) - self.assertEqual('a', a['a']) - self.assertEqual('b', a['b']) - - def testCopyCopy(self): - """ - 
Test the copy of copies - """ - - # create two COW dict 'instances' - b = COWDictBase.copy() - c = COWDictBase.copy() - - # assign some keys to one instance, some keys to another - b['a'] = 10 - b['c'] = 20 - c['a'] = 30 - - # test separation of the two instances - self.assertEqual(False, 'c' in c) - self.assertEqual(30, c['a']) - self.assertEqual(10, b['a']) - - # test copy - b_2 = b.copy() - c_2 = c.copy() - - self.assertEqual(False, 'c' in c_2) - self.assertEqual(10, b_2['a']) - - b_2['d'] = 40 - self.assertEqual(False, 'd' in c_2) - self.assertEqual(True, 'd' in b_2) - self.assertEqual(40, b_2['d']) - self.assertEqual(False, 'd' in b) - self.assertEqual(False, 'd' in c) - - c_2['d'] = 30 - self.assertEqual(True, 'd' in c_2) - self.assertEqual(True, 'd' in b_2) - self.assertEqual(30, c_2['d']) - self.assertEqual(40, b_2['d']) - self.assertEqual(False, 'd' in b) - self.assertEqual(False, 'd' in c) - - # test copy of the copy - c_3 = c_2.copy() - b_3 = b_2.copy() - b_3_2 = b_2.copy() - - c_3['e'] = 4711 - self.assertEqual(4711, c_3['e']) - self.assertEqual(False, 'e' in c_2) - self.assertEqual(False, 'e' in b_3) - self.assertEqual(False, 'e' in b_3_2) - self.assertEqual(False, 'e' in b_2) - - b_3['e'] = 'viel' - self.assertEqual('viel', b_3['e']) - self.assertEqual(4711, c_3['e']) - self.assertEqual(False, 'e' in c_2) - self.assertEqual(True, 'e' in b_3) - self.assertEqual(False, 'e' in b_3_2) - self.assertEqual(False, 'e' in b_2) - - def testCow(self): - self.trackWarnings() - - c = COWDictBase.copy() - c['123'] = 1027 - c['other'] = 4711 - c['d'] = {'abc': 10, 'bcd': 20} - - copy = c.copy() - - self.assertEqual(1027, c['123']) - self.assertEqual(4711, c['other']) - self.assertEqual({'abc': 10, 'bcd': 20}, c['d']) - self.assertEqual(1027, copy['123']) - self.assertEqual(4711, copy['other']) - with self.checkReportsWarning("Warning: Doing a copy because d is a mutable type."): - self.assertEqual({'abc': 10, 'bcd': 20}, copy['d']) - - # cow it now - copy['123'] = 1028 - copy['other'] = 4712 - copy['d']['abc'] = 20 - - self.assertEqual(1027, c['123']) - self.assertEqual(4711, c['other']) - self.assertEqual({'abc': 10, 'bcd': 20}, c['d']) - self.assertEqual(1028, copy['123']) - self.assertEqual(4712, copy['other']) - self.assertEqual({'abc': 20, 'bcd': 20}, copy['d']) - - def testOriginalTestSuite(self): - # This test suite is a port of the original one from COW.py - self.trackWarnings() - - a = COWDictBase.copy() - self.checkStrOutput(a, 1, 0) - - a['a'] = 'a' - a['b'] = 'b' - a['dict'] = {} - self.checkStrOutput(a, 1, 4) # 4th member is dict__mutable__ - - b = a.copy() - self.checkStrOutput(b, 2, 0) - b['c'] = 'b' - self.checkStrOutput(b, 2, 1) - - with self.checkReportsWarning("Warning: If you aren't going to change any of the values call with True."): - self.assertListEqual(list(a.iteritems()), - [('a', 'a'), - ('b', 'b'), - ('dict', {}) - ]) - - with self.checkReportsWarning("Warning: If you aren't going to change any of the values call with True."): - b_gen = b.iteritems() - self.assertTupleEqual(next(b_gen), ('a', 'a')) - self.assertTupleEqual(next(b_gen), ('b', 'b')) - self.assertTupleEqual(next(b_gen), ('c', 'b')) - with self.checkReportsWarning("Warning: Doing a copy because dict is a mutable type."): - self.assertTupleEqual(next(b_gen), ('dict', {})) - with self.assertRaises(StopIteration): - next(b_gen) - - b['dict']['a'] = 'b' - b['a'] = 'c' - - self.checkStrOutput(a, 1, 4) - self.checkStrOutput(b, 2, 3) - - with self.checkReportsWarning("Warning: If you aren't going to change 
any of the values call with True."): - self.assertListEqual(list(a.iteritems()), - [('a', 'a'), - ('b', 'b'), - ('dict', {}) - ]) - - with self.checkReportsWarning("Warning: If you aren't going to change any of the values call with True."): - b_gen = b.iteritems() - self.assertTupleEqual(next(b_gen), ('a', 'c')) - self.assertTupleEqual(next(b_gen), ('b', 'b')) - self.assertTupleEqual(next(b_gen), ('c', 'b')) - self.assertTupleEqual(next(b_gen), ('dict', {'a': 'b'})) - with self.assertRaises(StopIteration): - next(b_gen) - - with self.assertRaises(KeyError): - print(b["dict2"]) - - a['set'] = COWSetBase() - a['set'].add("o1") - a['set'].add("o1") - a['set'].add("o2") - self.assertSetEqual(set(a['set'].itervalues()), {"o1", "o2"}) - self.assertSetEqual(set(b['set'].itervalues()), {"o1", "o2"}) - - b['set'].add('o3') - self.assertSetEqual(set(a['set'].itervalues()), {"o1", "o2"}) - self.assertSetEqual(set(b['set'].itervalues()), {"o1", "o2", "o3"}) - - a['set2'] = set() - a['set2'].add("o1") - a['set2'].add("o1") - a['set2'].add("o2") - - # We don't expect 'a' to change anymore - def check_a(): - with self.checkReportsWarning("Warning: If you aren't going to change any of the values call with True."): - a_gen = a.iteritems() - self.assertTupleEqual(next(a_gen), ('a', 'a')) - self.assertTupleEqual(next(a_gen), ('b', 'b')) - self.assertTupleEqual(next(a_gen), ('dict', {})) - self.assertTupleEqual(next(a_gen), ('set2', {'o1', 'o2'})) - a_sub_set = next(a_gen) - self.assertEqual(a_sub_set[0], 'set') - self.checkStrOutput(a_sub_set[1], 1, 2) - self.assertSetEqual(set(a_sub_set[1].itervalues()), {'o1', 'o2'}) - - check_a() - - b_gen = b.iteritems(readonly=True) - self.assertTupleEqual(next(b_gen), ('a', 'c')) - self.assertTupleEqual(next(b_gen), ('b', 'b')) - self.assertTupleEqual(next(b_gen), ('c', 'b')) - self.assertTupleEqual(next(b_gen), ('dict', {'a': 'b'})) - self.assertTupleEqual(next(b_gen), ('set2', {'o1', 'o2'})) - b_sub_set = next(b_gen) - self.assertEqual(b_sub_set[0], 'set') - self.checkStrOutput(b_sub_set[1], 2, 1) - self.assertSetEqual(set(b_sub_set[1].itervalues()), {'o1', 'o2', 'o3'}) - - del b['b'] - with self.assertRaises(KeyError): - print(b['b']) - self.assertFalse('b' in b) - - check_a() - - b.__revertitem__('b') - check_a() - self.assertEqual(b['b'], 'b') - self.assertTrue('b' in b) - - b.__revertitem__('dict') - check_a() - - b_gen = b.iteritems(readonly=True) - self.assertTupleEqual(next(b_gen), ('a', 'c')) - self.assertTupleEqual(next(b_gen), ('b', 'b')) - self.assertTupleEqual(next(b_gen), ('c', 'b')) - self.assertTupleEqual(next(b_gen), ('dict', {})) - self.assertTupleEqual(next(b_gen), ('set2', {'o1', 'o2'})) - b_sub_set = next(b_gen) - self.assertEqual(b_sub_set[0], 'set') - self.checkStrOutput(b_sub_set[1], 2, 1) - self.assertSetEqual(set(b_sub_set[1].itervalues()), {'o1', 'o2', 'o3'}) - - self.checkStrOutput(a, 1, 6) - self.checkStrOutput(b, 2, 3) - - def testSetMethods(self): - s = COWSetBase() - with self.assertRaises(TypeError): - print(s.iteritems()) - with self.assertRaises(TypeError): - print(s.iterkeys()) diff --git a/bitbake/lib/bb/tests/data.py b/bitbake/lib/bb/tests/data.py deleted file mode 100644 index a895f6a58e..0000000000 --- a/bitbake/lib/bb/tests/data.py +++ /dev/null @@ -1,620 +0,0 @@ -# -# BitBake Tests for the Data Store (data.py/data_smart.py) -# -# Copyright (C) 2010 Chris Larson -# Copyright (C) 2012 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import unittest -import bb -import bb.data -import bb.parse -import logging 
-import os - -class LogRecord(): - def __enter__(self): - logs = [] - class LogHandler(logging.Handler): - def emit(self, record): - logs.append(record) - logger = logging.getLogger("BitBake") - handler = LogHandler() - self.handler = handler - logger.addHandler(handler) - return logs - def __exit__(self, type, value, traceback): - logger = logging.getLogger("BitBake") - logger.removeHandler(self.handler) - return - -def logContains(item, logs): - for l in logs: - m = l.getMessage() - if item in m: - return True - return False - -class DataExpansions(unittest.TestCase): - def setUp(self): - self.d = bb.data.init() - self.d["foo"] = "value_of_foo" - self.d["bar"] = "value_of_bar" - self.d["value_of_foo"] = "value_of_'value_of_foo'" - - def test_one_var(self): - val = self.d.expand("${foo}") - self.assertEqual(str(val), "value_of_foo") - - def test_indirect_one_var(self): - val = self.d.expand("${${foo}}") - self.assertEqual(str(val), "value_of_'value_of_foo'") - - def test_indirect_and_another(self): - val = self.d.expand("${${foo}} ${bar}") - self.assertEqual(str(val), "value_of_'value_of_foo' value_of_bar") - - def test_python_snippet(self): - val = self.d.expand("${@5*12}") - self.assertEqual(str(val), "60") - - def test_python_snippet_w_dict(self): - val = self.d.expand("${@{ 'green': 1, 'blue': 2 }['green']}") - self.assertEqual(str(val), "1") - - def test_python_unexpanded_multi(self): - self.d.setVar("bar", "${unsetvar}") - val = self.d.expand("${@2*2},${foo},${@d.getVar('foo') + ' ${bar}'},${foo}") - self.assertEqual(str(val), "4,value_of_foo,${@d.getVar('foo') + ' ${unsetvar}'},value_of_foo") - - def test_expand_in_python_snippet(self): - val = self.d.expand("${@'boo ' + '${foo}'}") - self.assertEqual(str(val), "boo value_of_foo") - - def test_python_snippet_getvar(self): - val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}") - self.assertEqual(str(val), "value_of_foo value_of_bar") - - def test_python_snippet_function_reference(self): - self.d.setVar("TESTVAL", "testvalue") - self.d.setVar("testfunc", 'd.getVar("TESTVAL")') - context = bb.utils.get_context() - context["testfunc"] = lambda d: d.getVar("TESTVAL") - val = self.d.expand("${@testfunc(d)}") - self.assertEqual(str(val), "testvalue") - - def test_python_snippet_builtin_metadata(self): - self.d.setVar("eval", "INVALID") - self.d.expand("${@eval('3')}") - - def test_python_unexpanded(self): - self.d.setVar("bar", "${unsetvar}") - val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}") - self.assertEqual(str(val), "${@d.getVar('foo') + ' ${unsetvar}'}") - - def test_python_snippet_syntax_error(self): - self.d.setVar("FOO", "${@foo = 5}") - self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True) - - def test_python_snippet_runtime_error(self): - self.d.setVar("FOO", "${@int('test')}") - self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True) - - def test_python_snippet_error_path(self): - self.d.setVar("FOO", "foo value ${BAR}") - self.d.setVar("BAR", "bar value ${@int('test')}") - self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True) - - def test_value_containing_value(self): - val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}") - self.assertEqual(str(val), "value_of_foo value_of_bar") - - def test_reference_undefined_var(self): - val = self.d.expand("${undefinedvar} meh") - self.assertEqual(str(val), "${undefinedvar} meh") - - def test_double_reference(self): - self.d.setVar("BAR", "bar value") - self.d.setVar("FOO", "${BAR} foo ${BAR}") - val = 
self.d.getVar("FOO") - self.assertEqual(str(val), "bar value foo bar value") - - def test_direct_recursion(self): - self.d.setVar("FOO", "${FOO}") - self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True) - - def test_indirect_recursion(self): - self.d.setVar("FOO", "${BAR}") - self.d.setVar("BAR", "${BAZ}") - self.d.setVar("BAZ", "${FOO}") - self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True) - - def test_recursion_exception(self): - self.d.setVar("FOO", "${BAR}") - self.d.setVar("BAR", "${${@'FOO'}}") - self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True) - - def test_incomplete_varexp_single_quotes(self): - self.d.setVar("FOO", "sed -i -e 's:IP{:I${:g' $pc") - val = self.d.getVar("FOO") - self.assertEqual(str(val), "sed -i -e 's:IP{:I${:g' $pc") - - def test_nonstring(self): - self.d.setVar("TEST", 5) - val = self.d.getVar("TEST") - self.assertEqual(str(val), "5") - - def test_rename(self): - self.d.renameVar("foo", "newfoo") - self.assertEqual(self.d.getVar("newfoo", False), "value_of_foo") - self.assertEqual(self.d.getVar("foo", False), None) - - def test_deletion(self): - self.d.delVar("foo") - self.assertEqual(self.d.getVar("foo", False), None) - - def test_keys(self): - keys = list(self.d.keys()) - self.assertCountEqual(keys, ['value_of_foo', 'foo', 'bar']) - - def test_keys_deletion(self): - newd = bb.data.createCopy(self.d) - newd.delVar("bar") - keys = list(newd.keys()) - self.assertCountEqual(keys, ['value_of_foo', 'foo']) - -class TestNestedExpansions(unittest.TestCase): - def setUp(self): - self.d = bb.data.init() - self.d["foo"] = "foo" - self.d["bar"] = "bar" - self.d["value_of_foobar"] = "187" - - def test_refs(self): - val = self.d.expand("${value_of_${foo}${bar}}") - self.assertEqual(str(val), "187") - - #def test_python_refs(self): - # val = self.d.expand("${@${@3}**2 + ${@4}**2}") - # self.assertEqual(str(val), "25") - - def test_ref_in_python_ref(self): - val = self.d.expand("${@'${foo}' + 'bar'}") - self.assertEqual(str(val), "foobar") - - def test_python_ref_in_ref(self): - val = self.d.expand("${${@'f'+'o'+'o'}}") - self.assertEqual(str(val), "foo") - - def test_deep_nesting(self): - depth = 100 - val = self.d.expand("${" * depth + "foo" + "}" * depth) - self.assertEqual(str(val), "foo") - - #def test_deep_python_nesting(self): - # depth = 50 - # val = self.d.expand("${@" * depth + "1" + "+1}" * depth) - # self.assertEqual(str(val), str(depth + 1)) - - def test_mixed(self): - val = self.d.expand("${value_of_${@('${foo}'+'bar')[0:3]}${${@'BAR'.lower()}}}") - self.assertEqual(str(val), "187") - - def test_runtime(self): - val = self.d.expand("${${@'value_of' + '_f'+'o'+'o'+'b'+'a'+'r'}}") - self.assertEqual(str(val), "187") - -class TestMemoize(unittest.TestCase): - def test_memoized(self): - d = bb.data.init() - d.setVar("FOO", "bar") - self.assertTrue(d.getVar("FOO", False) is d.getVar("FOO", False)) - - def test_not_memoized(self): - d1 = bb.data.init() - d2 = bb.data.init() - d1.setVar("FOO", "bar") - d2.setVar("FOO", "bar2") - self.assertTrue(d1.getVar("FOO", False) is not d2.getVar("FOO", False)) - - def test_changed_after_memoized(self): - d = bb.data.init() - d.setVar("foo", "value of foo") - self.assertEqual(str(d.getVar("foo", False)), "value of foo") - d.setVar("foo", "second value of foo") - self.assertEqual(str(d.getVar("foo", False)), "second value of foo") - - def test_same_value(self): - d = bb.data.init() - d.setVar("foo", "value of") - d.setVar("bar", "value of") - 
self.assertEqual(d.getVar("foo", False), - d.getVar("bar", False)) - -class TestConcat(unittest.TestCase): - def setUp(self): - self.d = bb.data.init() - self.d.setVar("FOO", "foo") - self.d.setVar("VAL", "val") - self.d.setVar("BAR", "bar") - - def test_prepend(self): - self.d.setVar("TEST", "${VAL}") - self.d.prependVar("TEST", "${FOO}:") - self.assertEqual(self.d.getVar("TEST"), "foo:val") - - def test_append(self): - self.d.setVar("TEST", "${VAL}") - self.d.appendVar("TEST", ":${BAR}") - self.assertEqual(self.d.getVar("TEST"), "val:bar") - - def test_multiple_append(self): - self.d.setVar("TEST", "${VAL}") - self.d.prependVar("TEST", "${FOO}:") - self.d.appendVar("TEST", ":val2") - self.d.appendVar("TEST", ":${BAR}") - self.assertEqual(self.d.getVar("TEST"), "foo:val:val2:bar") - -class TestConcatOverride(unittest.TestCase): - def setUp(self): - self.d = bb.data.init() - self.d.setVar("FOO", "foo") - self.d.setVar("VAL", "val") - self.d.setVar("BAR", "bar") - - def test_prepend(self): - self.d.setVar("TEST", "${VAL}") - self.d.setVar("TEST:prepend", "${FOO}:") - self.assertEqual(self.d.getVar("TEST"), "foo:val") - - def test_append(self): - self.d.setVar("TEST", "${VAL}") - self.d.setVar("TEST:append", ":${BAR}") - self.assertEqual(self.d.getVar("TEST"), "val:bar") - - def test_multiple_append(self): - self.d.setVar("TEST", "${VAL}") - self.d.setVar("TEST:prepend", "${FOO}:") - self.d.setVar("TEST:append", ":val2") - self.d.setVar("TEST:append", ":${BAR}") - self.assertEqual(self.d.getVar("TEST"), "foo:val:val2:bar") - - def test_append_unset(self): - self.d.setVar("TEST:prepend", "${FOO}:") - self.d.setVar("TEST:append", ":val2") - self.d.setVar("TEST:append", ":${BAR}") - self.assertEqual(self.d.getVar("TEST"), "foo::val2:bar") - - def test_remove(self): - self.d.setVar("TEST", "${VAL} ${BAR}") - self.d.setVar("TEST:remove", "val") - self.assertEqual(self.d.getVar("TEST"), " bar") - - def test_remove_cleared(self): - self.d.setVar("TEST", "${VAL} ${BAR}") - self.d.setVar("TEST:remove", "val") - self.d.setVar("TEST", "${VAL} ${BAR}") - self.assertEqual(self.d.getVar("TEST"), "val bar") - - # Ensure the value is unchanged if we have an inactive remove override - # (including that whitespace is preserved) - def test_remove_inactive_override(self): - self.d.setVar("TEST", "${VAL} ${BAR} 123") - self.d.setVar("TEST:remove:inactiveoverride", "val") - self.assertEqual(self.d.getVar("TEST"), "val bar 123") - - def test_doubleref_remove(self): - self.d.setVar("TEST", "${VAL} ${BAR}") - self.d.setVar("TEST:remove", "val") - self.d.setVar("TEST_TEST", "${TEST} ${TEST}") - self.assertEqual(self.d.getVar("TEST_TEST"), " bar bar") - - def test_empty_remove(self): - self.d.setVar("TEST", "") - self.d.setVar("TEST:remove", "val") - self.assertEqual(self.d.getVar("TEST"), "") - - def test_remove_expansion(self): - self.d.setVar("BAR", "Z") - self.d.setVar("TEST", "${BAR}/X Y") - self.d.setVar("TEST:remove", "${BAR}/X") - self.assertEqual(self.d.getVar("TEST"), " Y") - - def test_remove_expansion_items(self): - self.d.setVar("TEST", "A B C D") - self.d.setVar("BAR", "B D") - self.d.setVar("TEST:remove", "${BAR}") - self.assertEqual(self.d.getVar("TEST"), "A C ") - - def test_remove_preserve_whitespace(self): - # When the removal isn't active, the original value should be preserved - self.d.setVar("TEST", " A B") - self.d.setVar("TEST:remove", "C") - self.assertEqual(self.d.getVar("TEST"), " A B") - - def test_remove_preserve_whitespace2(self): - # When the removal is active preserve the whitespace - 
self.d.setVar("TEST", " A B") - self.d.setVar("TEST:remove", "B") - self.assertEqual(self.d.getVar("TEST"), " A ") - -class TestOverrides(unittest.TestCase): - def setUp(self): - self.d = bb.data.init() - self.d.setVar("OVERRIDES", "foo:bar:local") - self.d.setVar("TEST", "testvalue") - - def test_no_override(self): - self.assertEqual(self.d.getVar("TEST"), "testvalue") - - def test_one_override(self): - self.d.setVar("TEST:bar", "testvalue2") - self.assertEqual(self.d.getVar("TEST"), "testvalue2") - - def test_one_override_unset(self): - self.d.setVar("TEST2:bar", "testvalue2") - - self.assertEqual(self.d.getVar("TEST2"), "testvalue2") - self.assertCountEqual(list(self.d.keys()), ['TEST', 'TEST2', 'OVERRIDES', 'TEST2:bar']) - - def test_multiple_override(self): - self.d.setVar("TEST:bar", "testvalue2") - self.d.setVar("TEST:local", "testvalue3") - self.d.setVar("TEST:foo", "testvalue4") - self.assertEqual(self.d.getVar("TEST"), "testvalue3") - self.assertCountEqual(list(self.d.keys()), ['TEST', 'TEST:foo', 'OVERRIDES', 'TEST:bar', 'TEST:local']) - - def test_multiple_combined_overrides(self): - self.d.setVar("TEST:local:foo:bar", "testvalue3") - self.assertEqual(self.d.getVar("TEST"), "testvalue3") - - def test_multiple_overrides_unset(self): - self.d.setVar("TEST2:local:foo:bar", "testvalue3") - self.assertEqual(self.d.getVar("TEST2"), "testvalue3") - - def test_keyexpansion_override(self): - self.d.setVar("LOCAL", "local") - self.d.setVar("TEST:bar", "testvalue2") - self.d.setVar("TEST:${LOCAL}", "testvalue3") - self.d.setVar("TEST:foo", "testvalue4") - bb.data.expandKeys(self.d) - self.assertEqual(self.d.getVar("TEST"), "testvalue3") - - def test_rename_override(self): - self.d.setVar("ALTERNATIVE:ncurses-tools:class-target", "a") - self.d.setVar("OVERRIDES", "class-target") - self.d.renameVar("ALTERNATIVE:ncurses-tools", "ALTERNATIVE:lib32-ncurses-tools") - self.assertEqual(self.d.getVar("ALTERNATIVE:lib32-ncurses-tools"), "a") - - def test_underscore_override(self): - self.d.setVar("TEST:bar", "testvalue2") - self.d.setVar("TEST:some_val", "testvalue3") - self.d.setVar("TEST:foo", "testvalue4") - self.d.setVar("OVERRIDES", "foo:bar:some_val") - self.assertEqual(self.d.getVar("TEST"), "testvalue3") - - # Test an override with _ in it based on a real world OE issue - def test_underscore_override_2(self): - self.d.setVar("TARGET_ARCH", "x86_64") - self.d.setVar("PN", "test-${TARGET_ARCH}") - self.d.setVar("VERSION", "1") - self.d.setVar("VERSION:pn-test-${TARGET_ARCH}", "2") - self.d.setVar("OVERRIDES", "pn-${PN}") - bb.data.expandKeys(self.d) - self.assertEqual(self.d.getVar("VERSION"), "2") - - def test_remove_with_override(self): - self.d.setVar("TEST:bar", "testvalue2") - self.d.setVar("TEST:some_val", "testvalue3 testvalue5") - self.d.setVar("TEST:some_val:remove", "testvalue3") - self.d.setVar("TEST:foo", "testvalue4") - self.d.setVar("OVERRIDES", "foo:bar:some_val") - self.assertEqual(self.d.getVar("TEST"), " testvalue5") - - def test_append_and_override_1(self): - self.d.setVar("TEST:append", "testvalue2") - self.d.setVar("TEST:bar", "testvalue3") - self.assertEqual(self.d.getVar("TEST"), "testvalue3testvalue2") - - def test_append_and_override_2(self): - self.d.setVar("TEST:append:bar", "testvalue2") - self.assertEqual(self.d.getVar("TEST"), "testvaluetestvalue2") - - def test_append_and_override_3(self): - self.d.setVar("TEST:bar:append", "testvalue2") - self.assertEqual(self.d.getVar("TEST"), "testvalue2") - - def test_append_and_unused_override(self): - # Had a bug where an 
unused override append could return "" instead of None - self.d.setVar("BAR:append:unusedoverride", "testvalue2") - self.assertEqual(self.d.getVar("BAR"), None) - -class TestKeyExpansion(unittest.TestCase): - def setUp(self): - self.d = bb.data.init() - self.d.setVar("FOO", "foo") - self.d.setVar("BAR", "foo") - - def test_keyexpand(self): - self.d.setVar("VAL_${FOO}", "A") - self.d.setVar("VAL_${BAR}", "B") - with LogRecord() as logs: - bb.data.expandKeys(self.d) - self.assertTrue(logContains("Variable key VAL_${FOO} (A) replaces original key VAL_foo (B)", logs)) - self.assertEqual(self.d.getVar("VAL_foo"), "A") - -class TestFlags(unittest.TestCase): - def setUp(self): - self.d = bb.data.init() - self.d.setVar("foo", "value of foo") - self.d.setVarFlag("foo", "flag1", "value of flag1") - self.d.setVarFlag("foo", "_defaultval_flag_flag1", "default of flag1") - self.d.setVarFlag("foo", "flag2", "value of flag2") - self.d.setVarFlag("foo", "_defaultval_flag_flag2", "default of flag2") - self.d.setVarFlag("foo", "flag3", "value of flag3") - self.d.setVarFlag("foo", "_defaultval_flag_flagnovalue", "default of flagnovalue") - - def test_setflag(self): - self.assertEqual(self.d.getVarFlag("foo", "flag1", False), "value of flag1") - self.assertEqual(self.d.getVarFlag("foo", "flag2", False), "value of flag2") - self.assertDictEqual( - self.d.getVarFlags("foo"), - { - "flag1": "value of flag1", - "flag2": "value of flag2", - "flag3": "value of flag3", - "flagnovalue": "default of flagnovalue", - } - ) - self.assertDictEqual( - self.d.getVarFlags("foo", internalflags=True), - { - "_content": "value of foo", - "flag1": "value of flag1", - "flag2": "value of flag2", - "flag3": "value of flag3", - "_defaultval_flag_flag1": "default of flag1", - "_defaultval_flag_flag2": "default of flag2", - "_defaultval_flag_flagnovalue": "default of flagnovalue", - } - ) - - def test_delflag(self): - self.d.delVarFlag("foo", "flag2") - self.d.delVarFlag("foo", "flag3") - self.assertEqual(self.d.getVarFlag("foo", "flag1", False), "value of flag1") - self.assertEqual(self.d.getVarFlag("foo", "flag2", False), None) - self.assertDictEqual( - self.d.getVarFlags("foo"), - { - "flag1": "value of flag1", - "flagnovalue": "default of flagnovalue", - } - ) - self.assertDictEqual( - self.d.getVarFlags("foo", internalflags=True), - { - "_content": "value of foo", - "flag1": "value of flag1", - "_defaultval_flag_flag1": "default of flag1", - "_defaultval_flag_flagnovalue": "default of flagnovalue", - } - ) - - def test_delvar(self): - self.d.delVar("foo") - self.assertEqual(self.d.getVarFlag("foo", "flag1", False), None) - self.assertEqual(self.d.getVarFlag("foo", "flag2", False), None) - self.assertEqual(self.d.getVarFlags("foo", internalflags=True), None) - -class Contains(unittest.TestCase): - def setUp(self): - self.d = bb.data.init() - self.d.setVar("SOMEFLAG", "a b c") - - def test_contains(self): - self.assertTrue(bb.utils.contains("SOMEFLAG", "a", True, False, self.d)) - self.assertTrue(bb.utils.contains("SOMEFLAG", "b", True, False, self.d)) - self.assertTrue(bb.utils.contains("SOMEFLAG", "c", True, False, self.d)) - - self.assertTrue(bb.utils.contains("SOMEFLAG", "a b", True, False, self.d)) - self.assertTrue(bb.utils.contains("SOMEFLAG", "b c", True, False, self.d)) - self.assertTrue(bb.utils.contains("SOMEFLAG", "c a", True, False, self.d)) - - self.assertTrue(bb.utils.contains("SOMEFLAG", "a b c", True, False, self.d)) - self.assertTrue(bb.utils.contains("SOMEFLAG", "c b a", True, False, self.d)) - - 
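-        # Editor's note (illustrative summary, not part of the original test):
-        # bb.utils.contains(var, checkvalues, truevalue, falsevalue, d) returns
-        # truevalue only when every whitespace-separated item in checkvalues is
-        # present in var, e.g. "a b" and "c b a" above; any missing item, as in
-        # the cases below, yields falsevalue.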
self.assertFalse(bb.utils.contains("SOMEFLAG", "x", True, False, self.d)) - self.assertFalse(bb.utils.contains("SOMEFLAG", "a x", True, False, self.d)) - self.assertFalse(bb.utils.contains("SOMEFLAG", "x c b", True, False, self.d)) - self.assertFalse(bb.utils.contains("SOMEFLAG", "x c b a", True, False, self.d)) - - def test_contains_any(self): - self.assertTrue(bb.utils.contains_any("SOMEFLAG", "a", True, False, self.d)) - self.assertTrue(bb.utils.contains_any("SOMEFLAG", "b", True, False, self.d)) - self.assertTrue(bb.utils.contains_any("SOMEFLAG", "c", True, False, self.d)) - - self.assertTrue(bb.utils.contains_any("SOMEFLAG", "a b", True, False, self.d)) - self.assertTrue(bb.utils.contains_any("SOMEFLAG", "b c", True, False, self.d)) - self.assertTrue(bb.utils.contains_any("SOMEFLAG", "c a", True, False, self.d)) - - self.assertTrue(bb.utils.contains_any("SOMEFLAG", "a x", True, False, self.d)) - self.assertTrue(bb.utils.contains_any("SOMEFLAG", "x c", True, False, self.d)) - - self.assertFalse(bb.utils.contains_any("SOMEFLAG", "x", True, False, self.d)) - self.assertFalse(bb.utils.contains_any("SOMEFLAG", "x y z", True, False, self.d)) - - -class TaskHash(unittest.TestCase): - def test_taskhashes(self): - def gettask_bashhash(taskname, d): - tasklist, gendeps, lookupcache = bb.data.generate_dependencies(d, set()) - taskdeps, basehash = bb.data.generate_dependency_hash(tasklist, gendeps, lookupcache, set(), "somefile") - bb.warn(str(lookupcache)) - return basehash["somefile:" + taskname] - - d = bb.data.init() - d.setVar("__BBTASKS", ["mytask"]) - d.setVar("__exportlist", []) - d.setVar("mytask", "${MYCOMMAND}") - d.setVar("MYCOMMAND", "${VAR}; foo; bar; exit 0") - d.setVar("VAR", "val") - orighash = gettask_bashhash("mytask", d) - - # Changing a variable should change the hash - d.setVar("VAR", "val2") - nexthash = gettask_bashhash("mytask", d) - self.assertNotEqual(orighash, nexthash) - - d.setVar("VAR", "val") - # Adding an inactive removal shouldn't change the hash - d.setVar("BAR", "notbar") - d.setVar("MYCOMMAND:remove", "${BAR}") - nexthash = gettask_bashhash("mytask", d) - self.assertEqual(orighash, nexthash) - - # Adding an active removal should change the hash - d.setVar("BAR", "bar;") - nexthash = gettask_bashhash("mytask", d) - self.assertNotEqual(orighash, nexthash) - - # Setup an inactive contains() - d.setVar("VAR", "${@bb.utils.contains('VAR2', 'A', 'val', '', d)}") - orighash = gettask_bashhash("mytask", d) - - # Activate the contains() and the hash should change - d.setVar("VAR2", "A") - nexthash = gettask_bashhash("mytask", d) - self.assertNotEqual(orighash, nexthash) - - # The contains should be inactive but even though VAR2 has a - # different value the hash should match the original - d.setVar("VAR2", "B") - nexthash = gettask_bashhash("mytask", d) - self.assertEqual(orighash, nexthash) - -class Serialize(unittest.TestCase): - - def test_serialize(self): - import tempfile - import pickle - d = bb.data.init() - d.enableTracking() - d.setVar('HELLO', 'world') - d.setVarFlag('HELLO', 'other', 'planet') - with tempfile.NamedTemporaryFile(delete=False) as tmpfile: - tmpfilename = tmpfile.name - pickle.dump(d, tmpfile) - - with open(tmpfilename, 'rb') as f: - newd = pickle.load(f) - - os.remove(tmpfilename) - - self.assertEqual(d, newd) - self.assertEqual(newd.getVar('HELLO'), 'world') - self.assertEqual(newd.getVarFlag('HELLO', 'other'), 'planet') - - diff --git a/bitbake/lib/bb/tests/event.py b/bitbake/lib/bb/tests/event.py deleted file mode 100644 index 
ef61891d30..0000000000 --- a/bitbake/lib/bb/tests/event.py +++ /dev/null @@ -1,976 +0,0 @@ -# -# BitBake Tests for the Event implementation (event.py) -# -# Copyright (C) 2017 Intel Corporation -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import collections -import importlib -import logging -import pickle -import threading -import time -import unittest -import tempfile -from unittest.mock import Mock -from unittest.mock import call - -import bb -import bb.event -from bb.msg import BBLogFormatter - - -class EventQueueStubBase(object): - """ Base class for EventQueueStub classes """ - def __init__(self): - self.event_calls = [] - return - - def _store_event_data_string(self, event): - if isinstance(event, logging.LogRecord): - formatter = BBLogFormatter("%(levelname)s: %(message)s") - self.event_calls.append(formatter.format(event)) - else: - self.event_calls.append(bb.event.getName(event)) - return - - -class EventQueueStub(EventQueueStubBase): - """ Class used as specification for UI event handler queue stub objects """ - def __init__(self): - super(EventQueueStub, self).__init__() - - def send(self, event): - super(EventQueueStub, self)._store_event_data_string(event) - - -class PickleEventQueueStub(EventQueueStubBase): - """ Class used as specification for UI event handler queue stub objects - with sendpickle method """ - def __init__(self): - super(PickleEventQueueStub, self).__init__() - - def sendpickle(self, pickled_event): - event = pickle.loads(pickled_event) - super(PickleEventQueueStub, self)._store_event_data_string(event) - - -class UIClientStub(object): - """ Class used as specification for UI event handler stub objects """ - def __init__(self): - self.event = None - - -class EventHandlingTest(unittest.TestCase): - """ Event handling test class """ - - - def setUp(self): - self._test_process = Mock() - ui_client1 = UIClientStub() - ui_client2 = UIClientStub() - self._test_ui1 = Mock(wraps=ui_client1) - self._test_ui2 = Mock(wraps=ui_client2) - importlib.reload(bb.event) - - def _create_test_handlers(self): - """ Method used to create a test handler ordered dictionary """ - test_handlers = collections.OrderedDict() - test_handlers["handler1"] = self._test_process.handler1 - test_handlers["handler2"] = self._test_process.handler2 - return test_handlers - - def test_class_handlers(self): - """ Test set_class_handlers and get_class_handlers methods """ - test_handlers = self._create_test_handlers() - bb.event.set_class_handlers(test_handlers) - self.assertEqual(test_handlers, - bb.event.get_class_handlers()) - - def test_handlers(self): - """ Test set_handlers and get_handlers """ - test_handlers = self._create_test_handlers() - bb.event.set_handlers(test_handlers) - self.assertEqual(test_handlers, - bb.event.get_handlers()) - - def test_clean_class_handlers(self): - """ Test clean_class_handlers method """ - cleanDict = collections.OrderedDict() - self.assertEqual(cleanDict, - bb.event.clean_class_handlers()) - - def test_register(self): - """ Test register method for class handlers """ - result = bb.event.register("handler", self._test_process.handler) - self.assertEqual(result, bb.event.Registered) - handlers_dict = bb.event.get_class_handlers() - self.assertIn("handler", handlers_dict) - - def test_already_registered(self): - """ Test detection of an already registered class handler """ - bb.event.register("handler", self._test_process.handler) - handlers_dict = bb.event.get_class_handlers() - self.assertIn("handler", handlers_dict) - result = bb.event.register("handler", 
self._test_process.handler) - self.assertEqual(result, bb.event.AlreadyRegistered) - - def test_register_from_string(self): - """ Test register method receiving code in string """ - result = bb.event.register("string_handler", " return True") - self.assertEqual(result, bb.event.Registered) - handlers_dict = bb.event.get_class_handlers() - self.assertIn("string_handler", handlers_dict) - - def test_register_with_mask(self): - """ Test register method with event masking """ - mask = ["bb.event.OperationStarted", - "bb.event.OperationCompleted"] - result = bb.event.register("event_handler", - self._test_process.event_handler, - mask) - self.assertEqual(result, bb.event.Registered) - handlers_dict = bb.event.get_class_handlers() - self.assertIn("event_handler", handlers_dict) - - def test_remove(self): - """ Test remove method for class handlers """ - test_handlers = self._create_test_handlers() - bb.event.set_class_handlers(test_handlers) - count = len(test_handlers) - bb.event.remove("handler1", None) - test_handlers = bb.event.get_class_handlers() - self.assertEqual(len(test_handlers), count - 1) - with self.assertRaises(KeyError): - bb.event.remove("handler1", None) - - def test_execute_handler(self): - """ Test execute_handler method for class handlers """ - mask = ["bb.event.OperationProgress"] - result = bb.event.register("event_handler", - self._test_process.event_handler, - mask) - self.assertEqual(result, bb.event.Registered) - event = bb.event.OperationProgress(current=10, total=100) - bb.event.execute_handler("event_handler", - self._test_process.event_handler, - event, - None) - self._test_process.event_handler.assert_called_once_with(event, None) - - def test_fire_class_handlers(self): - """ Test fire_class_handlers method """ - mask = ["bb.event.OperationStarted"] - result = bb.event.register("event_handler1", - self._test_process.event_handler1, - mask) - self.assertEqual(result, bb.event.Registered) - result = bb.event.register("event_handler2", - self._test_process.event_handler2, - "*") - self.assertEqual(result, bb.event.Registered) - event1 = bb.event.OperationStarted() - event2 = bb.event.OperationCompleted(total=123) - bb.event.fire_class_handlers(event1, None) - bb.event.fire_class_handlers(event2, None) - bb.event.fire_class_handlers(event2, None) - expected_event_handler1 = [call(event1, None)] - expected_event_handler2 = [call(event1, None), - call(event2, None), - call(event2, None)] - self.assertEqual(self._test_process.event_handler1.call_args_list, - expected_event_handler1) - self.assertEqual(self._test_process.event_handler2.call_args_list, - expected_event_handler2) - - def test_class_handler_filters(self): - """ Test filters for class handlers """ - mask = ["bb.event.OperationStarted"] - result = bb.event.register("event_handler1", - self._test_process.event_handler1, - mask) - self.assertEqual(result, bb.event.Registered) - result = bb.event.register("event_handler2", - self._test_process.event_handler2, - "*") - self.assertEqual(result, bb.event.Registered) - bb.event.set_eventfilter( - lambda name, handler, event, d : - name == 'event_handler2' and - bb.event.getName(event) == "OperationStarted") - event1 = bb.event.OperationStarted() - event2 = bb.event.OperationCompleted(total=123) - bb.event.fire_class_handlers(event1, None) - bb.event.fire_class_handlers(event2, None) - bb.event.fire_class_handlers(event2, None) - expected_event_handler1 = [] - expected_event_handler2 = [call(event1, None)] - 
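-        # Editor's note (annotation): set_eventfilter installs a single global
-        # predicate gating every class handler, so although event_handler2 is
-        # registered for "*", only the (event_handler2, OperationStarted)
-        # combination passes the filter and event_handler1 is never called.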
self.assertEqual(self._test_process.event_handler1.call_args_list, - expected_event_handler1) - self.assertEqual(self._test_process.event_handler2.call_args_list, - expected_event_handler2) - - def test_change_handler_event_mapping(self): - """ Test changing the event mapping for class handlers """ - event1 = bb.event.OperationStarted() - event2 = bb.event.OperationCompleted(total=123) - - # register handler for all events - result = bb.event.register("event_handler1", - self._test_process.event_handler1, - "*") - self.assertEqual(result, bb.event.Registered) - bb.event.fire_class_handlers(event1, None) - bb.event.fire_class_handlers(event2, None) - expected = [call(event1, None), call(event2, None)] - self.assertEqual(self._test_process.event_handler1.call_args_list, - expected) - - # unregister handler and register it only for OperationStarted - bb.event.remove("event_handler1", - self._test_process.event_handler1) - mask = ["bb.event.OperationStarted"] - result = bb.event.register("event_handler1", - self._test_process.event_handler1, - mask) - self.assertEqual(result, bb.event.Registered) - bb.event.fire_class_handlers(event1, None) - bb.event.fire_class_handlers(event2, None) - expected = [call(event1, None), call(event2, None), call(event1, None)] - self.assertEqual(self._test_process.event_handler1.call_args_list, - expected) - - # unregister handler and register it only for OperationCompleted - bb.event.remove("event_handler1", - self._test_process.event_handler1) - mask = ["bb.event.OperationCompleted"] - result = bb.event.register("event_handler1", - self._test_process.event_handler1, - mask) - self.assertEqual(result, bb.event.Registered) - bb.event.fire_class_handlers(event1, None) - bb.event.fire_class_handlers(event2, None) - expected = [call(event1,None), call(event2, None), call(event1, None), call(event2, None)] - self.assertEqual(self._test_process.event_handler1.call_args_list, - expected) - - def test_register_UIHhandler(self): - """ Test register_UIHhandler method """ - result = bb.event.register_UIHhandler(self._test_ui1, mainui=True) - self.assertEqual(result, 1) - - def test_UIHhandler_already_registered(self): - """ Test registering an UIHhandler already existing """ - result = bb.event.register_UIHhandler(self._test_ui1, mainui=True) - self.assertEqual(result, 1) - result = bb.event.register_UIHhandler(self._test_ui1, mainui=True) - self.assertEqual(result, 2) - - def test_unregister_UIHhandler(self): - """ Test unregister_UIHhandler method """ - result = bb.event.register_UIHhandler(self._test_ui1, mainui=True) - self.assertEqual(result, 1) - result = bb.event.unregister_UIHhandler(1) - self.assertIs(result, None) - - def test_fire_ui_handlers(self): - """ Test fire_ui_handlers method """ - self._test_ui1.event = Mock(spec_set=EventQueueStub) - result = bb.event.register_UIHhandler(self._test_ui1, mainui=True) - self.assertEqual(result, 1) - self._test_ui2.event = Mock(spec_set=PickleEventQueueStub) - result = bb.event.register_UIHhandler(self._test_ui2, mainui=True) - self.assertEqual(result, 2) - event1 = bb.event.OperationStarted() - bb.event.fire_ui_handlers(event1, None) - expected = [call(event1)] - self.assertEqual(self._test_ui1.event.send.call_args_list, - expected) - expected = [call(pickle.dumps(event1))] - self.assertEqual(self._test_ui2.event.sendpickle.call_args_list, - expected) - - def test_ui_handler_mask_filter(self): - """ Test filters for UI handlers """ - mask = ["bb.event.OperationStarted"] - debug_domains = {} - self._test_ui1.event = 
Mock(spec_set=EventQueueStub) - result = bb.event.register_UIHhandler(self._test_ui1, mainui=True) - bb.event.set_UIHmask(result, logging.INFO, debug_domains, mask) - self._test_ui2.event = Mock(spec_set=PickleEventQueueStub) - result = bb.event.register_UIHhandler(self._test_ui2, mainui=True) - bb.event.set_UIHmask(result, logging.INFO, debug_domains, mask) - - event1 = bb.event.OperationStarted() - event2 = bb.event.OperationCompleted(total=1) - - bb.event.fire_ui_handlers(event1, None) - bb.event.fire_ui_handlers(event2, None) - expected = [call(event1)] - self.assertEqual(self._test_ui1.event.send.call_args_list, - expected) - expected = [call(pickle.dumps(event1))] - self.assertEqual(self._test_ui2.event.sendpickle.call_args_list, - expected) - - def test_ui_handler_log_filter(self): - """ Test log filters for UI handlers """ - mask = ["*"] - debug_domains = {'BitBake.Foo': logging.WARNING} - - self._test_ui1.event = EventQueueStub() - result = bb.event.register_UIHhandler(self._test_ui1, mainui=True) - bb.event.set_UIHmask(result, logging.ERROR, debug_domains, mask) - self._test_ui2.event = PickleEventQueueStub() - result = bb.event.register_UIHhandler(self._test_ui2, mainui=True) - bb.event.set_UIHmask(result, logging.ERROR, debug_domains, mask) - - event1 = bb.event.OperationStarted() - bb.event.fire_ui_handlers(event1, None) # All events match - - event_log_handler = bb.event.LogHandler() - logger = logging.getLogger("BitBake") - logger.addHandler(event_log_handler) - logger1 = logging.getLogger("BitBake.Foo") - logger1.warning("Test warning LogRecord1") # Matches debug_domains level - logger1.info("Test info LogRecord") # Filtered out - logger2 = logging.getLogger("BitBake.Bar") - logger2.error("Test error LogRecord") # Matches filter base level - logger2.warning("Test warning LogRecord2") # Filtered out - logger.removeHandler(event_log_handler) - - expected = ['OperationStarted', - 'WARNING: Test warning LogRecord1', - 'ERROR: Test error LogRecord'] - self.assertEqual(self._test_ui1.event.event_calls, expected) - self.assertEqual(self._test_ui2.event.event_calls, expected) - - def test_fire(self): - """ Test fire method used to trigger class and ui event handlers """ - mask = ["bb.event.ConfigParsed"] - result = bb.event.register("event_handler1", - self._test_process.event_handler1, - mask) - - self._test_ui1.event = Mock(spec_set=EventQueueStub) - result = bb.event.register_UIHhandler(self._test_ui1, mainui=True) - self.assertEqual(result, 1) - - event1 = bb.event.ConfigParsed() - bb.event.fire(event1, None) - expected = [call(event1, None)] - self.assertEqual(self._test_process.event_handler1.call_args_list, - expected) - expected = [call(event1)] - self.assertEqual(self._test_ui1.event.send.call_args_list, - expected) - - def test_fire_from_worker(self): - """ Test fire_from_worker method """ - self._test_ui1.event = Mock(spec_set=EventQueueStub) - result = bb.event.register_UIHhandler(self._test_ui1, mainui=True) - self.assertEqual(result, 1) - event1 = bb.event.ConfigParsed() - bb.event.fire_from_worker(event1, None) - expected = [call(event1)] - self.assertEqual(self._test_ui1.event.send.call_args_list, - expected) - - def test_worker_fire(self): - """ Test the triggering of bb.event.worker_fire callback """ - bb.event.worker_fire = Mock() - event = bb.event.Event() - bb.event.fire(event, None) - expected = [call(event, None)] - self.assertEqual(bb.event.worker_fire.call_args_list, expected) - - def test_print_ui_queue(self): - """ Test print_ui_queue method """ - event1 
= bb.event.OperationStarted() - event2 = bb.event.OperationCompleted(total=123) - bb.event.fire(event1, None) - bb.event.fire(event2, None) - event_log_handler = bb.event.LogHandler() - logger = logging.getLogger("BitBake") - logger.addHandler(event_log_handler) - logger.info("Test info LogRecord") - logger.warning("Test warning LogRecord") - with self.assertLogs("BitBake", level="INFO") as cm: - bb.event.print_ui_queue() - logger.removeHandler(event_log_handler) - self.assertEqual(cm.output, - ["INFO:BitBake:Test info LogRecord", - "WARNING:BitBake:Test warning LogRecord"]) - - def _set_threadlock_test_mockups(self): - """ Create UI event handler mockups used in enable and disable - threadlock tests """ - def ui1_event_send(event): - if type(event) is bb.event.ConfigParsed: - self._threadlock_test_calls.append("w1_ui1") - if type(event) is bb.event.OperationStarted: - self._threadlock_test_calls.append("w2_ui1") - time.sleep(2) - - def ui2_event_send(event): - if type(event) is bb.event.ConfigParsed: - self._threadlock_test_calls.append("w1_ui2") - if type(event) is bb.event.OperationStarted: - self._threadlock_test_calls.append("w2_ui2") - time.sleep(2) - - self._threadlock_test_calls = [] - self._test_ui1.event = EventQueueStub() - self._test_ui1.event.send = ui1_event_send - result = bb.event.register_UIHhandler(self._test_ui1, mainui=True) - self.assertEqual(result, 1) - self._test_ui2.event = EventQueueStub() - self._test_ui2.event.send = ui2_event_send - result = bb.event.register_UIHhandler(self._test_ui2, mainui=True) - self.assertEqual(result, 2) - - def _set_and_run_threadlock_test_workers(self): - """ Create and run the workers used to trigger events in enable and - disable threadlock tests """ - worker1 = threading.Thread(target=self._thread_lock_test_worker1) - worker2 = threading.Thread(target=self._thread_lock_test_worker2) - worker1.start() - time.sleep(1) - worker2.start() - worker1.join() - worker2.join() - - def _thread_lock_test_worker1(self): - """ First worker used to fire the ConfigParsed event for enable and - disable threadlocks tests """ - bb.event.fire(bb.event.ConfigParsed(), None) - - def _thread_lock_test_worker2(self): - """ Second worker used to fire the OperationStarted event for enable - and disable threadlocks tests """ - bb.event.fire(bb.event.OperationStarted(), None) - - def test_event_threadlock(self): - """ Test enable_threadlock method """ - self._set_threadlock_test_mockups() - self._set_and_run_threadlock_test_workers() - # Calls to UI handlers should be in order as all the registered - # handlers for the event coming from the first worker should be - # called before processing the event from the second worker. 
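-        # Editor's note (annotation): each stub send() sleeps after recording
-        # its call, so without the thread lock worker 2's event (fired roughly
-        # one second in) could be delivered before both handlers had seen
-        # worker 1's event; the strict ordering below shows it was not.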
- self.assertEqual(self._threadlock_test_calls, - ["w1_ui1", "w1_ui2", "w2_ui1", "w2_ui2"]) - -class EventClassesTest(unittest.TestCase): - """ Event classes test class """ - - _worker_pid = 54321 - - def setUp(self): - bb.event.worker_pid = EventClassesTest._worker_pid - self.d = bb.data.init() - bb.parse.siggen = bb.siggen.init(self.d) - - def test_Event(self): - """ Test the Event base class """ - event = bb.event.Event() - self.assertEqual(event.pid, EventClassesTest._worker_pid) - - def test_HeartbeatEvent(self): - """ Test the HeartbeatEvent class """ - time = 10 - event = bb.event.HeartbeatEvent(time) - self.assertEqual(event.time, time) - self.assertEqual(event.pid, EventClassesTest._worker_pid) - - def test_OperationStarted(self): - """ Test OperationStarted event class """ - msg = "Foo Bar" - event = bb.event.OperationStarted(msg) - self.assertEqual(event.msg, msg) - self.assertEqual(event.pid, EventClassesTest._worker_pid) - - def test_OperationCompleted(self): - """ Test OperationCompleted event class """ - msg = "Foo Bar" - total = 123 - event = bb.event.OperationCompleted(total, msg) - self.assertEqual(event.msg, msg) - self.assertEqual(event.total, total) - self.assertEqual(event.pid, EventClassesTest._worker_pid) - - def test_OperationProgress(self): - """ Test OperationProgress event class """ - msg = "Foo Bar" - total = 123 - current = 111 - event = bb.event.OperationProgress(current, total, msg) - self.assertEqual(event.msg, msg + ": %s/%s" % (current, total)) - self.assertEqual(event.pid, EventClassesTest._worker_pid) - - def test_ConfigParsed(self): - """ Test the ConfigParsed class """ - event = bb.event.ConfigParsed() - self.assertEqual(event.pid, EventClassesTest._worker_pid) - - def test_MultiConfigParsed(self): - """ Test MultiConfigParsed event class """ - mcdata = {"foobar": "Foo Bar"} - event = bb.event.MultiConfigParsed(mcdata) - self.assertEqual(event.mcdata, mcdata) - self.assertEqual(event.pid, EventClassesTest._worker_pid) - - def test_RecipeEvent(self): - """ Test RecipeEvent event base class """ - callback = lambda a: 2 * a - event = bb.event.RecipeEvent(callback) - self.assertEqual(event.fn(1), callback(1)) - self.assertEqual(event.pid, EventClassesTest._worker_pid) - - def test_RecipePreFinalise(self): - """ Test RecipePreFinalise event class """ - callback = lambda a: 2 * a - event = bb.event.RecipePreFinalise(callback) - self.assertEqual(event.fn(1), callback(1)) - self.assertEqual(event.pid, EventClassesTest._worker_pid) - - def test_RecipeTaskPreProcess(self): - """ Test RecipeTaskPreProcess event class """ - callback = lambda a: 2 * a - tasklist = [("foobar", callback)] - event = bb.event.RecipeTaskPreProcess(callback, tasklist) - self.assertEqual(event.fn(1), callback(1)) - self.assertEqual(event.tasklist, tasklist) - self.assertEqual(event.pid, EventClassesTest._worker_pid) - - def test_RecipeParsed(self): - """ Test RecipeParsed event base class """ - callback = lambda a: 2 * a - event = bb.event.RecipeParsed(callback) - self.assertEqual(event.fn(1), callback(1)) - self.assertEqual(event.pid, EventClassesTest._worker_pid) - - def test_BuildBase(self): - """ Test base class for bitbake build events """ - name = "foo" - pkgs = ["bar"] - failures = 123 - event = bb.event.BuildBase(name, pkgs, failures) - self.assertEqual(event.name, name) - self.assertEqual(event.pkgs, pkgs) - self.assertEqual(event.getFailures(), failures) - name = event.name = "bar" - pkgs = event.pkgs = ["foo"] - self.assertEqual(event.name, name) - self.assertEqual(event.pkgs, 
pkgs) - self.assertEqual(event.getFailures(), failures) - self.assertEqual(event.pid, EventClassesTest._worker_pid) - - def test_BuildInit(self): - """ Test class for bitbake build invocation events """ - event = bb.event.BuildInit() - self.assertEqual(event.name, None) - self.assertEqual(event.pkgs, []) - self.assertEqual(event.getFailures(), 0) - name = event.name = "bar" - pkgs = event.pkgs = ["foo"] - self.assertEqual(event.name, name) - self.assertEqual(event.pkgs, pkgs) - self.assertEqual(event.getFailures(), 0) - self.assertEqual(event.pid, EventClassesTest._worker_pid) - - def test_BuildStarted(self): - """ Test class for build started events """ - name = "foo" - pkgs = ["bar"] - failures = 123 - event = bb.event.BuildStarted(name, pkgs, failures) - self.assertEqual(event.name, name) - self.assertEqual(event.pkgs, pkgs) - self.assertEqual(event.getFailures(), failures) - self.assertEqual(event.msg, "Building Started") - name = event.name = "bar" - pkgs = event.pkgs = ["foo"] - msg = event.msg = "foobar" - self.assertEqual(event.name, name) - self.assertEqual(event.pkgs, pkgs) - self.assertEqual(event.getFailures(), failures) - self.assertEqual(event.msg, msg) - self.assertEqual(event.pid, EventClassesTest._worker_pid) - - def test_BuildCompleted(self): - """ Test class for build completed events """ - total = 1000 - name = "foo" - pkgs = ["bar"] - failures = 123 - interrupted = 1 - event = bb.event.BuildCompleted(total, name, pkgs, failures, - interrupted) - self.assertEqual(event.name, name) - self.assertEqual(event.pkgs, pkgs) - self.assertEqual(event.getFailures(), failures) - self.assertEqual(event.msg, "Building Failed") - event2 = bb.event.BuildCompleted(total, name, pkgs) - self.assertEqual(event2.name, name) - self.assertEqual(event2.pkgs, pkgs) - self.assertEqual(event2.getFailures(), 0) - self.assertEqual(event2.msg, "Building Succeeded") - self.assertEqual(event2.pid, EventClassesTest._worker_pid) - - def test_DiskFull(self): - """ Test DiskFull event class """ - dev = "/dev/foo" - type = "ext4" - freespace = "104M" - mountpoint = "/" - event = bb.event.DiskFull(dev, type, freespace, mountpoint) - self.assertEqual(event.pid, EventClassesTest._worker_pid) - - def test_MonitorDiskEvent(self): - """ Test MonitorDiskEvent class """ - available_bytes = 10000000 - free_bytes = 90000000 - total_bytes = 1000000000 - du = bb.event.DiskUsageSample(available_bytes, free_bytes, - total_bytes) - event = bb.event.MonitorDiskEvent(du) - self.assertEqual(event.disk_usage.available_bytes, available_bytes) - self.assertEqual(event.disk_usage.free_bytes, free_bytes) - self.assertEqual(event.disk_usage.total_bytes, total_bytes) - self.assertEqual(event.pid, EventClassesTest._worker_pid) - - def test_NoProvider(self): - """ Test NoProvider event class """ - item = "foobar" - event1 = bb.event.NoProvider(item) - self.assertEqual(event1.getItem(), item) - self.assertEqual(event1.isRuntime(), False) - self.assertEqual(str(event1), "Nothing PROVIDES 'foobar'") - runtime = True - dependees = ["foo", "bar"] - reasons = None - close_matches = ["foibar", "footbar"] - event2 = bb.event.NoProvider(item, runtime, dependees, reasons, - close_matches) - self.assertEqual(event2.isRuntime(), True) - expected = ("Nothing RPROVIDES 'foobar' (but foo, bar RDEPENDS" - " on or otherwise requires it). 
Close matches:\n" - " foibar\n" - " footbar") - self.assertEqual(str(event2), expected) - reasons = ["Item does not exist on database"] - close_matches = ["foibar", "footbar"] - event3 = bb.event.NoProvider(item, runtime, dependees, reasons, - close_matches) - expected = ("Nothing RPROVIDES 'foobar' (but foo, bar RDEPENDS" - " on or otherwise requires it)\n" - "Item does not exist on database") - self.assertEqual(str(event3), expected) - self.assertEqual(event3.pid, EventClassesTest._worker_pid) - - def test_MultipleProviders(self): - """ Test MultipleProviders event class """ - item = "foobar" - candidates = ["foobarv1", "foobars"] - event1 = bb.event.MultipleProviders(item, candidates) - self.assertEqual(event1.isRuntime(), False) - self.assertEqual(event1.getItem(), item) - self.assertEqual(event1.getCandidates(), candidates) - expected = ("Multiple providers are available for foobar (foobarv1," - " foobars)\n" - "Consider defining a PREFERRED_PROVIDER entry to match " - "foobar") - self.assertEqual(str(event1), expected) - runtime = True - event2 = bb.event.MultipleProviders(item, candidates, runtime) - self.assertEqual(event2.isRuntime(), runtime) - expected = ("Multiple providers are available for runtime foobar " - "(foobarv1, foobars)\n" - "Consider defining a PREFERRED_RPROVIDER entry to match " - "foobar") - self.assertEqual(str(event2), expected) - self.assertEqual(event2.pid, EventClassesTest._worker_pid) - - def test_ParseStarted(self): - """ Test ParseStarted event class """ - total = 123 - event = bb.event.ParseStarted(total) - self.assertEqual(event.msg, "Recipe parsing Started") - self.assertEqual(event.total, total) - self.assertEqual(event.pid, EventClassesTest._worker_pid) - - def test_ParseCompleted(self): - """ Test ParseCompleted event class """ - cached = 10 - parsed = 13 - skipped = 7 - virtuals = 2 - masked = 1 - errors = 0 - total = 23 - event = bb.event.ParseCompleted(cached, parsed, skipped, masked, - virtuals, errors, total) - self.assertEqual(event.msg, "Recipe parsing Completed") - expected = [cached, parsed, skipped, virtuals, masked, errors, - cached + parsed, total] - actual = [event.cached, event.parsed, event.skipped, event.virtuals, - event.masked, event.errors, event.sofar, event.total] - self.assertEqual(str(actual), str(expected)) - self.assertEqual(event.pid, EventClassesTest._worker_pid) - - def test_ParseProgress(self): - """ Test ParseProgress event class """ - current = 10 - total = 100 - event = bb.event.ParseProgress(current, total) - self.assertEqual(event.msg, - "Recipe parsing" + ": %s/%s" % (current, total)) - self.assertEqual(event.pid, EventClassesTest._worker_pid) - - def test_CacheLoadStarted(self): - """ Test CacheLoadStarted event class """ - total = 123 - event = bb.event.CacheLoadStarted(total) - self.assertEqual(event.msg, "Loading cache Started") - self.assertEqual(event.total, total) - self.assertEqual(event.pid, EventClassesTest._worker_pid) - - def test_CacheLoadProgress(self): - """ Test CacheLoadProgress event class """ - current = 10 - total = 100 - event = bb.event.CacheLoadProgress(current, total) - self.assertEqual(event.msg, - "Loading cache" + ": %s/%s" % (current, total)) - self.assertEqual(event.pid, EventClassesTest._worker_pid) - - def test_CacheLoadCompleted(self): - """ Test CacheLoadCompleted event class """ - total = 23 - num_entries = 12 - event = bb.event.CacheLoadCompleted(total, num_entries) - self.assertEqual(event.msg, "Loading cache Completed") - expected = [total, num_entries] - actual = [event.total, 
event.num_entries]
        self.assertEqual(str(actual), str(expected))
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_TreeDataPreparationStarted(self):
        """ Test TreeDataPreparationStarted event class """
        event = bb.event.TreeDataPreparationStarted()
        self.assertEqual(event.msg, "Preparing tree data Started")
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_TreeDataPreparationProgress(self):
        """ Test TreeDataPreparationProgress event class """
        current = 10
        total = 100
        event = bb.event.TreeDataPreparationProgress(current, total)
        self.assertEqual(event.msg,
                         "Preparing tree data" + ": %s/%s" % (current, total))
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_TreeDataPreparationCompleted(self):
        """ Test TreeDataPreparationCompleted event class """
        total = 23
        event = bb.event.TreeDataPreparationCompleted(total)
        self.assertEqual(event.msg, "Preparing tree data Completed")
        self.assertEqual(event.total, total)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_DepTreeGenerated(self):
        """ Test DepTreeGenerated event class """
        depgraph = Mock()
        event = bb.event.DepTreeGenerated(depgraph)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_TargetsTreeGenerated(self):
        """ Test TargetsTreeGenerated event class """
        model = Mock()
        event = bb.event.TargetsTreeGenerated(model)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_ReachableStamps(self):
        """ Test ReachableStamps event class """
        stamps = [Mock(), Mock()]
        event = bb.event.ReachableStamps(stamps)
        self.assertEqual(event.stamps, stamps)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_FilesMatchingFound(self):
        """ Test FilesMatchingFound event class """
        pattern = "foo.*bar"
        matches = ["foobar"]
        event = bb.event.FilesMatchingFound(pattern, matches)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_ConfigFilesFound(self):
        """ Test ConfigFilesFound event class """
        variable = "FOO_BAR"
        values = ["foo", "bar"]
        event = bb.event.ConfigFilesFound(variable, values)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_ConfigFilePathFound(self):
        """ Test ConfigFilePathFound event class """
        path = "/foo/bar"
        event = bb.event.ConfigFilePathFound(path)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_message_classes(self):
        """ Test message event classes """
        msg = "foobar foo bar"
        for msgclass in (bb.event.MsgBase, bb.event.MsgDebug, bb.event.MsgNote,
                         bb.event.MsgWarn, bb.event.MsgError, bb.event.MsgFatal,
                         bb.event.MsgPlain):
            event = msgclass(msg)
            self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_LogExecTTY(self):
        """ Test LogExecTTY event class """
        msg = "foo bar"
        prog = "foo.sh"
        sleep_delay = 10
        retries = 3
        event = bb.event.LogExecTTY(msg, prog, sleep_delay, retries)
        self.assertEqual(event.msg, msg)
        self.assertEqual(event.prog, prog)
        self.assertEqual(event.sleep_delay, sleep_delay)
        self.assertEqual(event.retries, retries)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def _throw_zero_division_exception(self):
        return 1 / 0

    def _worker_handler(self, event, d):
        self._returned_event = event

    def test_LogHandler(self):
        """ Test LogHandler class """
        logger = logging.getLogger("TestEventClasses")
        logger.propagate = False
        handler = bb.event.LogHandler(logging.INFO)
        logger.addHandler(handler)
        bb.event.worker_fire = self._worker_handler
        try:
            self._throw_zero_division_exception()
        except ZeroDivisionError as ex:
            logger.exception(ex)
        event = self._returned_event
        try:
            pe = pickle.dumps(event)
            pickle.loads(pe)
        except Exception:
            self.fail('Logged event is not serializable')
        self.assertEqual(event.taskpid, EventClassesTest._worker_pid)

    def test_MetadataEvent(self):
        """ Test MetadataEvent class """
        eventtype = "footype"
        eventdata = {"foo": "bar"}
        event = bb.event.MetadataEvent(eventtype, eventdata)
        self.assertEqual(event.type, eventtype)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_ProcessStarted(self):
        """ Test ProcessStarted class """
        processname = "foo"
        total = 9783128974
        event = bb.event.ProcessStarted(processname, total)
        self.assertEqual(event.processname, processname)
        self.assertEqual(event.total, total)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_ProcessProgress(self):
        """ Test ProcessProgress class """
        processname = "foo"
        progress = 243224
        event = bb.event.ProcessProgress(processname, progress)
        self.assertEqual(event.processname, processname)
        self.assertEqual(event.progress, progress)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_ProcessFinished(self):
        """ Test ProcessFinished class """
        processname = "foo"
        event = bb.event.ProcessFinished(processname)
        self.assertEqual(event.processname, processname)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_SanityCheck(self):
        """ Test SanityCheck class """
        event1 = bb.event.SanityCheck()
        self.assertEqual(event1.generateevents, True)
        self.assertEqual(event1.pid, EventClassesTest._worker_pid)
        generateevents = False
        event2 = bb.event.SanityCheck(generateevents)
        self.assertEqual(event2.generateevents, generateevents)
        self.assertEqual(event2.pid, EventClassesTest._worker_pid)

    def test_SanityCheckPassed(self):
        """ Test SanityCheckPassed class """
        event = bb.event.SanityCheckPassed()
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_SanityCheckFailed(self):
        """ Test SanityCheckFailed class """
        msg = "The sanity test failed."
        event1 = bb.event.SanityCheckFailed(msg)
        self.assertEqual(event1.pid, EventClassesTest._worker_pid)
        network_error = True
        event2 = bb.event.SanityCheckFailed(msg, network_error)
        self.assertEqual(event2.pid, EventClassesTest._worker_pid)

    def test_network_event_classes(self):
        """ Test network event classes """
        event1 = bb.event.NetworkTest()
        self.assertEqual(event1.pid, EventClassesTest._worker_pid)
        generateevents = False
        event2 = bb.event.NetworkTest(generateevents)
        self.assertEqual(event2.pid, EventClassesTest._worker_pid)
        event3 = bb.event.NetworkTestPassed()
        self.assertEqual(event3.pid, EventClassesTest._worker_pid)
        event4 = bb.event.NetworkTestFailed()
        self.assertEqual(event4.pid, EventClassesTest._worker_pid)

    def test_FindSigInfoResult(self):
        """ Test FindSigInfoResult event class """
        result = [Mock()]
        event = bb.event.FindSigInfoResult(result)
        self.assertEqual(event.result, result)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_lineno_in_eventhandler(self):
        # The error lineno is 5, not 4, since the first line of error_line is '\n'
        error_line = """
# Comment line1
# Comment line2
python test_lineno_in_eventhandler() {
    This is an error line
}
addhandler test_lineno_in_eventhandler
test_lineno_in_eventhandler[eventmask] = "bb.event.ConfigParsed"
"""

        with self.assertLogs() as logs:
            f = tempfile.NamedTemporaryFile(suffix='.bb')
            f.write(bytes(error_line, "utf-8"))
            f.flush()
            d = bb.parse.handle(f.name, self.d)['']

        output = "".join(logs.output)
        self.assertIn(" line 5\n", output)

diff --git a/bitbake/lib/bb/tests/fetch-testdata/apple/cups/releases b/bitbake/lib/bb/tests/fetch-testdata/apple/cups/releases
deleted file mode 100644
index f8934f56fa..0000000000
--- a/bitbake/lib/bb/tests/fetch-testdata/apple/cups/releases
+++ /dev/null
@@ -1,2400 +0,0 @@
Releases · apple/cups · GitHub
v2.3.1 (Latest release)
@michaelrsweet released this Dec 13, 2019

    CUPS 2.3.1 is a general bug fix release, including a fix for CVE-2019-2228. Changes include:

• Documentation updates (Issue #5661, #5674, #5682)
• CVE-2019-2228: The ippSetValueTag function did not validate the default language value.
• Fixed a crash bug in the web interface (Issue #5621)
• The PPD cache code now looks up page sizes using their dimensions (Issue #5633)
• PPD files containing "custom" option keywords did not work (Issue #5639)
• Added a workaround for the scheduler's systemd support (Issue #5640)
• On Windows, TLS certificates generated on February 29 would likely fail (Issue #5643)
• Added a DigestOptions directive for the client.conf file to control whether MD5-based Digest authentication is allowed (Issue #5647)
• Fixed a bug in the handling of printer resource files (Issue #5652)
• The libusb-based USB backend now reports an error when the distribution permissions are wrong (Issue #5658)
• Added paint can labels to the Dymo driver (Issue #5662)
• The ippeveprinter program now supports authentication (Issue #5665)
• The ippeveprinter program now advertises DNS-SD services on the correct interfaces, and provides a way to turn them off (Issue #5666)
• The --with-dbusdir option was ignored by the configure script (Issue #5671)
• Sandboxed applications were not able to get the default printer (Issue #5676)
• Log file access controls were not preserved by cupsctl (Issue #5677)
• Default printers set with lpoptions did not work in all cases (Issue #5681, Issue #5683, Issue #5684)
• Fixed an error in the jobs web interface template (Issue #5694)
• Fixed an off-by-one error in ippEnumString (Issue #5695)
• Fixed some new compiler warnings (Issue #5700)
• Fixed a few issues with the Apple Raster support (rdar://55301114)
• The IPP backend did not detect all cases where a job should be retried using a raster format (rdar://56021091)
• Fixed spelling of "fold-accordion".
• Fixed the default common name for TLS certificates used by ippeveprinter.
• Fixed the option names used for IPP Everywhere finishing options.
• Added support for the second roll of the DYMO Twin/DUO label printers.

    Enjoy!

v2.2.13
@michaelrsweet released this Dec 13, 2019

CUPS 2.2.13 is the last general bug fix release in the 2.2.x series and includes a fix for CVE-2019-2228. Changes include:

• CVE-2019-2228: The ippSetValueTag function did not validate the default language value.
• Added a workaround for the scheduler's systemd support (Issue #5640)
• Fixed spelling of "fold-accordion".
• Fixed the default common name for TLS certificates used by ippserver.
• The libusb-based USB backend now reports an error when the distribution permissions are wrong (Issue #5658)
• Default printers set with lpoptions did not work in all cases (Issue #5681, Issue #5683, Issue #5684)
• Fixed an off-by-one error in ippEnumString (Issue #5695; see the sketch after this list)
• Fixed some new compiler warnings (Issue #5700)
• Fixed a few issues with the Apple Raster support (rdar://55301114)
• The IPP backend did not detect all cases where a job should be retried using a raster format (rdar://56021091)
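The ippEnumString fix above is a classic bounds error. A minimal sketch of the pattern such fixes address, in Python and illustrative only (not CUPS source; the keyword table here is hypothetical):

    # Illustrative only, not CUPS source: an enum-to-keyword lookup where the
    # bounds check must be "index < len(table)", not "index <= len(table)".
    KEYWORDS = ["none", "punch", "staple"]      # hypothetical value table

    def enum_string(value):
        if 0 <= value < len(KEYWORDS):          # "<=" here would read past the end
            return KEYWORDS[value]
        return str(value)                       # fall back to the raw number

    print(enum_string(2))   # "staple"
    print(enum_string(3))   # "3" -- one past the end, handled safely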

    Enjoy!

v2.3.0
@michaelrsweet released this Aug 23, 2019

CUPS 2.3.0 is now available for download. It adopts the new CUPS license, adds support for IPP presets and finishing templates, fixes a number of bugs and "polish" issues, and includes the new ippeveprinter utility. Changes include:

• CVE-2019-8696 and CVE-2019-8675: Fixed SNMP buffer overflows (rdar://51685251)
• Added a GPL2/LGPL2 exception to the new CUPS license terms.
• Documentation updates (Issue #5604)
• Localization updates (Issue #5637)
• Fixed a bug in the scheduler job cleanup code (Issue #5588)
• Fixed builds when there is no TLS library (Issue #5590)
• Eliminated some new GCC compiler warnings (Issue #5591)
• Removed dead code from the scheduler (Issue #5593)
• "make" failed with GZIP options (Issue #5595)
• Fixed potential excess logging from the scheduler when removing job files (Issue #5597)
• Fixed a NULL pointer dereference bug in httpGetSubField2 (Issue #5598)
• Added FIPS-140 workarounds for GNU TLS (Issue #5601, Issue #5622)
• The scheduler no longer provides a default value for the description (Issue #5603)
• The scheduler now logs jobs held for authentication using the error level so it is clear what happened (Issue #5604)
• The lpadmin command did not always update the PPD file for changes to the cupsIPPSupplies and cupsSNMPSupplies keywords (Issue #5610)
• The scheduler now uses both the group's membership list as well as the various OS-specific membership functions to determine whether a user belongs to a named group (Issue #5613)
• Added USB quirks rule for HP LaserJet 1015 (Issue #5617)
• Fixed some PPD parser issues (Issue #5623, Issue #5624)
• The IPP parser no longer allows invalid member attributes in collections (Issue #5630)
• The configure script now treats the "wheel" group as a potential system group (Issue #5638)
• Fixed a USB printing issue on macOS (rdar://31433931)
• Fixed IPP buffer overflow (rdar://50035411)
• Fixed memory disclosure issue in the scheduler (rdar://51373853)
• Fixed DoS issues in the scheduler (rdar://51373929)
• Fixed an issue with unsupported "sides" values in the IPP backend (rdar://51775322)
• The scheduler would restart continuously when idle and printers were not shared (rdar://52561199)
• Fixed an issue with EXPECT !name WITH-VALUE ... tests.
• Fixed a command ordering issue in the Zebra ZPL driver.
• Fixed a memory leak in ppdOpen.

    Enjoy!

v2.2.12
@michaelrsweet released this Aug 15, 2019

    CUPS 2.2.12 is now available and includes security, compatibility, and general bug fixes. Changes include:

• CVE-2019-8696 and CVE-2019-8675: Fixed SNMP buffer overflows (rdar://51685251)
• The cupsctl command now prevents setting "cups-files.conf" directives (Issue #5530)
• Updated the systemd service file for cupsd (Issue #5551)
• The cupsCheckDestSupported function did not check octetString values correctly (Issue #5557)
• The scheduler did not encode octetString values like "job-password" correctly for the print filters (Issue #5558)
• Restored minimal support for the Emulators keyword in PPD files to allow old Samsung printer drivers to continue to work (Issue #5562)
• Timed out job submission now yields an error (Issue #5570)
• The footer in the web interface covered some content on small displays (Issue #5574)
• The libusb-based USB backend now enforces read limits, improving print speed in many cases (Issue #5583)
• Fixed some compatibility issues with old releases of CUPS (Issue #5587)
• Fixed a bug in the scheduler job cleanup code (Issue #5588)
• "make" failed with GZIP options (Issue #5595)
• Added FIPS-140 workarounds for GNU TLS (Issue #5601, Issue #5622)
• The scheduler no longer provides a default value for the description (Issue #5603)
• The lpadmin command did not always update the PPD file for changes to the cupsIPPSupplies and cupsSNMPSupplies keywords (Issue #5610)
• The scheduler now uses both the group's membership list as well as the various OS-specific membership functions to determine whether a user belongs to a named group (Issue #5613)
• Added USB quirks rule for HP LaserJet 1015 (Issue #5617)
• Fixed some PPD parser issues (Issue #5623, Issue #5624)
• The IPP parser no longer allows invalid member attributes in collections (Issue #5630)
• Fixed IPP buffer overflow (rdar://50035411)
• Fixed memory disclosure issue in the scheduler (rdar://51373853)
• Fixed DoS issues in the scheduler (rdar://51373929)
• The scheduler would restart continuously when idle and printers were not shared (rdar://52561199)
• Fixed a command ordering issue in the Zebra ZPL driver.
• Fixed a memory leak in ppdOpen.

    Enjoy!

v2.3rc1 (Pre-release)
@michaelrsweet released this May 21, 2019

CUPS 2.3rc1 is now available for download. This is the first release candidate for CUPS 2.3.0, which adopts the new CUPS license, adds support for IPP presets and finishing templates, and fixes a number of bugs and "polish" issues. This release candidate also includes the new ippeveprinter utility. Changes include:

• The cups-config script no longer adds extra libraries when linking against shared libraries (Issue #5261)
• The supplied example print documents have been optimized for size (Issue #5529)
• The cupsctl command now prevents setting "cups-files.conf" directives (Issue #5530)
• The "forbidden" message in the web interface is now explained (Issue #5547)
• The footer in the web interface covered some content on small displays (Issue #5574)
• The libusb-based USB backend now enforces read limits, improving print speed in many cases (Issue #5583)
• The ippeveprinter command now looks for print commands in the "command" subdirectory.
• The ipptool command now supports $date-current and $date-start variables to insert the current and starting date and time values, as well as ISO-8601 relative time values such as "PT30S" for 30 seconds in the future (see the sketch after this list).
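To make the relative time values above concrete, a minimal sketch in Python, illustrative only and not ipptool source (the seconds-only parsing is an assumption for brevity):

    # Illustrative only, not ipptool source: what a relative ISO-8601 duration
    # such as "PT30S" resolves to -- a point in time offset from "now".
    import re
    from datetime import datetime, timedelta

    def resolve(value, now=None):
        now = now or datetime.now()
        m = re.fullmatch(r"PT(\d+)S", value)   # seconds-only durations, for brevity
        if m:
            return now + timedelta(seconds=int(m.group(1)))
        return now

    print(resolve("PT30S"))   # 30 seconds in the future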

    Enjoy!

v2.3b8 (Pre-release)
@michaelrsweet released this May 2, 2019

    CUPS 2.3b8 is now available for download. This is the eighth beta of the CUPS 2.3 series which adopts the new CUPS license, adds support for IPP presets and finishing templates, and fixes a number of bugs and "polish" issues. This beta also includes the new ippeveprinter utility. Changes include:

• Media size matching now uses a tolerance of 0.5mm (rdar://33822024; see the sketch after this list)
• The lpadmin command would hang with a bad PPD file (rdar://41495016)
• Fixed a potential crash bug in cups-driverd (rdar://46625579)
• Fixed a performance regression with large PPDs (rdar://47040759)
• Fixed a memory reallocation bug in HTTP header value expansion (rdar://problem/50000749)
• Timed out job submission now yields an error (Issue #5570)
• Restored minimal support for the Emulators keyword in PPD files to allow old Samsung printer drivers to continue to work (Issue #5562)
• The scheduler did not encode octetString values like "job-password" correctly for the print filters (Issue #5558)
• The cupsCheckDestSupported function did not check octetString values correctly (Issue #5557)
• Added support for UserAgentTokens directive in "client.conf" (Issue #5555)
• Updated the systemd service file for cupsd (Issue #5551)
• The ippValidateAttribute function did not catch all instances of invalid UTF-8 strings (Issue #5509)
• Fixed an issue with the self-signed certificates generated by GNU TLS (Issue #5506)
• Fixed a potential memory leak when reading at the end of a file (Issue #5473)
• Fixed potential unaligned accesses in the string pool (Issue #5474)
• Fixed a potential memory leak when loading a PPD file (Issue #5475)
• Added a USB quirks rule for the Lexmark E120n (Issue #5478)
• Updated the USB quirks rule for Zebra label printers (Issue #5395)
• Fixed a compile error on Linux (Issue #5483)
• The lpadmin command, web interface, and scheduler all queried an IPP Everywhere printer differently, resulting in different PPDs for the same printer (Issue #5484)
• The web interface no longer provides access to the log files (Issue #5513)
• Non-Kerberized printing to Windows via IPP was broken (Issue #5515)
• Eliminated use of private headers and some deprecated macOS APIs (Issue #5516)
• The scheduler no longer stops a printer if an error occurs when a job is canceled or aborted (Issue #5517)
• Added a USB quirks rule for the DYMO 450 Turbo (Issue #5521)
• Added a USB quirks rule for Xerox printers (Issue #5523)
• The scheduler's self-signed certificate did not include all of the alternate names for the server when using GNU TLS (Issue #5525)
• Fixed compiler warnings with newer versions of GCC (Issue #5532, Issue #5533)
• Fixed some PPD caching and IPP Everywhere PPD accounting/password bugs (Issue #5535)
• Fixed PreserveJobHistory bug with time values (Issue #5538)
• The scheduler no longer advertises the HTTP methods it supports (Issue #5540)
• Localization updates (Issue #5461, Issue #5471, Issue #5481, Issue #5486, Issue #5489, Issue #5491, Issue #5492, Issue #5493, Issue #5494, Issue #5495, Issue #5497, Issue #5499, Issue #5500, Issue #5501, Issue #5504)
• The scheduler did not always idle exit as quickly as it could.
• Added a new ippeveprinter command based on the old ippserver sample code.
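The 0.5mm media tolerance above can be pictured with a small sketch, in Python and illustrative only (not CUPS source; sizes are width/height pairs in millimetres):

    # Illustrative only, not CUPS source: matching a reported media size
    # against a known size within the 0.5mm tolerance described above.
    TOLERANCE_MM = 0.5

    def sizes_match(a, b):
        (wa, ha), (wb, hb) = a, b
        return abs(wa - wb) <= TOLERANCE_MM and abs(ha - hb) <= TOLERANCE_MM

    # A4 is 210 x 297 mm; a size reported as 209.7 x 297.2 mm still matches.
    print(sizes_match((210.0, 297.0), (209.7, 297.2)))   # True
    print(sizes_match((210.0, 297.0), (215.9, 279.4)))   # False (US Letter)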

    Enjoy!

v2.2.11
@michaelrsweet released this Mar 22, 2019

CUPS 2.2.11 is a bug fix release that addresses issues in the scheduler, IPP Everywhere support, CUPS library, and USB printer support. Changes include:

• Running ppdmerge with the same input and output filenames did not work as advertised (Issue #5455)
• Fixed a potential memory leak when reading at the end of a file (Issue #5473)
• Fixed potential unaligned accesses in the string pool (Issue #5474)
• Fixed a potential memory leak when loading a PPD file (Issue #5475)
• Added a USB quirks rule for the Lexmark E120n (Issue #5478)
• Updated the USB quirks rule for Zebra label printers (Issue #5395)
• Fixed a compile error on Linux (Issue #5483)
• The lpadmin command, web interface, and scheduler all queried an IPP Everywhere printer differently, resulting in different PPDs for the same printer (Issue #5484)
• Fixed an issue with the self-signed certificates generated by GNU TLS (Issue #5506)
• The ippValidateAttribute function did not catch all instances of invalid UTF-8 strings (Issue #5509)
• Non-Kerberized printing to Windows via IPP was broken (Issue #5515)
• The scheduler no longer stops a printer if an error occurs when a job is canceled or aborted (Issue #5517)
• Added a USB quirks rule for the DYMO 450 Turbo (Issue #5521)
• Added a USB quirks rule for Xerox printers (Issue #5523)
• The scheduler's self-signed certificate did not include all of the alternate names for the server when using GNU TLS (Issue #5525)
• Fixed compiler warnings with newer versions of GCC (Issue #5532, Issue #5533)
• Fixed some PPD caching and IPP Everywhere PPD accounting/password bugs (Issue #5535)
• Fixed PreserveJobHistory bug with time values (Issue #5538)
• Media size matching now uses a tolerance of 0.5mm (rdar://33822024)
• The lpadmin command would hang with a bad PPD file (rdar://41495016)
• Fixed a potential crash bug in cups-driverd (rdar://46625579)
• Fixed a performance regression with large PPDs (rdar://47040759)
• The scheduler did not always idle exit as quickly as it could.

    Enjoy!

v2.3b7 (Pre-release)
@michaelrsweet released this Dec 14, 2018

CUPS 2.3b7 is now available for download. This is the seventh beta of the CUPS 2.3 series, which adopts the new CUPS license, adds support for IPP presets and finishing templates, and fixes a number of bugs and "polish" issues.

Changes include:

• Fixed some build failures (Issue #5451, Issue #5463)
• Running ppdmerge with the same input and output filenames did not work as advertised (Issue #5455)

    Enjoy!

v2.3b6 (Pre-release)
@michaelrsweet released this Dec 7, 2018

    CUPS 2.3b6 is now available for download. This is the sixth beta of the CUPS 2.3 series which adopts the new CUPS license, adds support for IPP presets and finishing templates, and fixes a number of bugs and “polish” issues. Changes include:

• Localization update (Issue #5339, Issue #5348, Issue #5362, Issue #5408, Issue #5410)
• Documentation updates (Issue #5369, Issue #5402, Issue #5403, Issue #5404)
• CVE-2018-4700: Linux session cookies used a predictable random number seed.
• All user commands now support the --help option (Issue #5326)
• The lpoptions command now works with IPP Everywhere printers that have not yet been added as local queues (Issue #5045)
• The lpadmin command would create a non-working printer in some error cases (Issue #5305)
• The scheduler would crash if an empty AccessLog directive was specified (Issue #5309)
• The scheduler did not idle-exit on some Linux distributions (Issue #5319)
• Fixed a regression in the changes to ippValidateAttribute (Issue #5322, Issue #5330)
• Fixed a crash bug in the Epson dot matrix driver (Issue #5323)
• Automatic debug logging of job errors did not work with systemd (Issue #5337)
• The web interface did not list the IPP Everywhere "driver" (Issue #5338)
• The scheduler did not report all of the supported job options and values (Issue #5340)
• The IPP Everywhere "driver" now properly supports face-up printers (Issue #5345)
• Fixed some typos in the label printer drivers (Issue #5350)
• Setting the Community name to the empty string in snmp.conf now disables SNMP supply level monitoring by all the standard network backends (Issue #5354)
• Multi-file jobs could get stuck if the backend failed (Issue #5359, Issue #5413)
• The IPP Everywhere "driver" no longer does local filtering when printing to a shared CUPS printer (Issue #5361)
• The lpadmin command now correctly reports IPP errors when configuring an IPP Everywhere printer (Issue #5370)
• Fixed some memory leaks discovered by Coverity (Issue #5375)
• The PPD compiler incorrectly terminated JCL options (Issue #5379)
• The cupstestppd utility did not generate errors for missing/mismatched CloseUI/JCLCloseUI keywords (Issue #5381)
• The scheduler now reports the actual location of the log file (Issue #5398)
• Added USB quirk rules (Issue #5395, Issue #5420, Issue #5443)
• The generated PPD files for IPP Everywhere printers did not contain the cupsManualCopies keyword (Issue #5433)
• Kerberos credentials might be truncated (Issue #5435)
• The handling of MaxJobTime 0 did not match the documentation (Issue #5438)
• Fixed a bug adding a queue with the -E option (Issue #5440)
• The cupsaddsmb program has been removed (Issue #5449)
• The cupstestdsc program has been removed (Issue #5450)
• The scheduler was being backgrounded on macOS, causing applications to spin (rdar://40436080)
• The scheduler did not validate that required initial request attributes were in the operation group (rdar://41098178)
• Authentication in the web interface did not work on macOS (rdar://41444473)
• Fixed an issue with HTTP Digest authentication (rdar://41709086)
• The scheduler could crash when job history was purged (rdar://42198057)
• Fixed a crash bug when mapping PPD duplex options to IPP attributes (rdar://46183976)
• Fixed a memory leak for some IPP (extension) syntaxes.
• The cupscgi, cupsmime, and cupsppdc support libraries are no longer installed as shared libraries.
• The snmp backend is now deprecated.

    Enjoy!

v2.2.10
@michaelrsweet released this Dec 7, 2018

    CUPS 2.2.10 is a bug fix release that addresses issues in the scheduler, IPP Everywhere support, CUPS library, and USB printer support. Changes include:

• CVE-2018-4700: Linux session cookies used a predictable random number seed.
• The lpoptions command now works with IPP Everywhere printers that have not yet been added as local queues (Issue #5045)
• Added USB quirk rules (Issue #5395, Issue #5443)
• The generated PPD files for IPP Everywhere printers did not contain the cupsManualCopies keyword (Issue #5433)
• Kerberos credentials might be truncated (Issue #5435)
• The handling of MaxJobTime 0 did not match the documentation (Issue #5438)
• Incorporated the page accounting changes from CUPS 2.3 (Issue #5439)
• Fixed a bug adding a queue with the -E option (Issue #5440)
• Fixed a crash bug when mapping PPD duplex options to IPP attributes (rdar://46183976)

    Enjoy!

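Pages like the one above live under bitbake/lib/bb/tests/fetch-testdata and serve as fixtures for the fetcher's upstream-version tests. A minimal sketch of pulling release tags out of such a saved page, in Python and not BitBake's actual fetcher implementation (the regex and the sample string are assumptions for illustration):

    # A minimal sketch, not BitBake's fetcher code: extract release tags
    # (v2.3.1, v2.2.13, v2.3b8, v2.3rc1, ...) from a saved releases page.
    import re

    def release_tags(html):
        return sorted(set(re.findall(r"\bv\d+(?:\.\d+)*(?:(?:b|rc)\d+)?\b", html)))

    sample = "v2.3.1 ... v2.2.13 ... v2.3rc1 ... v2.3b8"
    print(release_tags(sample))   # ['v2.2.13', 'v2.3.1', 'v2.3b8', 'v2.3rc1']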
diff --git a/bitbake/lib/bb/tests/fetch-testdata/debian/pool/main/d/db5.3/index.html b/bitbake/lib/bb/tests/fetch-testdata/debian/pool/main/d/db5.3/index.html
deleted file mode 100644
index a5a6f4839e..0000000000
--- a/bitbake/lib/bb/tests/fetch-testdata/debian/pool/main/d/db5.3/index.html
+++ /dev/null
@@ -1,509 +0,0 @@

    Index of /debian/pool/main/d/db5.3

Name | Last modified | Size

Parent Directory
    [   ]db5.3-doc_5.3.28+dfsg1-0.5_all.deb2019-02-26 10:57 15M
    [   ]db5.3-doc_5.3.28+dfsg1-0.6_all.deb2019-03-12 05:28 15M
    [   ]db5.3-doc_5.3.28-9+deb8u1_all.deb2017-11-18 20:15 18M
    [   ]db5.3-doc_5.3.28-12+deb9u1_all.deb2017-09-28 09:42 18M
    [   ]db5.3-sql-util_5.3.28+dfsg1-0.5_amd64.deb2019-02-26 10:11 21K
    [   ]db5.3-sql-util_5.3.28+dfsg1-0.5_arm64.deb2019-02-26 09:56 19K
    [   ]db5.3-sql-util_5.3.28+dfsg1-0.5_armel.deb2019-02-26 10:57 20K
    [   ]db5.3-sql-util_5.3.28+dfsg1-0.5_armhf.deb2019-02-26 10:57 19K
    [   ]db5.3-sql-util_5.3.28+dfsg1-0.5_i386.deb2019-02-26 09:46 22K
    [   ]db5.3-sql-util_5.3.28+dfsg1-0.5_mips.deb2019-02-26 10:57 19K
    [   ]db5.3-sql-util_5.3.28+dfsg1-0.5_mips64el.deb2019-02-26 12:42 19K
    [   ]db5.3-sql-util_5.3.28+dfsg1-0.5_mipsel.deb2019-02-26 12:42 19K
    [   ]db5.3-sql-util_5.3.28+dfsg1-0.5_ppc64el.deb2019-02-26 09:56 23K
    [   ]db5.3-sql-util_5.3.28+dfsg1-0.5_s390x.deb2019-02-26 09:56 20K
    [   ]db5.3-sql-util_5.3.28+dfsg1-0.6_amd64.deb2019-03-12 05:28 21K
    [   ]db5.3-sql-util_5.3.28+dfsg1-0.6_arm64.deb2019-03-12 05:28 19K
    [   ]db5.3-sql-util_5.3.28+dfsg1-0.6_armel.deb2019-03-12 06:29 20K
    [   ]db5.3-sql-util_5.3.28+dfsg1-0.6_armhf.deb2019-03-12 06:14 19K
    [   ]db5.3-sql-util_5.3.28+dfsg1-0.6_i386.deb2019-03-12 05:43 22K
    [   ]db5.3-sql-util_5.3.28+dfsg1-0.6_mips64el.deb2019-03-12 06:29 19K
    [   ]db5.3-sql-util_5.3.28+dfsg1-0.6_mipsel.deb2019-03-12 07:30 19K
    [   ]db5.3-sql-util_5.3.28+dfsg1-0.6_ppc64el.deb2019-03-12 05:28 23K
    [   ]db5.3-sql-util_5.3.28+dfsg1-0.6_s390x.deb2019-03-12 05:13 20K
    [   ]db5.3-sql-util_5.3.28-9+deb8u1_amd64.deb2017-11-20 03:40 21K
    [   ]db5.3-sql-util_5.3.28-9+deb8u1_armel.deb2017-11-20 04:26 20K
    [   ]db5.3-sql-util_5.3.28-9+deb8u1_armhf.deb2017-12-02 16:26 19K
    [   ]db5.3-sql-util_5.3.28-9+deb8u1_i386.deb2017-11-20 03:25 22K
    [   ]db5.3-sql-util_5.3.28-12+deb9u1_amd64.deb2017-09-28 09:42 20K
    [   ]db5.3-sql-util_5.3.28-12+deb9u1_arm64.deb2017-09-28 09:42 18K
    [   ]db5.3-sql-util_5.3.28-12+deb9u1_armel.deb2017-09-28 10:28 20K
    [   ]db5.3-sql-util_5.3.28-12+deb9u1_armhf.deb2017-09-28 10:28 19K
    [   ]db5.3-sql-util_5.3.28-12+deb9u1_i386.deb2017-09-28 09:27 22K
    [   ]db5.3-sql-util_5.3.28-12+deb9u1_mips.deb2017-09-29 16:10 19K
    [   ]db5.3-sql-util_5.3.28-12+deb9u1_mips64el.deb2017-09-28 11:29 19K
    [   ]db5.3-sql-util_5.3.28-12+deb9u1_mipsel.deb2017-09-28 11:14 19K
    [   ]db5.3-sql-util_5.3.28-12+deb9u1_ppc64el.deb2017-09-28 09:27 19K
    [   ]db5.3-sql-util_5.3.28-12+deb9u1_s390x.deb2017-09-28 09:42 20K
    [   ]db5.3-util_5.3.28+dfsg1-0.5_amd64.deb2019-02-26 10:11 63K
    [   ]db5.3-util_5.3.28+dfsg1-0.5_arm64.deb2019-02-26 09:56 63K
    [   ]db5.3-util_5.3.28+dfsg1-0.5_armel.deb2019-02-26 10:57 58K
    [   ]db5.3-util_5.3.28+dfsg1-0.5_armhf.deb2019-02-26 10:57 59K
    [   ]db5.3-util_5.3.28+dfsg1-0.5_i386.deb2019-02-26 09:46 65K
    [   ]db5.3-util_5.3.28+dfsg1-0.5_mips.deb2019-02-26 10:57 62K
    [   ]db5.3-util_5.3.28+dfsg1-0.5_mips64el.deb2019-02-26 12:42 64K
    [   ]db5.3-util_5.3.28+dfsg1-0.5_mipsel.deb2019-02-26 12:42 63K
    [   ]db5.3-util_5.3.28+dfsg1-0.5_ppc64el.deb2019-02-26 09:56 69K
    [   ]db5.3-util_5.3.28+dfsg1-0.5_s390x.deb2019-02-26 09:56 64K
    [   ]db5.3-util_5.3.28+dfsg1-0.6_amd64.deb2019-03-12 05:28 63K
    [   ]db5.3-util_5.3.28+dfsg1-0.6_arm64.deb2019-03-12 05:28 63K
    [   ]db5.3-util_5.3.28+dfsg1-0.6_armel.deb2019-03-12 06:29 58K
    [   ]db5.3-util_5.3.28+dfsg1-0.6_armhf.deb2019-03-12 06:14 59K
    [   ]db5.3-util_5.3.28+dfsg1-0.6_i386.deb2019-03-12 05:43 65K
    [   ]db5.3-util_5.3.28+dfsg1-0.6_mips64el.deb2019-03-12 06:29 64K
    [   ]db5.3-util_5.3.28+dfsg1-0.6_mipsel.deb2019-03-12 07:30 63K
    [   ]db5.3-util_5.3.28+dfsg1-0.6_ppc64el.deb2019-03-12 05:28 69K
    [   ]db5.3-util_5.3.28+dfsg1-0.6_s390x.deb2019-03-12 05:13 64K
    [   ]db5.3-util_5.3.28-9+deb8u1_amd64.deb2017-11-20 03:40 63K
    [   ]db5.3-util_5.3.28-9+deb8u1_armel.deb2017-11-20 04:26 60K
    [   ]db5.3-util_5.3.28-9+deb8u1_armhf.deb2017-12-02 16:26 61K
    [   ]db5.3-util_5.3.28-9+deb8u1_i386.deb2017-11-20 03:25 65K
    [   ]db5.3-util_5.3.28-12+deb9u1_amd64.deb2017-09-28 09:42 63K
    [   ]db5.3-util_5.3.28-12+deb9u1_arm64.deb2017-09-28 09:42 60K
    [   ]db5.3-util_5.3.28-12+deb9u1_armel.deb2017-09-28 10:28 60K
    [   ]db5.3-util_5.3.28-12+deb9u1_armhf.deb2017-09-28 10:28 61K
    [   ]db5.3-util_5.3.28-12+deb9u1_i386.deb2017-09-28 09:27 66K
    [   ]db5.3-util_5.3.28-12+deb9u1_mips.deb2017-09-29 16:10 62K
    [   ]db5.3-util_5.3.28-12+deb9u1_mips64el.deb2017-09-28 11:29 64K
    [   ]db5.3-util_5.3.28-12+deb9u1_mipsel.deb2017-09-28 11:14 63K
    [   ]db5.3-util_5.3.28-12+deb9u1_ppc64el.deb2017-09-28 09:27 60K
    [   ]db5.3-util_5.3.28-12+deb9u1_s390x.deb2017-09-28 09:42 64K
    [   ]db5.3_5.3.28+dfsg1-0.5.debian.tar.xz2019-02-26 08:51 28K
    [   ]db5.3_5.3.28+dfsg1-0.5.dsc2019-02-26 08:51 2.7K
    [   ]db5.3_5.3.28+dfsg1-0.6.debian.tar.xz2019-03-12 04:27 29K
    [   ]db5.3_5.3.28+dfsg1-0.6.dsc2019-03-12 04:27 3.1K
    [   ]db5.3_5.3.28+dfsg1.orig.tar.xz2018-08-09 01:52 19M
    [   ]db5.3_5.3.28-9+deb8u1.debian.tar.xz2017-11-18 20:15 28K
    [   ]db5.3_5.3.28-9+deb8u1.dsc2017-11-18 20:15 3.2K
    [   ]db5.3_5.3.28-12+deb9u1.debian.tar.xz2017-09-24 16:26 28K
    [   ]db5.3_5.3.28-12+deb9u1.dsc2017-09-24 16:26 3.2K
    [   ]db5.3_5.3.28.orig.tar.xz2013-10-27 14:01 23M
    [   ]libdb5.3++-dev_5.3.28+dfsg1-0.5_amd64.deb2019-02-26 10:11 760K
    [   ]libdb5.3++-dev_5.3.28+dfsg1-0.5_arm64.deb2019-02-26 09:56 722K
    [   ]libdb5.3++-dev_5.3.28+dfsg1-0.5_armel.deb2019-02-26 10:57 668K
    [   ]libdb5.3++-dev_5.3.28+dfsg1-0.5_armhf.deb2019-02-26 10:57 692K
    [   ]libdb5.3++-dev_5.3.28+dfsg1-0.5_i386.deb2019-02-26 09:46 842K
    [   ]libdb5.3++-dev_5.3.28+dfsg1-0.5_mips.deb2019-02-26 10:57 773K
    [   ]libdb5.3++-dev_5.3.28+dfsg1-0.5_mips64el.deb2019-02-26 12:42 801K
    [   ]libdb5.3++-dev_5.3.28+dfsg1-0.5_mipsel.deb2019-02-26 12:42 789K
    [   ]libdb5.3++-dev_5.3.28+dfsg1-0.5_ppc64el.deb2019-02-26 09:56 816K
    [   ]libdb5.3++-dev_5.3.28+dfsg1-0.5_s390x.deb2019-02-26 09:56 693K
    [   ]libdb5.3++-dev_5.3.28+dfsg1-0.6_amd64.deb2019-03-12 05:28 759K
    [   ]libdb5.3++-dev_5.3.28+dfsg1-0.6_arm64.deb2019-03-12 05:28 722K
    [   ]libdb5.3++-dev_5.3.28+dfsg1-0.6_armel.deb2019-03-12 06:29 668K
    [   ]libdb5.3++-dev_5.3.28+dfsg1-0.6_armhf.deb2019-03-12 06:14 692K
    [   ]libdb5.3++-dev_5.3.28+dfsg1-0.6_i386.deb2019-03-12 05:43 842K
    [   ]libdb5.3++-dev_5.3.28+dfsg1-0.6_mips64el.deb2019-03-12 06:29 801K
    [   ]libdb5.3++-dev_5.3.28+dfsg1-0.6_mipsel.deb2019-03-12 07:30 789K
    [   ]libdb5.3++-dev_5.3.28+dfsg1-0.6_ppc64el.deb2019-03-12 05:28 816K
    [   ]libdb5.3++-dev_5.3.28+dfsg1-0.6_s390x.deb2019-03-12 05:13 693K
    [   ]libdb5.3++-dev_5.3.28-9+deb8u1_amd64.deb2017-11-20 03:40 740K
    [   ]libdb5.3++-dev_5.3.28-9+deb8u1_armel.deb2017-11-20 04:26 643K
    [   ]libdb5.3++-dev_5.3.28-9+deb8u1_armhf.deb2017-12-02 16:26 672K
    [   ]libdb5.3++-dev_5.3.28-9+deb8u1_i386.deb2017-11-20 03:25 780K
    [   ]libdb5.3++-dev_5.3.28-12+deb9u1_amd64.deb2017-09-28 09:42 757K
    [   ]libdb5.3++-dev_5.3.28-12+deb9u1_arm64.deb2017-09-28 09:42 663K
    [   ]libdb5.3++-dev_5.3.28-12+deb9u1_armel.deb2017-09-28 10:28 676K
    [   ]libdb5.3++-dev_5.3.28-12+deb9u1_armhf.deb2017-09-28 10:28 690K
    [   ]libdb5.3++-dev_5.3.28-12+deb9u1_i386.deb2017-09-28 09:27 836K
    [   ]libdb5.3++-dev_5.3.28-12+deb9u1_mips.deb2017-09-29 16:10 768K
    [   ]libdb5.3++-dev_5.3.28-12+deb9u1_mips64el.deb2017-09-28 11:29 795K
    [   ]libdb5.3++-dev_5.3.28-12+deb9u1_mipsel.deb2017-09-28 11:14 786K
    [   ]libdb5.3++-dev_5.3.28-12+deb9u1_ppc64el.deb2017-09-28 09:27 701K
    [   ]libdb5.3++-dev_5.3.28-12+deb9u1_s390x.deb2017-09-28 09:42 714K
    [   ]libdb5.3++_5.3.28+dfsg1-0.5_amd64.deb2019-02-26 10:11 690K
    [   ]libdb5.3++_5.3.28+dfsg1-0.5_arm64.deb2019-02-26 09:56 628K
    [   ]libdb5.3++_5.3.28+dfsg1-0.5_armel.deb2019-02-26 10:57 576K
    [   ]libdb5.3++_5.3.28+dfsg1-0.5_armhf.deb2019-02-26 10:57 600K
    [   ]libdb5.3++_5.3.28+dfsg1-0.5_i386.deb2019-02-26 09:46 755K
    [   ]libdb5.3++_5.3.28+dfsg1-0.5_mips.deb2019-02-26 10:57 595K
    [   ]libdb5.3++_5.3.28+dfsg1-0.5_mips64el.deb2019-02-26 12:42 604K
    [   ]libdb5.3++_5.3.28+dfsg1-0.5_mipsel.deb2019-02-26 12:42 604K
    [   ]libdb5.3++_5.3.28+dfsg1-0.5_ppc64el.deb2019-02-26 09:56 723K
    [   ]libdb5.3++_5.3.28+dfsg1-0.5_s390x.deb2019-02-26 09:56 619K
    [   ]libdb5.3++_5.3.28+dfsg1-0.6_amd64.deb2019-03-12 05:28 690K
    [   ]libdb5.3++_5.3.28+dfsg1-0.6_arm64.deb2019-03-12 05:28 629K
    [   ]libdb5.3++_5.3.28+dfsg1-0.6_armel.deb2019-03-12 06:29 574K
    [   ]libdb5.3++_5.3.28+dfsg1-0.6_armhf.deb2019-03-12 06:14 600K
    [   ]libdb5.3++_5.3.28+dfsg1-0.6_i386.deb2019-03-12 05:43 755K
    [   ]libdb5.3++_5.3.28+dfsg1-0.6_mips64el.deb2019-03-12 06:29 604K
    [   ]libdb5.3++_5.3.28+dfsg1-0.6_mipsel.deb2019-03-12 07:30 604K
    [   ]libdb5.3++_5.3.28+dfsg1-0.6_ppc64el.deb2019-03-12 05:28 724K
    [   ]libdb5.3++_5.3.28+dfsg1-0.6_s390x.deb2019-03-12 05:13 620K
    [   ]libdb5.3++_5.3.28-9+deb8u1_amd64.deb2017-11-20 03:40 691K
    [   ]libdb5.3++_5.3.28-9+deb8u1_armel.deb2017-11-20 04:26 580K
    [   ]libdb5.3++_5.3.28-9+deb8u1_armhf.deb2017-12-02 16:26 606K
    [   ]libdb5.3++_5.3.28-9+deb8u1_i386.deb2017-11-20 03:25 749K
    [   ]libdb5.3++_5.3.28-12+deb9u1_amd64.deb2017-09-28 09:42 687K
    [   ]libdb5.3++_5.3.28-12+deb9u1_arm64.deb2017-09-28 09:42 574K
    [   ]libdb5.3++_5.3.28-12+deb9u1_armel.deb2017-09-28 10:28 575K
    [   ]libdb5.3++_5.3.28-12+deb9u1_armhf.deb2017-09-28 10:28 600K
    [   ]libdb5.3++_5.3.28-12+deb9u1_i386.deb2017-09-28 09:27 751K
    [   ]libdb5.3++_5.3.28-12+deb9u1_mips.deb2017-09-29 16:10 593K
    [   ]libdb5.3++_5.3.28-12+deb9u1_mips64el.deb2017-09-28 11:29 601K
    [   ]libdb5.3++_5.3.28-12+deb9u1_mipsel.deb2017-09-28 11:14 602K
    [   ]libdb5.3++_5.3.28-12+deb9u1_ppc64el.deb2017-09-28 09:27 618K
    [   ]libdb5.3++_5.3.28-12+deb9u1_s390x.deb2017-09-28 09:42 637K
    [   ]libdb5.3-dbg_5.3.28+dfsg1-0.5_amd64.deb2019-02-26 10:11 43M
    [   ]libdb5.3-dbg_5.3.28+dfsg1-0.5_arm64.deb2019-02-26 09:56 42M
    [   ]libdb5.3-dbg_5.3.28+dfsg1-0.5_armel.deb2019-02-26 10:57 42M
    [   ]libdb5.3-dbg_5.3.28+dfsg1-0.5_armhf.deb2019-02-26 10:57 42M
    [   ]libdb5.3-dbg_5.3.28+dfsg1-0.5_i386.deb2019-02-26 09:46 41M
    [   ]libdb5.3-dbg_5.3.28+dfsg1-0.5_mips.deb2019-02-26 10:57 43M
    [   ]libdb5.3-dbg_5.3.28+dfsg1-0.5_mips64el.deb2019-02-26 12:42 43M
    [   ]libdb5.3-dbg_5.3.28+dfsg1-0.5_mipsel.deb2019-02-26 12:42 42M
    [   ]libdb5.3-dbg_5.3.28+dfsg1-0.5_ppc64el.deb2019-02-26 09:56 42M
    [   ]libdb5.3-dbg_5.3.28+dfsg1-0.5_s390x.deb2019-02-26 09:56 44M
    [   ]libdb5.3-dbg_5.3.28+dfsg1-0.6_amd64.deb2019-03-12 05:28 43M
    [   ]libdb5.3-dbg_5.3.28+dfsg1-0.6_arm64.deb2019-03-12 05:28 42M
    [   ]libdb5.3-dbg_5.3.28+dfsg1-0.6_armel.deb2019-03-12 06:29 42M
    [   ]libdb5.3-dbg_5.3.28+dfsg1-0.6_armhf.deb2019-03-12 06:14 42M
    [   ]libdb5.3-dbg_5.3.28+dfsg1-0.6_i386.deb2019-03-12 05:43 41M
    [   ]libdb5.3-dbg_5.3.28+dfsg1-0.6_mips64el.deb2019-03-12 06:29 43M
    [   ]libdb5.3-dbg_5.3.28+dfsg1-0.6_mipsel.deb2019-03-12 07:30 42M
    [   ]libdb5.3-dbg_5.3.28+dfsg1-0.6_ppc64el.deb2019-03-12 05:28 42M
    [   ]libdb5.3-dbg_5.3.28+dfsg1-0.6_s390x.deb2019-03-12 05:13 44M
    [   ]libdb5.3-dbg_5.3.28-9+deb8u1_amd64.deb2017-11-20 03:40 34M
    [   ]libdb5.3-dbg_5.3.28-9+deb8u1_armel.deb2017-11-20 04:26 35M
    [   ]libdb5.3-dbg_5.3.28-9+deb8u1_armhf.deb2017-12-02 16:26 35M
    [   ]libdb5.3-dbg_5.3.28-9+deb8u1_i386.deb2017-11-20 03:25 32M
    [   ]libdb5.3-dbg_5.3.28-12+deb9u1_amd64.deb2017-09-28 09:42 40M
    [   ]libdb5.3-dbg_5.3.28-12+deb9u1_arm64.deb2017-09-28 09:42 40M
    [   ]libdb5.3-dbg_5.3.28-12+deb9u1_armel.deb2017-09-28 10:28 39M
    [   ]libdb5.3-dbg_5.3.28-12+deb9u1_armhf.deb2017-09-28 10:28 39M
    [   ]libdb5.3-dbg_5.3.28-12+deb9u1_i386.deb2017-09-28 09:27 38M
    [   ]libdb5.3-dbg_5.3.28-12+deb9u1_mips.deb2017-09-29 16:10 41M
    [   ]libdb5.3-dbg_5.3.28-12+deb9u1_mips64el.deb2017-09-28 11:29 40M
    [   ]libdb5.3-dbg_5.3.28-12+deb9u1_mipsel.deb2017-09-28 11:14 39M
    [   ]libdb5.3-dbg_5.3.28-12+deb9u1_ppc64el.deb2017-09-28 09:27 40M
    [   ]libdb5.3-dbg_5.3.28-12+deb9u1_s390x.deb2017-09-28 09:42 42M
    [   ]libdb5.3-dev_5.3.28+dfsg1-0.5_amd64.deb2019-02-26 10:11 743K
    [   ]libdb5.3-dev_5.3.28+dfsg1-0.5_arm64.deb2019-02-26 09:56 707K
    [   ]libdb5.3-dev_5.3.28+dfsg1-0.5_armel.deb2019-02-26 10:57 654K
    [   ]libdb5.3-dev_5.3.28+dfsg1-0.5_armhf.deb2019-02-26 10:57 677K
    [   ]libdb5.3-dev_5.3.28+dfsg1-0.5_i386.deb2019-02-26 09:46 822K
    [   ]libdb5.3-dev_5.3.28+dfsg1-0.5_mips.deb2019-02-26 10:57 753K
    [   ]libdb5.3-dev_5.3.28+dfsg1-0.5_mips64el.deb2019-02-26 12:42 780K
    [   ]libdb5.3-dev_5.3.28+dfsg1-0.5_mipsel.deb2019-02-26 12:42 769K
    [   ]libdb5.3-dev_5.3.28+dfsg1-0.5_ppc64el.deb2019-02-26 09:56 797K
    [   ]libdb5.3-dev_5.3.28+dfsg1-0.5_s390x.deb2019-02-26 09:56 679K
    [   ]libdb5.3-dev_5.3.28+dfsg1-0.6_amd64.deb2019-03-12 05:28 743K
    [   ]libdb5.3-dev_5.3.28+dfsg1-0.6_arm64.deb2019-03-12 05:28 707K
    [   ]libdb5.3-dev_5.3.28+dfsg1-0.6_armel.deb2019-03-12 06:29 654K
    [   ]libdb5.3-dev_5.3.28+dfsg1-0.6_armhf.deb2019-03-12 06:14 677K
    [   ]libdb5.3-dev_5.3.28+dfsg1-0.6_i386.deb2019-03-12 05:43 822K
    [   ]libdb5.3-dev_5.3.28+dfsg1-0.6_mips64el.deb2019-03-12 06:29 780K
    [   ]libdb5.3-dev_5.3.28+dfsg1-0.6_mipsel.deb2019-03-12 07:30 769K
    [   ]libdb5.3-dev_5.3.28+dfsg1-0.6_ppc64el.deb2019-03-12 05:28 797K
    [   ]libdb5.3-dev_5.3.28+dfsg1-0.6_s390x.deb2019-03-12 05:13 679K
    [   ]libdb5.3-dev_5.3.28-9+deb8u1_amd64.deb2017-11-20 03:40 725K
    [   ]libdb5.3-dev_5.3.28-9+deb8u1_armel.deb2017-11-20 04:26 630K
    [   ]libdb5.3-dev_5.3.28-9+deb8u1_armhf.deb2017-12-02 16:26 659K
    [   ]libdb5.3-dev_5.3.28-9+deb8u1_i386.deb2017-11-20 03:25 766K
    [   ]libdb5.3-dev_5.3.28-12+deb9u1_amd64.deb2017-09-28 09:42 742K
    [   ]libdb5.3-dev_5.3.28-12+deb9u1_arm64.deb2017-09-28 09:42 648K
    [   ]libdb5.3-dev_5.3.28-12+deb9u1_armel.deb2017-09-28 10:28 660K
    [   ]libdb5.3-dev_5.3.28-12+deb9u1_armhf.deb2017-09-28 10:28 676K
    [   ]libdb5.3-dev_5.3.28-12+deb9u1_i386.deb2017-09-28 09:27 817K
    [   ]libdb5.3-dev_5.3.28-12+deb9u1_mips.deb2017-09-29 16:10 748K
    [   ]libdb5.3-dev_5.3.28-12+deb9u1_mips64el.deb2017-09-28 11:29 774K
    [   ]libdb5.3-dev_5.3.28-12+deb9u1_mipsel.deb2017-09-28 11:14 765K
    [   ]libdb5.3-dev_5.3.28-12+deb9u1_ppc64el.deb2017-09-28 09:27 682K
    [   ]libdb5.3-dev_5.3.28-12+deb9u1_s390x.deb2017-09-28 09:42 699K
    [   ]libdb5.3-java-dev_5.3.28+dfsg1-0.5_amd64.deb2019-02-26 10:11 757K
    [   ]libdb5.3-java-dev_5.3.28+dfsg1-0.5_arm64.deb2019-02-26 09:56 720K
    [   ]libdb5.3-java-dev_5.3.28+dfsg1-0.5_armel.deb2019-02-26 10:57 662K
    [   ]libdb5.3-java-dev_5.3.28+dfsg1-0.5_armhf.deb2019-02-26 10:57 688K
    [   ]libdb5.3-java-dev_5.3.28+dfsg1-0.5_i386.deb2019-02-26 09:46 840K
    [   ]libdb5.3-java-dev_5.3.28+dfsg1-0.5_mips.deb2019-02-26 10:57 768K
    [   ]libdb5.3-java-dev_5.3.28+dfsg1-0.5_mips64el.deb2019-02-26 12:42 795K
    [   ]libdb5.3-java-dev_5.3.28+dfsg1-0.5_mipsel.deb2019-02-26 12:42 784K
    [   ]libdb5.3-java-dev_5.3.28+dfsg1-0.5_ppc64el.deb2019-02-26 09:56 814K
    [   ]libdb5.3-java-dev_5.3.28+dfsg1-0.5_s390x.deb2019-02-26 09:56 688K
    [   ]libdb5.3-java-dev_5.3.28+dfsg1-0.6_amd64.deb2019-03-12 05:28 756K
    [   ]libdb5.3-java-dev_5.3.28+dfsg1-0.6_arm64.deb2019-03-12 05:28 721K
    [   ]libdb5.3-java-dev_5.3.28+dfsg1-0.6_armel.deb2019-03-12 06:29 663K
    [   ]libdb5.3-java-dev_5.3.28+dfsg1-0.6_armhf.deb2019-03-12 06:14 688K
    [   ]libdb5.3-java-dev_5.3.28+dfsg1-0.6_i386.deb2019-03-12 05:43 840K
    [   ]libdb5.3-java-dev_5.3.28+dfsg1-0.6_mips64el.deb2019-03-12 06:29 796K
    [   ]libdb5.3-java-dev_5.3.28+dfsg1-0.6_mipsel.deb2019-03-12 07:30 785K
    [   ]libdb5.3-java-dev_5.3.28+dfsg1-0.6_ppc64el.deb2019-03-12 05:28 814K
    [   ]libdb5.3-java-dev_5.3.28+dfsg1-0.6_s390x.deb2019-03-12 05:13 688K
    [   ]libdb5.3-java-dev_5.3.28-9+deb8u1_amd64.deb2017-11-20 03:40 737K
    [   ]libdb5.3-java-dev_5.3.28-9+deb8u1_armel.deb2017-11-20 04:26 637K
    [   ]libdb5.3-java-dev_5.3.28-9+deb8u1_armhf.deb2017-12-02 16:26 666K
    [   ]libdb5.3-java-dev_5.3.28-9+deb8u1_i386.deb2017-11-20 03:25 781K
    [   ]libdb5.3-java-dev_5.3.28-12+deb9u1_amd64.deb2017-09-28 09:42 756K
    [   ]libdb5.3-java-dev_5.3.28-12+deb9u1_arm64.deb2017-09-28 09:42 657K
    [   ]libdb5.3-java-dev_5.3.28-12+deb9u1_armel.deb2017-09-28 10:28 670K
    [   ]libdb5.3-java-dev_5.3.28-12+deb9u1_armhf.deb2017-09-28 10:28 686K
    [   ]libdb5.3-java-dev_5.3.28-12+deb9u1_i386.deb2017-09-28 09:27 836K
    [   ]libdb5.3-java-dev_5.3.28-12+deb9u1_mips.deb2017-09-29 16:10 763K
    [   ]libdb5.3-java-dev_5.3.28-12+deb9u1_mips64el.deb2017-09-28 11:29 789K
    [   ]libdb5.3-java-dev_5.3.28-12+deb9u1_mipsel.deb2017-09-28 11:14 780K
    [   ]libdb5.3-java-dev_5.3.28-12+deb9u1_ppc64el.deb2017-09-28 09:27 691K
    [   ]libdb5.3-java-dev_5.3.28-12+deb9u1_s390x.deb2017-09-28 09:42 711K
    [   ]libdb5.3-java-gcj_5.3.28-9+deb8u1_amd64.deb2017-11-20 03:40 602K
    [   ]libdb5.3-java-gcj_5.3.28-9+deb8u1_armel.deb2017-11-20 04:26 455K
    [   ]libdb5.3-java-gcj_5.3.28-9+deb8u1_armhf.deb2017-12-02 16:26 450K
    [   ]libdb5.3-java-gcj_5.3.28-9+deb8u1_i386.deb2017-11-20 03:25 503K
    [   ]libdb5.3-java-gcj_5.3.28-12+deb9u1_amd64.deb2017-09-28 09:42 602K
    [   ]libdb5.3-java-gcj_5.3.28-12+deb9u1_arm64.deb2017-09-28 09:42 595K
    [   ]libdb5.3-java-gcj_5.3.28-12+deb9u1_armel.deb2017-09-28 10:28 453K
    [   ]libdb5.3-java-gcj_5.3.28-12+deb9u1_armhf.deb2017-09-28 10:28 447K
    [   ]libdb5.3-java-gcj_5.3.28-12+deb9u1_i386.deb2017-09-28 09:27 518K
    [   ]libdb5.3-java-gcj_5.3.28-12+deb9u1_mips.deb2017-09-29 16:10 463K
    [   ]libdb5.3-java-gcj_5.3.28-12+deb9u1_mips64el.deb2017-09-28 11:29 474K
    [   ]libdb5.3-java-gcj_5.3.28-12+deb9u1_mipsel.deb2017-09-28 11:14 471K
    [   ]libdb5.3-java-gcj_5.3.28-12+deb9u1_ppc64el.deb2017-09-28 09:27 611K
    [   ]libdb5.3-java-gcj_5.3.28-12+deb9u1_s390x.deb2017-09-28 09:42 578K
    [   ]libdb5.3-java-jni_5.3.28+dfsg1-0.5_amd64.deb2019-02-26 10:11 696K
    [   ]libdb5.3-java-jni_5.3.28+dfsg1-0.5_arm64.deb2019-02-26 09:56 635K
    [   ]libdb5.3-java-jni_5.3.28+dfsg1-0.5_armel.deb2019-02-26 10:57 580K
    [   ]libdb5.3-java-jni_5.3.28+dfsg1-0.5_armhf.deb2019-02-26 10:57 606K
    [   ]libdb5.3-java-jni_5.3.28+dfsg1-0.5_i386.deb2019-02-26 09:46 763K
    [   ]libdb5.3-java-jni_5.3.28+dfsg1-0.5_mips.deb2019-02-26 10:57 596K
    [   ]libdb5.3-java-jni_5.3.28+dfsg1-0.5_mips64el.deb2019-02-26 12:42 603K
    [   ]libdb5.3-java-jni_5.3.28+dfsg1-0.5_mipsel.deb2019-02-26 12:42 605K
    [   ]libdb5.3-java-jni_5.3.28+dfsg1-0.5_ppc64el.deb2019-02-26 09:56 732K
    [   ]libdb5.3-java-jni_5.3.28+dfsg1-0.5_s390x.deb2019-02-26 09:56 624K
    [   ]libdb5.3-java-jni_5.3.28+dfsg1-0.6_amd64.deb2019-03-12 05:28 696K
    [   ]libdb5.3-java-jni_5.3.28+dfsg1-0.6_arm64.deb2019-03-12 05:28 636K
    [   ]libdb5.3-java-jni_5.3.28+dfsg1-0.6_armel.deb2019-03-12 06:29 581K
    [   ]libdb5.3-java-jni_5.3.28+dfsg1-0.6_armhf.deb2019-03-12 06:14 606K
    [   ]libdb5.3-java-jni_5.3.28+dfsg1-0.6_i386.deb2019-03-12 05:43 762K
    [   ]libdb5.3-java-jni_5.3.28+dfsg1-0.6_mips64el.deb2019-03-12 06:29 603K
    [   ]libdb5.3-java-jni_5.3.28+dfsg1-0.6_mipsel.deb2019-03-12 07:30 605K
    [   ]libdb5.3-java-jni_5.3.28+dfsg1-0.6_ppc64el.deb2019-03-12 05:28 733K
    [   ]libdb5.3-java-jni_5.3.28+dfsg1-0.6_s390x.deb2019-03-12 05:13 625K
    [   ]libdb5.3-java-jni_5.3.28-9+deb8u1_amd64.deb2017-11-20 03:40 696K
    [   ]libdb5.3-java-jni_5.3.28-9+deb8u1_armel.deb2017-11-20 04:26 583K
    [   ]libdb5.3-java-jni_5.3.28-9+deb8u1_armhf.deb2017-12-02 16:26 612K
    [   ]libdb5.3-java-jni_5.3.28-9+deb8u1_i386.deb2017-11-20 03:25 754K
    [   ]libdb5.3-java-jni_5.3.28-12+deb9u1_amd64.deb2017-09-28 09:42 695K
    [   ]libdb5.3-java-jni_5.3.28-12+deb9u1_arm64.deb2017-09-28 09:42 574K
    [   ]libdb5.3-java-jni_5.3.28-12+deb9u1_armel.deb2017-09-28 10:28 580K
    [   ]libdb5.3-java-jni_5.3.28-12+deb9u1_armhf.deb2017-09-28 10:28 606K
    [   ]libdb5.3-java-jni_5.3.28-12+deb9u1_i386.deb2017-09-28 09:27 758K
    [   ]libdb5.3-java-jni_5.3.28-12+deb9u1_mips.deb2017-09-29 16:10 593K
    [   ]libdb5.3-java-jni_5.3.28-12+deb9u1_mips64el.deb2017-09-28 11:29 600K
    [   ]libdb5.3-java-jni_5.3.28-12+deb9u1_mipsel.deb2017-09-28 11:14 603K
    [   ]libdb5.3-java-jni_5.3.28-12+deb9u1_ppc64el.deb2017-09-28 09:27 618K
    [   ]libdb5.3-java-jni_5.3.28-12+deb9u1_s390x.deb2017-09-28 09:42 641K
    [   ]libdb5.3-java_5.3.28+dfsg1-0.5_all.deb2019-02-26 10:57 575K
    [   ]libdb5.3-java_5.3.28+dfsg1-0.6_all.deb2019-03-12 05:28 575K
    [   ]libdb5.3-java_5.3.28-9+deb8u1_all.deb2017-11-18 20:15 543K
    [   ]libdb5.3-java_5.3.28-12+deb9u1_all.deb2017-09-28 09:42 548K
    [   ]libdb5.3-sql-dev_5.3.28+dfsg1-0.5_amd64.deb2019-02-26 10:11 1.0M
    [   ]libdb5.3-sql-dev_5.3.28+dfsg1-0.5_arm64.deb2019-02-26 09:56 968K
    [   ]libdb5.3-sql-dev_5.3.28+dfsg1-0.5_armel.deb2019-02-26 10:57 901K
    [   ]libdb5.3-sql-dev_5.3.28+dfsg1-0.5_armhf.deb2019-02-26 10:57 929K
    [   ]libdb5.3-sql-dev_5.3.28+dfsg1-0.5_i386.deb2019-02-26 09:46 1.1M
    [   ]libdb5.3-sql-dev_5.3.28+dfsg1-0.5_mips.deb2019-02-26 10:57 1.0M
    [   ]libdb5.3-sql-dev_5.3.28+dfsg1-0.5_mips64el.deb2019-02-26 12:42 1.1M
    [   ]libdb5.3-sql-dev_5.3.28+dfsg1-0.5_mipsel.deb2019-02-26 12:42 1.0M
    [   ]libdb5.3-sql-dev_5.3.28+dfsg1-0.5_ppc64el.deb2019-02-26 09:56 1.1M
    [   ]libdb5.3-sql-dev_5.3.28+dfsg1-0.5_s390x.deb2019-02-26 09:56 938K
    [   ]libdb5.3-sql-dev_5.3.28+dfsg1-0.6_amd64.deb2019-03-12 05:28 1.0M
    [   ]libdb5.3-sql-dev_5.3.28+dfsg1-0.6_arm64.deb2019-03-12 05:28 968K
    [   ]libdb5.3-sql-dev_5.3.28+dfsg1-0.6_armel.deb2019-03-12 06:29 901K
    [   ]libdb5.3-sql-dev_5.3.28+dfsg1-0.6_armhf.deb2019-03-12 06:14 929K
    [   ]libdb5.3-sql-dev_5.3.28+dfsg1-0.6_i386.deb2019-03-12 05:43 1.1M
    [   ]libdb5.3-sql-dev_5.3.28+dfsg1-0.6_mips64el.deb2019-03-12 06:29 1.1M
    [   ]libdb5.3-sql-dev_5.3.28+dfsg1-0.6_mipsel.deb2019-03-12 07:30 1.0M
    [   ]libdb5.3-sql-dev_5.3.28+dfsg1-0.6_ppc64el.deb2019-03-12 05:28 1.1M
    [   ]libdb5.3-sql-dev_5.3.28+dfsg1-0.6_s390x.deb2019-03-12 05:13 938K
    [   ]libdb5.3-sql-dev_5.3.28-9+deb8u1_amd64.deb2017-11-20 03:40 1.0M
    [   ]libdb5.3-sql-dev_5.3.28-9+deb8u1_armel.deb2017-11-20 04:26 869K
    [   ]libdb5.3-sql-dev_5.3.28-9+deb8u1_armhf.deb2017-12-02 16:26 906K
    [   ]libdb5.3-sql-dev_5.3.28-9+deb8u1_i386.deb2017-11-20 03:25 1.0M
    [   ]libdb5.3-sql-dev_5.3.28-12+deb9u1_amd64.deb2017-09-28 09:42 1.0M
    [   ]libdb5.3-sql-dev_5.3.28-12+deb9u1_arm64.deb2017-09-28 09:42 891K
    [   ]libdb5.3-sql-dev_5.3.28-12+deb9u1_armel.deb2017-09-28 10:28 910K
    [   ]libdb5.3-sql-dev_5.3.28-12+deb9u1_armhf.deb2017-09-28 10:28 929K
    [   ]libdb5.3-sql-dev_5.3.28-12+deb9u1_i386.deb2017-09-28 09:27 1.1M
    [   ]libdb5.3-sql-dev_5.3.28-12+deb9u1_mips.deb2017-09-29 16:10 1.0M
    [   ]libdb5.3-sql-dev_5.3.28-12+deb9u1_mips64el.deb2017-09-28 11:29 1.0M
    [   ]libdb5.3-sql-dev_5.3.28-12+deb9u1_mipsel.deb2017-09-28 11:14 1.0M
    [   ]libdb5.3-sql-dev_5.3.28-12+deb9u1_ppc64el.deb2017-09-28 09:27 939K
    [   ]libdb5.3-sql-dev_5.3.28-12+deb9u1_s390x.deb2017-09-28 09:42 965K
    [   ]libdb5.3-sql_5.3.28+dfsg1-0.5_amd64.deb2019-02-26 10:11 885K
    [   ]libdb5.3-sql_5.3.28+dfsg1-0.5_arm64.deb2019-02-26 09:56 808K
    [   ]libdb5.3-sql_5.3.28+dfsg1-0.5_armel.deb2019-02-26 10:57 737K
    [   ]libdb5.3-sql_5.3.28+dfsg1-0.5_armhf.deb2019-02-26 10:57 766K
    [   ]libdb5.3-sql_5.3.28+dfsg1-0.5_i386.deb2019-02-26 09:46 963K
    [   ]libdb5.3-sql_5.3.28+dfsg1-0.5_mips.deb2019-02-26 10:57 777K
    [   ]libdb5.3-sql_5.3.28+dfsg1-0.5_mips64el.deb2019-02-26 12:42 789K
    [   ]libdb5.3-sql_5.3.28+dfsg1-0.5_mipsel.deb2019-02-26 12:42 788K
    [   ]libdb5.3-sql_5.3.28+dfsg1-0.5_ppc64el.deb2019-02-26 09:56 928K
    [   ]libdb5.3-sql_5.3.28+dfsg1-0.5_s390x.deb2019-02-26 09:56 794K
    [   ]libdb5.3-sql_5.3.28+dfsg1-0.6_amd64.deb2019-03-12 05:28 883K
    [   ]libdb5.3-sql_5.3.28+dfsg1-0.6_arm64.deb2019-03-12 05:28 808K
    [   ]libdb5.3-sql_5.3.28+dfsg1-0.6_armel.deb2019-03-12 06:29 739K
    [   ]libdb5.3-sql_5.3.28+dfsg1-0.6_armhf.deb2019-03-12 06:14 766K
    [   ]libdb5.3-sql_5.3.28+dfsg1-0.6_i386.deb2019-03-12 05:43 963K
    [   ]libdb5.3-sql_5.3.28+dfsg1-0.6_mips64el.deb2019-03-12 06:29 789K
    [   ]libdb5.3-sql_5.3.28+dfsg1-0.6_mipsel.deb2019-03-12 07:30 788K
    [   ]libdb5.3-sql_5.3.28+dfsg1-0.6_ppc64el.deb2019-03-12 05:28 927K
    [   ]libdb5.3-sql_5.3.28+dfsg1-0.6_s390x.deb2019-03-12 05:13 794K
    [   ]libdb5.3-sql_5.3.28-9+deb8u1_amd64.deb2017-11-20 03:40 882K
    [   ]libdb5.3-sql_5.3.28-9+deb8u1_armel.deb2017-11-20 04:26 742K
    [   ]libdb5.3-sql_5.3.28-9+deb8u1_armhf.deb2017-12-02 16:26 774K
    [   ]libdb5.3-sql_5.3.28-9+deb8u1_i386.deb2017-11-20 03:25 954K
    [   ]libdb5.3-sql_5.3.28-12+deb9u1_amd64.deb2017-09-28 09:42 879K
    [   ]libdb5.3-sql_5.3.28-12+deb9u1_arm64.deb2017-09-28 09:42 733K
    [   ]libdb5.3-sql_5.3.28-12+deb9u1_armel.deb2017-09-28 10:28 737K
    [   ]libdb5.3-sql_5.3.28-12+deb9u1_armhf.deb2017-09-28 10:28 766K
    [   ]libdb5.3-sql_5.3.28-12+deb9u1_i386.deb2017-09-28 09:27 958K
    [   ]libdb5.3-sql_5.3.28-12+deb9u1_mips.deb2017-09-29 16:10 776K
    [   ]libdb5.3-sql_5.3.28-12+deb9u1_mips64el.deb2017-09-28 11:29 786K
    [   ]libdb5.3-sql_5.3.28-12+deb9u1_mipsel.deb2017-09-28 11:14 788K
    [   ]libdb5.3-sql_5.3.28-12+deb9u1_ppc64el.deb2017-09-28 09:27 786K
    [   ]libdb5.3-sql_5.3.28-12+deb9u1_s390x.deb2017-09-28 09:42 817K
    [   ]libdb5.3-stl-dev_5.3.28+dfsg1-0.5_amd64.deb2019-02-26 10:11 842K
    [   ]libdb5.3-stl-dev_5.3.28+dfsg1-0.5_arm64.deb2019-02-26 09:56 804K
    [   ]libdb5.3-stl-dev_5.3.28+dfsg1-0.5_armel.deb2019-02-26 10:57 751K
    [   ]libdb5.3-stl-dev_5.3.28+dfsg1-0.5_armhf.deb2019-02-26 10:57 774K
    [   ]libdb5.3-stl-dev_5.3.28+dfsg1-0.5_i386.deb2019-02-26 09:46 927K
    [   ]libdb5.3-stl-dev_5.3.28+dfsg1-0.5_mips.deb2019-02-26 10:57 856K
    [   ]libdb5.3-stl-dev_5.3.28+dfsg1-0.5_mips64el.deb2019-02-26 12:42 885K
    [   ]libdb5.3-stl-dev_5.3.28+dfsg1-0.5_mipsel.deb2019-02-26 12:42 874K
    [   ]libdb5.3-stl-dev_5.3.28+dfsg1-0.5_ppc64el.deb2019-02-26 09:56 900K
    [   ]libdb5.3-stl-dev_5.3.28+dfsg1-0.5_s390x.deb2019-02-26 09:56 773K
    [   ]libdb5.3-stl-dev_5.3.28+dfsg1-0.6_amd64.deb2019-03-12 05:28 842K
    [   ]libdb5.3-stl-dev_5.3.28+dfsg1-0.6_arm64.deb2019-03-12 05:28 804K
    [   ]libdb5.3-stl-dev_5.3.28+dfsg1-0.6_armel.deb2019-03-12 06:29 751K
    [   ]libdb5.3-stl-dev_5.3.28+dfsg1-0.6_armhf.deb2019-03-12 06:14 774K
    [   ]libdb5.3-stl-dev_5.3.28+dfsg1-0.6_i386.deb2019-03-12 05:43 927K
    [   ]libdb5.3-stl-dev_5.3.28+dfsg1-0.6_mips64el.deb2019-03-12 06:29 886K
    [   ]libdb5.3-stl-dev_5.3.28+dfsg1-0.6_mipsel.deb2019-03-12 07:30 874K
    [   ]libdb5.3-stl-dev_5.3.28+dfsg1-0.6_ppc64el.deb2019-03-12 05:28 900K
    [   ]libdb5.3-stl-dev_5.3.28+dfsg1-0.6_s390x.deb2019-03-12 05:13 774K
    [   ]libdb5.3-stl-dev_5.3.28-9+deb8u1_amd64.deb2017-11-20 03:40 826K
    [   ]libdb5.3-stl-dev_5.3.28-9+deb8u1_armel.deb2017-11-20 04:26 727K
    [   ]libdb5.3-stl-dev_5.3.28-9+deb8u1_armhf.deb2017-12-02 16:26 757K
    [   ]libdb5.3-stl-dev_5.3.28-9+deb8u1_i386.deb2017-11-20 03:25 868K
    [   ]libdb5.3-stl-dev_5.3.28-12+deb9u1_amd64.deb2017-09-28 09:42 838K
    [   ]libdb5.3-stl-dev_5.3.28-12+deb9u1_arm64.deb2017-09-28 09:42 745K
    [   ]libdb5.3-stl-dev_5.3.28-12+deb9u1_armel.deb2017-09-28 10:28 759K
    [   ]libdb5.3-stl-dev_5.3.28-12+deb9u1_armhf.deb2017-09-28 10:28 773K
    [   ]libdb5.3-stl-dev_5.3.28-12+deb9u1_i386.deb2017-09-28 09:27 920K
    [   ]libdb5.3-stl-dev_5.3.28-12+deb9u1_mips.deb2017-09-29 16:10 852K
    [   ]libdb5.3-stl-dev_5.3.28-12+deb9u1_mips64el.deb2017-09-28 11:29 880K
    [   ]libdb5.3-stl-dev_5.3.28-12+deb9u1_mipsel.deb2017-09-28 11:14 870K
    [   ]libdb5.3-stl-dev_5.3.28-12+deb9u1_ppc64el.deb2017-09-28 09:27 783K
    [   ]libdb5.3-stl-dev_5.3.28-12+deb9u1_s390x.deb2017-09-28 09:42 795K
    [   ]libdb5.3-stl_5.3.28+dfsg1-0.5_amd64.deb2019-02-26 10:11 711K
    [   ]libdb5.3-stl_5.3.28+dfsg1-0.5_arm64.deb2019-02-26 09:56 650K
    [   ]libdb5.3-stl_5.3.28+dfsg1-0.5_armel.deb2019-02-26 10:57 592K
    [   ]libdb5.3-stl_5.3.28+dfsg1-0.5_armhf.deb2019-02-26 10:57 618K
    [   ]libdb5.3-stl_5.3.28+dfsg1-0.5_i386.deb2019-02-26 09:46 779K
    [   ]libdb5.3-stl_5.3.28+dfsg1-0.5_mips.deb2019-02-26 10:57 614K
    [   ]libdb5.3-stl_5.3.28+dfsg1-0.5_mips64el.deb2019-02-26 12:42 623K
    [   ]libdb5.3-stl_5.3.28+dfsg1-0.5_mipsel.deb2019-02-26 12:42 623K
    [   ]libdb5.3-stl_5.3.28+dfsg1-0.5_ppc64el.deb2019-02-26 09:56 746K
    [   ]libdb5.3-stl_5.3.28+dfsg1-0.5_s390x.deb2019-02-26 09:56 639K
    [   ]libdb5.3-stl_5.3.28+dfsg1-0.6_amd64.deb2019-03-12 05:28 712K
    [   ]libdb5.3-stl_5.3.28+dfsg1-0.6_arm64.deb2019-03-12 05:28 649K
    [   ]libdb5.3-stl_5.3.28+dfsg1-0.6_armel.deb2019-03-12 06:29 592K
    [   ]libdb5.3-stl_5.3.28+dfsg1-0.6_armhf.deb2019-03-12 06:14 618K
    [   ]libdb5.3-stl_5.3.28+dfsg1-0.6_i386.deb2019-03-12 05:43 779K
    [   ]libdb5.3-stl_5.3.28+dfsg1-0.6_mips64el.deb2019-03-12 06:29 623K
    [   ]libdb5.3-stl_5.3.28+dfsg1-0.6_mipsel.deb2019-03-12 07:30 624K
    [   ]libdb5.3-stl_5.3.28+dfsg1-0.6_ppc64el.deb2019-03-12 05:28 746K
    [   ]libdb5.3-stl_5.3.28+dfsg1-0.6_s390x.deb2019-03-12 05:13 639K
    [   ]libdb5.3-stl_5.3.28-9+deb8u1_amd64.deb2017-11-20 03:40 714K
    [   ]libdb5.3-stl_5.3.28-9+deb8u1_armel.deb2017-11-20 04:26 599K
    [   ]libdb5.3-stl_5.3.28-9+deb8u1_armhf.deb2017-12-02 16:26 627K
    [   ]libdb5.3-stl_5.3.28-9+deb8u1_i386.deb2017-11-20 03:25 774K
    [   ]libdb5.3-stl_5.3.28-12+deb9u1_amd64.deb2017-09-28 09:42 707K
    [   ]libdb5.3-stl_5.3.28-12+deb9u1_arm64.deb2017-09-28 09:42 594K
    [   ]libdb5.3-stl_5.3.28-12+deb9u1_armel.deb2017-09-28 10:28 591K
    [   ]libdb5.3-stl_5.3.28-12+deb9u1_armhf.deb2017-09-28 10:28 617K
    [   ]libdb5.3-stl_5.3.28-12+deb9u1_i386.deb2017-09-28 09:27 774K
    [   ]libdb5.3-stl_5.3.28-12+deb9u1_mips.deb2017-09-29 16:10 612K
    [   ]libdb5.3-stl_5.3.28-12+deb9u1_mips64el.deb2017-09-28 11:29 620K
    [   ]libdb5.3-stl_5.3.28-12+deb9u1_mipsel.deb2017-09-28 11:14 622K
    [   ]libdb5.3-stl_5.3.28-12+deb9u1_ppc64el.deb2017-09-28 09:27 639K
    [   ]libdb5.3-stl_5.3.28-12+deb9u1_s390x.deb2017-09-28 09:42 657K
    [   ]libdb5.3-tcl_5.3.28+dfsg1-0.5_amd64.deb2019-02-26 10:11 954K
    [   ]libdb5.3-tcl_5.3.28+dfsg1-0.5_arm64.deb2019-02-26 09:56 894K
    [   ]libdb5.3-tcl_5.3.28+dfsg1-0.5_armel.deb2019-02-26 10:57 805K
    [   ]libdb5.3-tcl_5.3.28+dfsg1-0.5_armhf.deb2019-02-26 10:57 826K
    [   ]libdb5.3-tcl_5.3.28+dfsg1-0.5_i386.deb2019-02-26 09:46 1.0M
    [   ]libdb5.3-tcl_5.3.28+dfsg1-0.5_mips.deb2019-02-26 10:57 921K
    [   ]libdb5.3-tcl_5.3.28+dfsg1-0.5_mips64el.deb2019-02-26 12:42 952K
    [   ]libdb5.3-tcl_5.3.28+dfsg1-0.5_mipsel.deb2019-02-26 12:42 941K
    [   ]libdb5.3-tcl_5.3.28+dfsg1-0.5_ppc64el.deb2019-02-26 09:56 1.0M
    [   ]libdb5.3-tcl_5.3.28+dfsg1-0.5_s390x.deb2019-02-26 09:56 854K
    [   ]libdb5.3-tcl_5.3.28+dfsg1-0.6_amd64.deb2019-03-12 05:28 954K
    [   ]libdb5.3-tcl_5.3.28+dfsg1-0.6_arm64.deb2019-03-12 05:28 894K
    [   ]libdb5.3-tcl_5.3.28+dfsg1-0.6_armel.deb2019-03-12 06:29 803K
    [   ]libdb5.3-tcl_5.3.28+dfsg1-0.6_armhf.deb2019-03-12 06:14 825K
    [   ]libdb5.3-tcl_5.3.28+dfsg1-0.6_i386.deb2019-03-12 05:43 1.0M
    [   ]libdb5.3-tcl_5.3.28+dfsg1-0.6_mips64el.deb2019-03-12 06:29 952K
    [   ]libdb5.3-tcl_5.3.28+dfsg1-0.6_mipsel.deb2019-03-12 07:30 942K
    [   ]libdb5.3-tcl_5.3.28+dfsg1-0.6_ppc64el.deb2019-03-12 05:28 1.0M
    [   ]libdb5.3-tcl_5.3.28+dfsg1-0.6_s390x.deb2019-03-12 05:13 853K
    [   ]libdb5.3-tcl_5.3.28-9+deb8u1_amd64.deb2017-11-20 03:40 1.1M
    [   ]libdb5.3-tcl_5.3.28-9+deb8u1_armel.deb2017-11-20 04:26 971K
    [   ]libdb5.3-tcl_5.3.28-9+deb8u1_armhf.deb2017-12-02 16:26 1.0M
    [   ]libdb5.3-tcl_5.3.28-9+deb8u1_i386.deb2017-11-20 03:25 1.3M
    [   ]libdb5.3-tcl_5.3.28-12+deb9u1_amd64.deb2017-09-28 09:42 949K
    [   ]libdb5.3-tcl_5.3.28-12+deb9u1_arm64.deb2017-09-28 09:42 809K
    [   ]libdb5.3-tcl_5.3.28-12+deb9u1_armel.deb2017-09-28 10:28 808K
    [   ]libdb5.3-tcl_5.3.28-12+deb9u1_armhf.deb2017-09-28 10:28 823K
    [   ]libdb5.3-tcl_5.3.28-12+deb9u1_i386.deb2017-09-28 09:27 1.0M
    [   ]libdb5.3-tcl_5.3.28-12+deb9u1_mips.deb2017-09-29 16:10 915K
    [   ]libdb5.3-tcl_5.3.28-12+deb9u1_mips64el.deb2017-09-28 11:29 943K
    [   ]libdb5.3-tcl_5.3.28-12+deb9u1_mipsel.deb2017-09-28 11:14 936K
    [   ]libdb5.3-tcl_5.3.28-12+deb9u1_ppc64el.deb2017-09-28 09:27 871K
    [   ]libdb5.3-tcl_5.3.28-12+deb9u1_s390x.deb2017-09-28 09:42 885K
    [   ]libdb5.3_5.3.28+dfsg1-0.5_amd64.deb2019-02-26 10:11 667K
    [   ]libdb5.3_5.3.28+dfsg1-0.5_arm64.deb2019-02-26 09:56 607K
    [   ]libdb5.3_5.3.28+dfsg1-0.5_armel.deb2019-02-26 10:57 558K
    [   ]libdb5.3_5.3.28+dfsg1-0.5_armhf.deb2019-02-26 10:57 583K
    [   ]libdb5.3_5.3.28+dfsg1-0.5_i386.deb2019-02-26 09:46 730K
    [   ]libdb5.3_5.3.28+dfsg1-0.5_mips.deb2019-02-26 10:57 575K
    [   ]libdb5.3_5.3.28+dfsg1-0.5_mips64el.deb2019-02-26 12:42 583K
    [   ]libdb5.3_5.3.28+dfsg1-0.5_mipsel.deb2019-02-26 12:42 584K
    [   ]libdb5.3_5.3.28+dfsg1-0.5_ppc64el.deb2019-02-26 09:56 701K
    [   ]libdb5.3_5.3.28+dfsg1-0.5_s390x.deb2019-02-26 09:56 599K
    [   ]libdb5.3_5.3.28+dfsg1-0.6_amd64.deb2019-03-12 05:28 667K
    [   ]libdb5.3_5.3.28+dfsg1-0.6_arm64.deb2019-03-12 05:28 607K
    [   ]libdb5.3_5.3.28+dfsg1-0.6_armel.deb2019-03-12 06:29 559K
    [   ]libdb5.3_5.3.28+dfsg1-0.6_armhf.deb2019-03-12 06:14 583K
    [   ]libdb5.3_5.3.28+dfsg1-0.6_i386.deb2019-03-12 05:43 730K
    [   ]libdb5.3_5.3.28+dfsg1-0.6_mips64el.deb2019-03-12 06:29 583K
    [   ]libdb5.3_5.3.28+dfsg1-0.6_mipsel.deb2019-03-12 07:30 584K
    [   ]libdb5.3_5.3.28+dfsg1-0.6_ppc64el.deb2019-03-12 05:28 701K
    [   ]libdb5.3_5.3.28+dfsg1-0.6_s390x.deb2019-03-12 05:13 598K
    [   ]libdb5.3_5.3.28-9+deb8u1_amd64.deb2017-11-20 03:40 664K
    [   ]libdb5.3_5.3.28-9+deb8u1_armel.deb2017-11-20 04:26 561K
    [   ]libdb5.3_5.3.28-9+deb8u1_armhf.deb2017-12-02 16:26 587K
    [   ]libdb5.3_5.3.28-9+deb8u1_i386.deb2017-11-20 03:25 721K
    [   ]libdb5.3_5.3.28-12+deb9u1_amd64.deb2017-09-28 09:42 663K
    [   ]libdb5.3_5.3.28-12+deb9u1_arm64.deb2017-09-28 09:42 550K
    [   ]libdb5.3_5.3.28-12+deb9u1_armel.deb2017-09-28 10:28 556K
    [   ]libdb5.3_5.3.28-12+deb9u1_armhf.deb2017-09-28 10:28 581K
    [   ]libdb5.3_5.3.28-12+deb9u1_i386.deb2017-09-28 09:27 725K
    [   ]libdb5.3_5.3.28-12+deb9u1_mips.deb2017-09-29 16:10 572K
    [   ]libdb5.3_5.3.28-12+deb9u1_mips64el.deb2017-09-28 11:29 579K
    [   ]libdb5.3_5.3.28-12+deb9u1_mipsel.deb2017-09-28 11:14 581K
    [   ]libdb5.3_5.3.28-12+deb9u1_ppc64el.deb2017-09-28 09:27 594K
    [   ]libdb5.3_5.3.28-12+deb9u1_s390x.deb2017-09-28 09:42 615K

    Apache Server at ftp.debian.org Port 80
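The libdb5.3 listing above mixes several Debian release streams (+deb8u1, +deb9u1, and the +dfsg1-0.x uploads), and these version strings do not sort lexically: "-12+deb9u1" is newer than "-9+deb8u1". A minimal sketch of comparing them correctly by delegating to dpkg's own comparator (assumes dpkg is installed on the host; purely illustrative, not code from the test suite this data serves):

```python
import subprocess

def deb_version_lt(a: str, b: str) -> bool:
    """True if Debian version string a is older than b.

    Delegates to dpkg's comparator rather than reimplementing Debian's
    ordering rules; exit status 0 means the relation holds."""
    return subprocess.run(["dpkg", "--compare-versions", a, "lt", b]).returncode == 0

# Numeric comparison of the revision makes -12 newer than -9,
# even though a plain string sort would say otherwise.
print(deb_version_lt("5.3.28-9+deb8u1", "5.3.28-12+deb9u1"))  # True
```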
diff --git a/bitbake/lib/bb/tests/fetch-testdata/debian/pool/main/m/minicom/index.html b/bitbake/lib/bb/tests/fetch-testdata/debian/pool/main/m/minicom/index.html
deleted file mode 100644
index 4a1eb4de13..0000000000
--- a/bitbake/lib/bb/tests/fetch-testdata/debian/pool/main/m/minicom/index.html
+++ /dev/null
@@ -1,59 +0,0 @@

    Index of /debian/pool/main/m/minicom

    [ICO]  Name  Last modified  Size

    [PARENTDIR]Parent Directory  -
    [   ]minicom_2.7-1+deb8u1.debian.tar.xz2017-04-24 08:22 14K
    [   ]minicom_2.7-1+deb8u1.dsc2017-04-24 08:22 1.9K
    [   ]minicom_2.7-1+deb8u1_amd64.deb2017-04-25 21:10 257K
    [   ]minicom_2.7-1+deb8u1_armel.deb2017-04-26 00:58 246K
    [   ]minicom_2.7-1+deb8u1_armhf.deb2017-04-26 00:58 245K
    [   ]minicom_2.7-1+deb8u1_i386.deb2017-04-25 21:41 258K
    [   ]minicom_2.7-1.1.debian.tar.xz2017-04-22 09:34 14K
    [   ]minicom_2.7-1.1.dsc2017-04-22 09:34 1.9K
    [   ]minicom_2.7-1.1_amd64.deb2017-04-22 15:29 261K
    [   ]minicom_2.7-1.1_arm64.deb2017-04-22 15:29 250K
    [   ]minicom_2.7-1.1_armel.deb2017-04-22 15:29 255K
    [   ]minicom_2.7-1.1_armhf.deb2017-04-22 15:29 254K
    [   ]minicom_2.7-1.1_i386.deb2017-04-22 15:29 266K
    [   ]minicom_2.7-1.1_mips.deb2017-04-22 15:29 258K
    [   ]minicom_2.7-1.1_mips64el.deb2017-04-22 15:29 259K
    [   ]minicom_2.7-1.1_mipsel.deb2017-04-22 15:29 259K
    [   ]minicom_2.7-1.1_ppc64el.deb2017-04-22 15:29 253K
    [   ]minicom_2.7-1.1_s390x.deb2017-04-22 15:29 261K
    [   ]minicom_2.7.1-1+b1_amd64.deb2018-05-06 08:14 262K
    [   ]minicom_2.7.1-1+b1_arm64.deb2018-05-06 07:58 250K
    [   ]minicom_2.7.1-1+b1_armel.deb2018-05-06 08:45 253K
    [   ]minicom_2.7.1-1+b1_armhf.deb2018-05-06 10:42 253K
    [   ]minicom_2.7.1-1+b1_i386.deb2018-05-06 08:55 266K
    [   ]minicom_2.7.1-1+b1_mips.deb2018-05-06 08:14 258K
    [   ]minicom_2.7.1-1+b1_mipsel.deb2018-05-06 12:13 259K
    [   ]minicom_2.7.1-1+b1_ppc64el.deb2018-05-06 09:10 260K
    [   ]minicom_2.7.1-1+b1_s390x.deb2018-05-06 08:14 257K
    [   ]minicom_2.7.1-1+b2_mips64el.deb2018-05-06 09:41 260K
    [   ]minicom_2.7.1-1.debian.tar.xz2017-08-13 15:40 14K
    [   ]minicom_2.7.1-1.dsc2017-08-13 15:40 1.8K
    [   ]minicom_2.7.1.orig.tar.gz2017-08-13 15:40 855K
    [   ]minicom_2.7.orig.tar.gz2014-01-01 09:36 843K
    [   ]minicom_2.8-2.debian.tar.xz2021-06-15 03:47 14K
    [   ]minicom_2.8-2.dsc2021-06-15 03:47 1.8K
    [   ]minicom_2.8-2_amd64.deb2021-06-15 03:58 280K
    [   ]minicom_2.8-2_arm64.deb2021-06-15 04:13 275K
    [   ]minicom_2.8-2_armel.deb2021-06-15 04:13 271K
    [   ]minicom_2.8-2_armhf.deb2021-06-15 04:13 272K
    [   ]minicom_2.8-2_i386.deb2021-06-15 04:13 285K
    [   ]minicom_2.8-2_mips64el.deb2021-06-15 04:13 277K
    [   ]minicom_2.8-2_mipsel.deb2021-06-15 04:13 278K
    [   ]minicom_2.8-2_ppc64el.deb2021-06-15 04:13 286K
    [   ]minicom_2.8-2_s390x.deb2021-06-15 03:58 275K
    [   ]minicom_2.8.orig.tar.bz22021-01-03 12:44 598K

    Apache Server at ftp.debian.org Port 80
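The .deb file names in both listings follow Debian's <package>_<version>_<arch>.deb convention, which is what makes directory pages like these machine-scannable. A small illustrative splitter (the regex and helper below are hypothetical, not code from this repository):

```python
import re

# <package>_<version>_<arch>.deb; the version field may include an upstream
# part, a "+dfsg" repack marker and a Debian revision, e.g. "2.7-1+deb8u1".
DEB_NAME = re.compile(r"^(?P<package>[a-z0-9.+-]+)_(?P<version>[^_]+)_(?P<arch>[a-z0-9]+)\.deb$")

def split_deb_filename(filename: str):
    """Return (package, version, arch) for a binary package name, else None."""
    m = DEB_NAME.match(filename)
    return (m.group("package"), m.group("version"), m.group("arch")) if m else None

assert split_deb_filename("minicom_2.7-1+deb8u1_amd64.deb") == \
    ("minicom", "2.7-1+deb8u1", "amd64")
```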
diff --git a/bitbake/lib/bb/tests/fetch-testdata/downloads/enchant/1.6.0/index.html b/bitbake/lib/bb/tests/fetch-testdata/downloads/enchant/1.6.0/index.html
deleted file mode 100644
index b7bfb1e947..0000000000
--- a/bitbake/lib/bb/tests/fetch-testdata/downloads/enchant/1.6.0/index.html
+++ /dev/null
@@ -1,15 +0,0 @@

    Index of /downloads/enchant/1.6.0

    [ICO]  Name  Last modified  Size  Description

    [DIR]Parent Directory  -  
    [   ]MD5SUM01-Apr-2010 23:03 55  
    [   ]enchant-1.6.0.tar.gz01-Apr-2010 23:02 593K 

    Apache/2.2.15 (Fedora) Server at www.abisource.com Port 443
diff --git a/bitbake/lib/bb/tests/fetch-testdata/files/v2.8/index.html b/bitbake/lib/bb/tests/fetch-testdata/files/v2.8/index.html
deleted file mode 100644
index 9ea077d5b7..0000000000
--- a/bitbake/lib/bb/tests/fetch-testdata/files/v2.8/index.html
+++ /dev/null
@@ -1,774 +0,0 @@

    Index of /files/v2.8

    [ICO]  Name  Last modified  Size  Description

    [PARENTDIR]Parent Directory  -  
    [   ]-*2012-06-07 12:08 0  
    [   ]CMakeChangeLog-2.8.02009-11-13 15:41 13K 
    [   ]CMakeChangeLog-2.8.12010-03-17 14:37 17K 
    [   ]CMakeChangeLog-2.8.22010-06-28 14:37 23K 
    [   ]CMakeChangeLog-2.8.32010-11-03 17:21 41K 
    [   ]CMakeChangeLog-2.8.42011-02-16 09:15 61K 
    [   ]CMakeChangeLog-2.8.52011-07-08 10:49 81K 
    [   ]CMakeChangeLog-2.8.62011-12-30 15:30 100K 
    [   ]CMakeChangeLog-2.8.72011-12-30 15:31 115K 
    [   ]CMakeChangeLog-2.8.82012-04-18 17:47 144K 
    [   ]CMakeChangeLog-2.8.92012-08-09 16:29 163K 
    [   ]CMakeChangeLog-2.8.102012-10-31 15:09 185K 
    [   ]CMakeChangeLog-2.8.10.12012-11-07 11:52 186K 
    [   ]CMakeChangeLog-2.8.10.22012-11-27 15:15 186K 
    [   ]CMakeChangeLog-2.8.112013-05-16 09:39 218K 
    [   ]CMakeChangeLog-2.8.122013-10-11 08:57 247K 
    [   ]CMakeChangeLog-2.8.12.12013-11-08 14:33 248K 
    [   ]CMakeVS10FindMake.cmake2010-05-27 09:45 1.2K 
    [TXT]cmake-2.8.0-AIX-powerpc.sh2010-02-08 10:44 17M 
    [   ]cmake-2.8.0-AIX-powerpc.tar.Z2010-02-08 10:44 24M 
    [   ]cmake-2.8.0-AIX-powerpc.tar.gz2010-02-08 10:44 17M 
    [   ]cmake-2.8.0-Darwin-universal.dmg2009-11-13 15:32 24M 
    [   ]cmake-2.8.0-Darwin-universal.tar.Z2009-11-13 15:32 35M 
    [   ]cmake-2.8.0-Darwin-universal.tar.gz2009-11-13 15:33 24M 
    [TXT]cmake-2.8.0-HP-UX-9000_785.sh2009-11-13 15:32 14M 
    [   ]cmake-2.8.0-HP-UX-9000_785.tar.Z2009-11-13 15:32 19M 
    [   ]cmake-2.8.0-HP-UX-9000_785.tar.gz2009-11-13 15:32 14M 
    [TXT]cmake-2.8.0-IRIX64-64.sh2009-11-13 15:32 9.0M 
    [   ]cmake-2.8.0-IRIX64-64.tar.Z2009-11-13 15:32 13M 
    [   ]cmake-2.8.0-IRIX64-64.tar.gz2009-11-13 15:32 9.0M 
    [TXT]cmake-2.8.0-IRIX64-n32.sh2009-11-13 15:32 8.8M 
    [   ]cmake-2.8.0-IRIX64-n32.tar.Z2009-11-13 15:32 13M 
    [   ]cmake-2.8.0-IRIX64-n32.tar.gz2009-11-13 15:32 8.8M 
    [TXT]cmake-2.8.0-Linux-i386.sh2009-11-13 15:32 15M 
    [   ]cmake-2.8.0-Linux-i386.tar.Z2009-11-13 15:32 21M 
    [   ]cmake-2.8.0-Linux-i386.tar.gz2009-11-13 15:32 15M 
    [TXT]cmake-2.8.0-SunOS-sparc.sh2009-11-13 15:32 10M 
    [   ]cmake-2.8.0-SunOS-sparc.tar.Z2009-11-13 15:32 14M 
    [   ]cmake-2.8.0-SunOS-sparc.tar.gz2009-11-13 15:32 10M 
    [   ]cmake-2.8.0-win32-x86.exe2009-11-13 15:32 7.4M 
    [   ]cmake-2.8.0-win32-x86.zip2009-11-13 15:32 9.3M 
    [   ]cmake-2.8.0.tar.Z2009-11-13 15:32 5.4M 
    [   ]cmake-2.8.0.tar.gz2009-11-13 15:32 3.4M 
    [   ]cmake-2.8.0.zip2009-11-13 15:32 4.6M 
    [TXT]cmake-2.8.1-AIX-powerpc.sh2010-04-06 14:37 17M 
    [   ]cmake-2.8.1-AIX-powerpc.tar.Z2010-04-06 14:37 24M 
    [   ]cmake-2.8.1-AIX-powerpc.tar.gz2010-04-06 14:37 17M 
    [   ]cmake-2.8.1-Darwin-universal.dmg2010-04-06 14:37 26M 
    [   ]cmake-2.8.1-Darwin-universal.tar.Z2010-04-06 14:37 37M 
    [   ]cmake-2.8.1-Darwin-universal.tar.gz2010-04-06 14:37 26M 
    [TXT]cmake-2.8.1-HP-UX-9000_785.sh2010-04-06 14:37 14M 
    [   ]cmake-2.8.1-HP-UX-9000_785.tar.Z2010-04-06 14:37 19M 
    [   ]cmake-2.8.1-HP-UX-9000_785.tar.gz2010-04-06 14:37 14M 
    [TXT]cmake-2.8.1-IRIX64-64.sh2010-04-06 14:37 9.1M 
    [   ]cmake-2.8.1-IRIX64-64.tar.Z2010-04-06 14:37 13M 
    [   ]cmake-2.8.1-IRIX64-64.tar.gz2010-04-06 14:37 9.1M 
    [TXT]cmake-2.8.1-IRIX64-n32.sh2010-04-06 14:37 8.8M 
    [   ]cmake-2.8.1-IRIX64-n32.tar.Z2010-04-06 14:37 13M 
    [   ]cmake-2.8.1-IRIX64-n32.tar.gz2010-04-06 14:37 8.8M 
    [TXT]cmake-2.8.1-Linux-i386.sh2010-04-06 14:37 15M 
    [   ]cmake-2.8.1-Linux-i386.tar.Z2010-04-06 14:37 21M 
    [   ]cmake-2.8.1-Linux-i386.tar.gz2010-04-06 14:37 15M 
    [TXT]cmake-2.8.1-SunOS-sparc.sh2010-04-06 14:37 10M 
    [   ]cmake-2.8.1-SunOS-sparc.tar.Z2010-04-06 14:37 14M 
    [   ]cmake-2.8.1-SunOS-sparc.tar.gz2010-04-06 14:37 10M 
    [   ]cmake-2.8.1-win32-x86.exe2010-04-06 14:37 7.5M 
    [   ]cmake-2.8.1-win32-x86.zip2010-04-06 14:37 9.5M 
    [   ]cmake-2.8.1.tar.Z2010-04-06 14:37 5.5M 
    [   ]cmake-2.8.1.tar.gz2010-04-06 14:37 3.4M 
    [   ]cmake-2.8.1.zip2010-04-06 14:37 4.6M 
    [TXT]cmake-2.8.2-AIX-powerpc.sh2010-06-28 14:10 17M 
    [   ]cmake-2.8.2-AIX-powerpc.tar.Z2010-06-28 14:09 24M 
    [   ]cmake-2.8.2-AIX-powerpc.tar.gz2010-06-28 14:09 17M 
    [   ]cmake-2.8.2-Darwin-universal.dmg2010-06-28 14:10 27M 
    [   ]cmake-2.8.2-Darwin-universal.tar.Z2010-06-28 14:10 39M 
    [   ]cmake-2.8.2-Darwin-universal.tar.gz2010-06-28 14:09 27M 
    [TXT]cmake-2.8.2-HP-UX-9000_785.sh2010-06-28 14:09 15M 
    [   ]cmake-2.8.2-HP-UX-9000_785.tar.Z2010-06-28 14:10 20M 
    [   ]cmake-2.8.2-HP-UX-9000_785.tar.gz2010-06-28 14:10 15M 
    [TXT]cmake-2.8.2-IRIX64-64.sh2010-06-28 14:10 9.6M 
    [   ]cmake-2.8.2-IRIX64-64.tar.Z2010-06-28 14:09 14M 
    [   ]cmake-2.8.2-IRIX64-64.tar.gz2010-06-28 14:10 9.6M 
    [TXT]cmake-2.8.2-IRIX64-n32.sh2010-06-28 14:10 9.4M 
    [   ]cmake-2.8.2-IRIX64-n32.tar.Z2010-06-28 14:09 14M 
    [   ]cmake-2.8.2-IRIX64-n32.tar.gz2010-06-28 14:10 9.3M 
    [TXT]cmake-2.8.2-Linux-i386.sh2010-06-28 14:10 15M 
    [   ]cmake-2.8.2-Linux-i386.tar.Z2010-06-28 14:09 21M 
    [   ]cmake-2.8.2-Linux-i386.tar.gz2010-06-28 14:10 15M 
    [TXT]cmake-2.8.2-SunOS-sparc.sh2010-06-28 14:09 10M 
    [   ]cmake-2.8.2-SunOS-sparc.tar.Z2010-06-28 14:10 14M 
    [   ]cmake-2.8.2-SunOS-sparc.tar.gz2010-06-28 14:10 10M 
    [   ]cmake-2.8.2-win32-x86.exe2010-06-28 14:09 7.7M 
    [   ]cmake-2.8.2-win32-x86.zip2010-06-28 14:09 9.7M 
    [   ]cmake-2.8.2.tar.Z2010-06-28 14:10 8.1M 
    [   ]cmake-2.8.2.tar.gz2010-06-28 14:09 5.1M 
    [   ]cmake-2.8.2.zip2010-06-28 14:10 6.8M 
    [TXT]cmake-2.8.3-AIX-powerpc.sh2010-11-03 17:11 17M 
    [   ]cmake-2.8.3-AIX-powerpc.tar.Z2010-11-03 17:11 24M 
    [   ]cmake-2.8.3-AIX-powerpc.tar.gz2010-11-03 17:10 17M 
    [   ]cmake-2.8.3-Darwin-universal.dmg2010-11-03 17:11 27M 
    [   ]cmake-2.8.3-Darwin-universal.tar.Z2010-11-03 17:11 39M 
    [   ]cmake-2.8.3-Darwin-universal.tar.gz2010-11-03 17:11 27M 
    [TXT]cmake-2.8.3-IRIX64-64.sh2010-11-03 17:11 9.8M 
    [   ]cmake-2.8.3-IRIX64-64.tar.Z2010-11-03 17:11 14M 
    [   ]cmake-2.8.3-IRIX64-64.tar.gz2010-11-03 17:11 9.8M 
    [TXT]cmake-2.8.3-IRIX64-n32.sh2010-11-03 17:10 9.5M 
    [   ]cmake-2.8.3-IRIX64-n32.tar.Z2010-11-03 17:11 14M 
    [   ]cmake-2.8.3-IRIX64-n32.tar.gz2010-11-03 17:10 9.5M 
    [TXT]cmake-2.8.3-Linux-i386.sh2010-11-03 17:11 16M 
    [   ]cmake-2.8.3-Linux-i386.tar.Z2010-11-03 17:11 22M 
    [   ]cmake-2.8.3-Linux-i386.tar.gz2010-11-03 17:10 16M 
    [TXT]cmake-2.8.3-SunOS-sparc.sh2010-11-03 17:10 11M 
    [   ]cmake-2.8.3-SunOS-sparc.tar.Z2010-11-03 17:11 15M 
    [   ]cmake-2.8.3-SunOS-sparc.tar.gz2010-11-03 17:11 11M 
    [   ]cmake-2.8.3-win32-x86.exe2010-11-03 17:11 7.9M 
    [   ]cmake-2.8.3-win32-x86.zip2010-11-03 17:11 9.9M 
    [   ]cmake-2.8.3.tar.Z2010-11-03 17:11 8.1M 
    [   ]cmake-2.8.3.tar.gz2010-11-03 17:11 5.2M 
    [   ]cmake-2.8.3.zip2010-11-03 17:11 6.9M 
    [TXT]cmake-2.8.4-AIX-powerpc.sh2011-02-15 15:54 17M 
    [   ]cmake-2.8.4-AIX-powerpc.tar.Z2011-02-15 15:54 24M 
    [   ]cmake-2.8.4-AIX-powerpc.tar.gz2011-02-15 15:53 17M 
    [   ]cmake-2.8.4-Darwin-universal.dmg2011-02-15 15:53 28M 
    [   ]cmake-2.8.4-Darwin-universal.tar.Z2011-02-15 15:54 39M 
    [   ]cmake-2.8.4-Darwin-universal.tar.gz2011-02-15 15:54 27M 
    [TXT]cmake-2.8.4-IRIX64-64.sh2011-02-15 15:53 9.9M 
    [   ]cmake-2.8.4-IRIX64-64.tar.Z2011-02-15 15:54 15M 
    [   ]cmake-2.8.4-IRIX64-64.tar.gz2011-02-15 15:54 9.9M 
    [TXT]cmake-2.8.4-IRIX64-n32.sh2011-02-15 15:53 9.7M 
    [   ]cmake-2.8.4-IRIX64-n32.tar.Z2011-02-15 15:55 14M 
    [   ]cmake-2.8.4-IRIX64-n32.tar.gz2011-02-15 15:54 9.7M 
    [TXT]cmake-2.8.4-Linux-i386.sh2011-02-15 15:54 16M 
    [   ]cmake-2.8.4-Linux-i386.tar.Z2011-02-15 15:54 22M 
    [   ]cmake-2.8.4-Linux-i386.tar.gz2011-02-15 15:54 16M 
    [TXT]cmake-2.8.4-SunOS-sparc.sh2011-02-15 15:54 11M 
    [   ]cmake-2.8.4-SunOS-sparc.tar.Z2011-02-15 15:54 15M 
    [   ]cmake-2.8.4-SunOS-sparc.tar.gz2011-02-15 15:54 11M 
    [   ]cmake-2.8.4-win32-x86.exe2011-02-15 15:54 7.9M 
    [   ]cmake-2.8.4-win32-x86.zip2011-02-15 15:54 10M 
    [   ]cmake-2.8.4.tar.Z2011-02-15 15:54 8.3M 
    [   ]cmake-2.8.4.tar.gz2011-02-15 15:54 5.2M 
    [   ]cmake-2.8.4.zip2011-02-15 15:54 7.0M 
    [   ]cmake-2.8.5-1-src.tar.bz22011-07-08 10:34 4.1M 
    [   ]cmake-2.8.5-1.tar.bz22011-07-08 10:34 6.3M 
    [TXT]cmake-2.8.5-AIX-powerpc.sh2011-07-08 10:34 17M 
    [   ]cmake-2.8.5-AIX-powerpc.tar.Z2011-07-08 10:34 24M 
    [   ]cmake-2.8.5-AIX-powerpc.tar.gz2011-07-08 10:34 17M 
    [   ]cmake-2.8.5-Darwin-universal.dmg2011-07-08 10:34 28M 
    [   ]cmake-2.8.5-Darwin-universal.tar.Z2011-07-08 10:34 40M 
    [   ]cmake-2.8.5-Darwin-universal.tar.gz2011-07-08 10:34 28M 
    [TXT]cmake-2.8.5-IRIX64-64.sh2011-07-08 10:34 10M 
    [   ]cmake-2.8.5-IRIX64-64.tar.Z2011-07-08 10:34 15M 
    [   ]cmake-2.8.5-IRIX64-64.tar.gz2011-07-08 10:34 10M 
    [TXT]cmake-2.8.5-IRIX64-n32.sh2011-07-08 10:34 9.8M 
    [   ]cmake-2.8.5-IRIX64-n32.tar.Z2011-07-08 10:34 15M 
    [   ]cmake-2.8.5-IRIX64-n32.tar.gz2011-07-08 10:34 9.8M 
    [TXT]cmake-2.8.5-Linux-i386.sh2011-07-08 10:34 16M 
    [   ]cmake-2.8.5-Linux-i386.tar.Z2011-07-08 10:34 22M 
    [   ]cmake-2.8.5-Linux-i386.tar.gz2011-07-08 10:34 16M 
    [TXT]cmake-2.8.5-SunOS-sparc.sh2011-07-08 10:34 11M 
    [   ]cmake-2.8.5-SunOS-sparc.tar.Z2011-07-08 10:34 15M 
    [   ]cmake-2.8.5-SunOS-sparc.tar.gz2011-07-08 10:34 11M 
    [   ]cmake-2.8.5-win32-x86.exe2011-07-08 10:34 8.0M 
    [   ]cmake-2.8.5-win32-x86.zip2011-07-08 10:34 10M 
    [   ]cmake-2.8.5.tar.Z2011-07-08 10:34 8.3M 
    [   ]cmake-2.8.5.tar.gz2011-07-08 10:34 5.3M 
    [   ]cmake-2.8.5.zip2011-07-08 10:34 7.0M 
    [   ]cmake-2.8.6-1-src.tar.bz22011-10-04 13:59 4.1M 
    [   ]cmake-2.8.6-1.tar.bz22011-10-04 13:59 6.4M 
    [TXT]cmake-2.8.6-AIX-powerpc.sh2011-10-04 13:59 17M 
    [   ]cmake-2.8.6-AIX-powerpc.tar.Z2011-10-04 13:59 25M 
    [   ]cmake-2.8.6-AIX-powerpc.tar.gz2011-10-04 13:59 17M 
    [   ]cmake-2.8.6-Darwin-universal.dmg2011-10-04 13:59 28M 
    [   ]cmake-2.8.6-Darwin-universal.tar.Z2011-10-04 13:59 40M 
    [   ]cmake-2.8.6-Darwin-universal.tar.gz2011-10-04 13:59 28M 
    [   ]cmake-2.8.6-Darwin64-universal.dmg2011-10-04 13:59 29M 
    [   ]cmake-2.8.6-Darwin64-universal.tar.Z2011-10-04 13:59 41M 
    [   ]cmake-2.8.6-Darwin64-universal.tar.gz2011-10-04 13:59 29M 
    [TXT]cmake-2.8.6-IRIX64-64.sh2011-10-04 13:59 10M 
    [   ]cmake-2.8.6-IRIX64-64.tar.Z2011-10-04 13:59 15M 
    [   ]cmake-2.8.6-IRIX64-64.tar.gz2011-10-04 13:59 10M 
    [TXT]cmake-2.8.6-IRIX64-n32.sh2011-10-04 13:59 10M 
    [   ]cmake-2.8.6-IRIX64-n32.tar.Z2011-10-04 13:59 15M 
    [   ]cmake-2.8.6-IRIX64-n32.tar.gz2011-10-04 13:59 10M 
    [TXT]cmake-2.8.6-Linux-i386.sh2011-10-04 13:59 16M 
    [   ]cmake-2.8.6-Linux-i386.tar.Z2011-10-04 13:59 23M 
    [   ]cmake-2.8.6-Linux-i386.tar.gz2011-10-04 13:58 16M 
    [   ]cmake-2.8.6-win32-x86.exe2011-10-04 13:58 8.2M 
    [   ]cmake-2.8.6-win32-x86.zip2011-10-04 13:58 10M 
    [   ]cmake-2.8.6.tar.Z2011-10-04 13:58 8.4M 
    [   ]cmake-2.8.6.tar.gz2011-10-04 13:58 5.3M 
    [   ]cmake-2.8.6.zip2011-10-04 13:58 7.1M 
    [   ]cmake-2.8.7-1-src.tar.bz22011-12-30 14:14 4.2M 
    [   ]cmake-2.8.7-1.tar.bz22011-12-30 14:14 6.5M 
    [TXT]cmake-2.8.7-AIX-powerpc.sh2012-01-03 16:51 17M 
    [   ]cmake-2.8.7-AIX-powerpc.tar.Z2012-01-03 16:51 25M 
    [   ]cmake-2.8.7-AIX-powerpc.tar.gz2012-01-03 16:51 17M 
    [   ]cmake-2.8.7-Darwin-universal.dmg2011-12-30 14:14 29M 
    [   ]cmake-2.8.7-Darwin-universal.tar.Z2011-12-30 14:14 41M 
    [   ]cmake-2.8.7-Darwin-universal.tar.gz2011-12-30 14:14 28M 
    [   ]cmake-2.8.7-Darwin64-universal.dmg2011-12-30 14:14 30M 
    [   ]cmake-2.8.7-Darwin64-universal.tar.Z2011-12-30 14:13 42M 
    [   ]cmake-2.8.7-Darwin64-universal.tar.gz2011-12-30 14:13 30M 
    [TXT]cmake-2.8.7-IRIX64-64.sh2011-12-30 14:13 10M 
    [   ]cmake-2.8.7-IRIX64-64.tar.Z2011-12-30 14:13 15M 
    [   ]cmake-2.8.7-IRIX64-64.tar.gz2011-12-30 14:13 10M 
    [TXT]cmake-2.8.7-IRIX64-n32.sh2011-12-30 14:13 10M 
    [   ]cmake-2.8.7-IRIX64-n32.tar.Z2011-12-30 14:13 15M 
    [   ]cmake-2.8.7-IRIX64-n32.tar.gz2011-12-30 14:13 10M 
    [TXT]cmake-2.8.7-Linux-i386.sh2011-12-30 14:13 16M 
    [   ]cmake-2.8.7-Linux-i386.tar.Z2011-12-30 14:13 23M 
    [   ]cmake-2.8.7-Linux-i386.tar.gz2011-12-30 14:13 16M 
    [   ]cmake-2.8.7-win32-x86.exe2011-12-30 14:13 8.2M 
    [   ]cmake-2.8.7-win32-x86.zip2011-12-30 14:13 10M 
    [   ]cmake-2.8.7.tar.Z2011-12-30 14:13 8.5M 
    [   ]cmake-2.8.7.tar.gz2011-12-30 14:13 5.4M 
    [   ]cmake-2.8.7.zip2011-12-30 14:13 7.2M 
    [TXT]cmake-2.8.8-AIX-powerpc.sh2012-04-18 15:23 19M 
    [   ]cmake-2.8.8-AIX-powerpc.tar.Z2012-04-18 15:23 27M 
    [   ]cmake-2.8.8-AIX-powerpc.tar.gz2012-04-18 15:23 19M 
    [   ]cmake-2.8.8-Darwin-universal.dmg2012-04-18 15:23 34M 
    [   ]cmake-2.8.8-Darwin-universal.tar.Z2012-04-18 15:23 49M 
    [   ]cmake-2.8.8-Darwin-universal.tar.gz2012-04-18 15:23 34M 
    [   ]cmake-2.8.8-Darwin64-universal.dmg2012-04-18 15:22 32M 
    [   ]cmake-2.8.8-Darwin64-universal.tar.Z2012-04-18 15:22 44M 
    [   ]cmake-2.8.8-Darwin64-universal.tar.gz2012-04-18 15:22 32M 
    [TXT]cmake-2.8.8-IRIX64-64.sh2012-04-18 15:22 12M 
    [   ]cmake-2.8.8-IRIX64-64.tar.Z2012-04-18 15:22 17M 
    [   ]cmake-2.8.8-IRIX64-64.tar.gz2012-04-18 15:22 12M 
    [TXT]cmake-2.8.8-IRIX64-n32.sh2012-04-18 15:22 11M 
    [   ]cmake-2.8.8-IRIX64-n32.tar.Z2012-04-18 15:22 17M 
    [   ]cmake-2.8.8-IRIX64-n32.tar.gz2012-04-18 15:22 11M 
    [TXT]cmake-2.8.8-Linux-i386.sh2012-04-18 15:22 18M 
    [   ]cmake-2.8.8-Linux-i386.tar.Z2012-04-18 15:22 26M 
    [   ]cmake-2.8.8-Linux-i386.tar.gz2012-04-18 15:22 18M 
    [TXT]cmake-2.8.8-SHA-256.txt2016-04-13 12:48 2.1K 
    [TXT]cmake-2.8.8-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-2.8.8-win32-x86.exe2012-04-18 15:22 8.7M 
    [   ]cmake-2.8.8-win32-x86.zip2012-04-18 15:22 11M 
    [   ]cmake-2.8.8.tar.Z2012-04-18 15:22 8.5M 
    [   ]cmake-2.8.8.tar.gz2012-04-18 15:22 5.4M 
    [   ]cmake-2.8.8.zip2012-04-18 15:22 7.2M 
    [TXT]cmake-2.8.9-AIX-powerpc.sh2012-08-09 15:36 19M 
    [   ]cmake-2.8.9-AIX-powerpc.tar.Z2012-08-09 15:36 27M 
    [   ]cmake-2.8.9-AIX-powerpc.tar.gz2012-08-09 15:36 19M 
    [   ]cmake-2.8.9-Darwin-universal.dmg2012-08-09 15:36 35M 
    [   ]cmake-2.8.9-Darwin-universal.tar.Z2012-08-09 15:36 50M 
    [   ]cmake-2.8.9-Darwin-universal.tar.gz2012-08-09 15:36 35M 
    [   ]cmake-2.8.9-Darwin64-universal.dmg2012-08-09 15:36 33M 
    [   ]cmake-2.8.9-Darwin64-universal.tar.Z2012-08-09 15:35 45M 
    [   ]cmake-2.8.9-Darwin64-universal.tar.gz2012-08-09 15:35 32M 
    [TXT]cmake-2.8.9-IRIX64-64.sh2012-08-09 15:35 12M 
    [   ]cmake-2.8.9-IRIX64-64.tar.Z2012-08-09 15:35 17M 
    [   ]cmake-2.8.9-IRIX64-64.tar.gz2012-08-09 15:35 12M 
    [TXT]cmake-2.8.9-IRIX64-n32.sh2012-08-09 15:35 12M 
    [   ]cmake-2.8.9-IRIX64-n32.tar.Z2012-08-09 15:35 17M 
    [   ]cmake-2.8.9-IRIX64-n32.tar.gz2012-08-09 15:35 12M 
    [TXT]cmake-2.8.9-Linux-i386.sh2012-08-09 15:35 19M 
    [   ]cmake-2.8.9-Linux-i386.tar.Z2012-08-09 15:35 26M 
    [   ]cmake-2.8.9-Linux-i386.tar.gz2012-08-09 15:35 19M 
    [TXT]cmake-2.8.9-SHA-256.txt2016-04-13 12:48 2.1K 
    [TXT]cmake-2.8.9-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-2.8.9-win32-x86.exe2012-08-09 15:35 8.9M 
    [   ]cmake-2.8.9-win32-x86.zip2012-08-09 15:35 11M 
    [   ]cmake-2.8.9.tar.Z2012-08-09 15:35 8.5M 
    [   ]cmake-2.8.9.tar.gz2012-08-09 15:35 5.4M 
    [   ]cmake-2.8.9.zip2012-08-09 15:35 7.2M 
    [TXT]cmake-2.8.10-AIX-powerpc.sh2012-10-31 13:05 20M 
    [   ]cmake-2.8.10-AIX-powerpc.tar.Z2012-10-31 13:05 28M 
    [   ]cmake-2.8.10-AIX-powerpc.tar.gz2012-10-31 13:05 20M 
    [   ]cmake-2.8.10-Darwin-universal.dmg2012-10-31 13:04 41M 
    [   ]cmake-2.8.10-Darwin-universal.tar.Z2012-10-31 13:04 58M 
    [   ]cmake-2.8.10-Darwin-universal.tar.gz2012-10-31 13:04 41M 
    [   ]cmake-2.8.10-Darwin64-universal.dmg2012-10-31 13:04 38M 
    [   ]cmake-2.8.10-Darwin64-universal.tar.Z2012-10-31 13:04 53M 
    [   ]cmake-2.8.10-Darwin64-universal.tar.gz2012-10-31 13:04 38M 
    [TXT]cmake-2.8.10-IRIX64-64.sh2012-10-31 13:04 12M 
    [   ]cmake-2.8.10-IRIX64-64.tar.Z2012-10-31 13:04 18M 
    [   ]cmake-2.8.10-IRIX64-64.tar.gz2012-10-31 13:04 12M 
    [TXT]cmake-2.8.10-IRIX64-n32.sh2012-10-31 13:04 12M 
    [   ]cmake-2.8.10-IRIX64-n32.tar.Z2012-10-31 13:04 17M 
    [   ]cmake-2.8.10-IRIX64-n32.tar.gz2012-10-31 13:04 12M 
    [TXT]cmake-2.8.10-Linux-i386.sh2012-10-31 13:04 22M 
    [   ]cmake-2.8.10-Linux-i386.tar.Z2012-10-31 13:04 30M 
    [   ]cmake-2.8.10-Linux-i386.tar.gz2012-10-31 13:04 22M 
    [TXT]cmake-2.8.10-SHA-256.txt2016-04-13 12:48 2.1K 
    [TXT]cmake-2.8.10-SHA-256.txt.asc2016-04-13 12:48 819  
    [TXT]cmake-2.8.10-rc1-AIX-powerpc.sh2012-10-02 13:28 20M 
    [   ]cmake-2.8.10-rc1-AIX-powerpc.tar.Z2012-10-02 13:28 28M 
    [   ]cmake-2.8.10-rc1-AIX-powerpc.tar.gz2012-10-02 13:28 20M 
    [   ]cmake-2.8.10-rc1-Darwin-universal.dmg2012-10-02 13:28 41M 
    [   ]cmake-2.8.10-rc1-Darwin-universal.tar.Z2012-10-02 13:28 58M 
    [   ]cmake-2.8.10-rc1-Darwin-universal.tar.gz2012-10-02 13:28 40M 
    [   ]cmake-2.8.10-rc1-Darwin64-universal.dmg2012-10-02 13:28 38M 
    [   ]cmake-2.8.10-rc1-Darwin64-universal.tar.Z2012-10-02 13:28 53M 
    [   ]cmake-2.8.10-rc1-Darwin64-universal.tar.gz2012-10-02 13:28 38M 
    [TXT]cmake-2.8.10-rc1-IRIX64-64.sh2012-10-02 13:27 12M 
    [   ]cmake-2.8.10-rc1-IRIX64-64.tar.Z2012-10-02 13:27 18M 
    [   ]cmake-2.8.10-rc1-IRIX64-64.tar.gz2012-10-02 13:27 12M 
    [TXT]cmake-2.8.10-rc1-IRIX64-n32.sh2012-10-02 13:27 12M 
    [   ]cmake-2.8.10-rc1-IRIX64-n32.tar.Z2012-10-02 13:27 17M 
    [   ]cmake-2.8.10-rc1-IRIX64-n32.tar.gz2012-10-02 13:27 12M 
    [TXT]cmake-2.8.10-rc1-Linux-i386.sh2012-10-02 13:27 22M 
    [   ]cmake-2.8.10-rc1-Linux-i386.tar.Z2012-10-02 13:27 30M 
    [   ]cmake-2.8.10-rc1-Linux-i386.tar.gz2012-10-02 13:27 22M 
    [TXT]cmake-2.8.10-rc1-SHA-256.txt2016-04-13 12:48 2.2K 
    [TXT]cmake-2.8.10-rc1-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-2.8.10-rc1-win32-x86.exe2012-10-02 13:27 9.6M 
    [   ]cmake-2.8.10-rc1-win32-x86.zip2012-10-02 13:27 12M 
    [   ]cmake-2.8.10-rc1.tar.Z2012-10-02 13:27 8.6M 
    [   ]cmake-2.8.10-rc1.tar.gz2012-10-02 13:27 5.5M 
    [   ]cmake-2.8.10-rc1.zip2012-10-02 13:27 7.4M 
    [TXT]cmake-2.8.10-rc2-AIX-powerpc.sh2012-10-19 09:26 20M 
    [   ]cmake-2.8.10-rc2-AIX-powerpc.tar.Z2012-10-19 09:26 28M 
    [   ]cmake-2.8.10-rc2-AIX-powerpc.tar.gz2012-10-19 09:26 20M 
    [   ]cmake-2.8.10-rc2-Darwin-universal.dmg2012-10-19 09:26 41M 
    [   ]cmake-2.8.10-rc2-Darwin-universal.tar.Z2012-10-19 09:26 58M 
    [   ]cmake-2.8.10-rc2-Darwin-universal.tar.gz2012-10-19 09:26 41M 
    [   ]cmake-2.8.10-rc2-Darwin64-universal.dmg2012-10-19 09:25 38M 
    [   ]cmake-2.8.10-rc2-Darwin64-universal.tar.Z2012-10-19 09:25 53M 
    [   ]cmake-2.8.10-rc2-Darwin64-universal.tar.gz2012-10-19 09:25 38M 
    [TXT]cmake-2.8.10-rc2-IRIX64-64.sh2012-10-19 09:25 12M 
    [   ]cmake-2.8.10-rc2-IRIX64-64.tar.Z2012-10-19 09:25 18M 
    [   ]cmake-2.8.10-rc2-IRIX64-64.tar.gz2012-10-19 09:25 12M 
    [TXT]cmake-2.8.10-rc2-IRIX64-n32.sh2012-10-19 09:25 12M 
    [   ]cmake-2.8.10-rc2-IRIX64-n32.tar.Z2012-10-19 09:25 17M 
    [   ]cmake-2.8.10-rc2-IRIX64-n32.tar.gz2012-10-19 09:25 12M 
    [TXT]cmake-2.8.10-rc2-Linux-i386.sh2012-10-19 09:25 22M 
    [   ]cmake-2.8.10-rc2-Linux-i386.tar.Z2012-10-19 09:24 30M 
    [   ]cmake-2.8.10-rc2-Linux-i386.tar.gz2012-10-19 09:24 22M 
    [TXT]cmake-2.8.10-rc2-SHA-256.txt2016-04-13 12:48 2.2K 
    [TXT]cmake-2.8.10-rc2-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-2.8.10-rc2-win32-x86.exe2012-10-19 09:24 9.6M 
    [   ]cmake-2.8.10-rc2-win32-x86.zip2012-10-19 09:24 12M 
    [   ]cmake-2.8.10-rc2.tar.Z2012-10-19 09:24 8.6M 
    [   ]cmake-2.8.10-rc2.tar.gz2012-10-19 09:24 5.5M 
    [   ]cmake-2.8.10-rc2.zip2012-10-19 09:24 7.4M 
    [TXT]cmake-2.8.10-rc3-AIX-powerpc.sh2012-10-24 15:11 20M 
    [   ]cmake-2.8.10-rc3-AIX-powerpc.tar.Z2012-10-24 15:11 28M 
    [   ]cmake-2.8.10-rc3-AIX-powerpc.tar.gz2012-10-24 15:10 20M 
    [   ]cmake-2.8.10-rc3-Darwin-universal.dmg2012-10-24 15:10 41M 
    [   ]cmake-2.8.10-rc3-Darwin-universal.tar.Z2012-10-24 15:10 58M 
    [   ]cmake-2.8.10-rc3-Darwin-universal.tar.gz2012-10-24 15:10 41M 
    [   ]cmake-2.8.10-rc3-Darwin64-universal.dmg2012-10-24 15:10 38M 
    [   ]cmake-2.8.10-rc3-Darwin64-universal.tar.Z2012-10-24 15:10 53M 
    [   ]cmake-2.8.10-rc3-Darwin64-universal.tar.gz2012-10-24 15:10 38M 
    [TXT]cmake-2.8.10-rc3-IRIX64-64.sh2012-10-24 15:10 12M 
    [   ]cmake-2.8.10-rc3-IRIX64-64.tar.Z2012-10-24 15:10 18M 
    [   ]cmake-2.8.10-rc3-IRIX64-64.tar.gz2012-10-24 15:10 12M 
    [TXT]cmake-2.8.10-rc3-IRIX64-n32.sh2012-10-24 15:10 12M 
    [   ]cmake-2.8.10-rc3-IRIX64-n32.tar.Z2012-10-24 15:10 17M 
    [   ]cmake-2.8.10-rc3-IRIX64-n32.tar.gz2012-10-24 15:10 12M 
    [TXT]cmake-2.8.10-rc3-Linux-i386.sh2012-10-24 15:10 22M 
    [   ]cmake-2.8.10-rc3-Linux-i386.tar.Z2012-10-24 15:10 30M 
    [   ]cmake-2.8.10-rc3-Linux-i386.tar.gz2012-10-24 15:10 22M 
    [TXT]cmake-2.8.10-rc3-SHA-256.txt2016-04-13 12:48 2.2K 
    [TXT]cmake-2.8.10-rc3-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-2.8.10-rc3-win32-x86.exe2012-10-24 15:10 9.6M 
    [   ]cmake-2.8.10-rc3-win32-x86.zip2012-10-24 15:09 12M 
    [   ]cmake-2.8.10-rc3.tar.Z2012-10-24 15:09 8.7M 
    [   ]cmake-2.8.10-rc3.tar.gz2012-10-24 15:09 5.5M 
    [   ]cmake-2.8.10-rc3.zip2012-10-24 15:09 7.4M 
    [   ]cmake-2.8.10-win32-x86.exe2012-10-31 13:04 9.6M 
    [   ]cmake-2.8.10-win32-x86.zip2012-10-31 13:04 12M 
    [TXT]cmake-2.8.10.1-AIX-powerpc.sh2012-11-07 11:48 20M 
    [   ]cmake-2.8.10.1-AIX-powerpc.tar.Z2012-11-07 11:48 28M 
    [   ]cmake-2.8.10.1-AIX-powerpc.tar.gz2012-11-07 11:48 20M 
    [   ]cmake-2.8.10.1-Darwin-universal.dmg2012-11-07 11:48 41M 
    [   ]cmake-2.8.10.1-Darwin-universal.tar.Z2012-11-07 11:48 58M 
    [   ]cmake-2.8.10.1-Darwin-universal.tar.gz2012-11-07 11:48 41M 
    [   ]cmake-2.8.10.1-Darwin64-universal.dmg2012-11-07 11:48 38M 
    [   ]cmake-2.8.10.1-Darwin64-universal.tar.Z2012-11-07 11:48 53M 
    [   ]cmake-2.8.10.1-Darwin64-universal.tar.gz2012-11-07 11:48 38M 
    [TXT]cmake-2.8.10.1-IRIX64-64.sh2012-11-07 11:48 12M 
    [   ]cmake-2.8.10.1-IRIX64-64.tar.Z2012-11-07 11:48 18M 
    [   ]cmake-2.8.10.1-IRIX64-64.tar.gz2012-11-07 11:48 12M 
    [TXT]cmake-2.8.10.1-IRIX64-n32.sh2012-11-07 11:47 12M 
    [   ]cmake-2.8.10.1-IRIX64-n32.tar.Z2012-11-07 11:47 17M 
    [   ]cmake-2.8.10.1-IRIX64-n32.tar.gz2012-11-07 11:47 12M 
    [TXT]cmake-2.8.10.1-Linux-i386.sh2012-11-07 11:47 22M 
    [   ]cmake-2.8.10.1-Linux-i386.tar.Z2012-11-07 11:47 30M 
    [   ]cmake-2.8.10.1-Linux-i386.tar.gz2012-11-07 11:47 22M 
    [TXT]cmake-2.8.10.1-SHA-256.txt2016-04-13 12:48 2.2K 
    [TXT]cmake-2.8.10.1-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-2.8.10.1-win32-x86.exe2012-11-07 11:47 9.6M 
    [   ]cmake-2.8.10.1-win32-x86.zip2012-11-07 11:47 12M 
    [   ]cmake-2.8.10.1.tar.Z2012-11-07 11:47 8.6M 
    [   ]cmake-2.8.10.1.tar.gz2012-11-07 11:47 5.5M 
    [   ]cmake-2.8.10.1.zip2012-11-07 11:47 7.4M 
    [TXT]cmake-2.8.10.2-AIX-powerpc.sh2012-11-27 15:05 20M 
    [   ]cmake-2.8.10.2-AIX-powerpc.tar.Z2012-11-27 15:05 28M 
    [   ]cmake-2.8.10.2-AIX-powerpc.tar.gz2012-11-27 15:04 20M 
    [   ]cmake-2.8.10.2-Darwin-universal.dmg2012-11-27 15:04 41M 
    [   ]cmake-2.8.10.2-Darwin-universal.tar.Z2012-11-27 15:04 58M 
    [   ]cmake-2.8.10.2-Darwin-universal.tar.gz2012-11-27 15:04 41M 
    [   ]cmake-2.8.10.2-Darwin64-universal.dmg2012-11-27 15:04 38M 
    [   ]cmake-2.8.10.2-Darwin64-universal.tar.Z2012-11-27 15:04 53M 
    [   ]cmake-2.8.10.2-Darwin64-universal.tar.gz2012-11-27 15:04 38M 
    [TXT]cmake-2.8.10.2-IRIX64-64.sh2012-11-27 15:04 12M 
    [   ]cmake-2.8.10.2-IRIX64-64.tar.Z2012-11-27 15:04 18M 
    [   ]cmake-2.8.10.2-IRIX64-64.tar.gz2012-11-27 15:04 12M 
    [TXT]cmake-2.8.10.2-IRIX64-n32.sh2012-11-27 15:04 12M 
    [   ]cmake-2.8.10.2-IRIX64-n32.tar.Z2012-11-27 15:04 17M 
    [   ]cmake-2.8.10.2-IRIX64-n32.tar.gz2012-11-27 15:04 12M 
    [TXT]cmake-2.8.10.2-Linux-i386.sh2012-11-27 15:04 22M 
    [   ]cmake-2.8.10.2-Linux-i386.tar.Z2012-11-27 15:04 30M 
    [   ]cmake-2.8.10.2-Linux-i386.tar.gz2012-11-27 15:04 22M 
    [TXT]cmake-2.8.10.2-SHA-256.txt2016-04-13 12:48 2.2K 
    [TXT]cmake-2.8.10.2-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-2.8.10.2-win32-x86.exe2012-11-27 15:04 9.6M 
    [   ]cmake-2.8.10.2-win32-x86.zip2012-11-27 15:03 12M 
    [   ]cmake-2.8.10.2.tar.Z2012-11-27 15:03 8.7M 
    [   ]cmake-2.8.10.2.tar.gz2012-11-27 15:03 5.5M 
    [   ]cmake-2.8.10.2.zip2012-11-27 15:03 7.4M 
    [   ]cmake-2.8.10.tar.Z2012-10-31 13:03 8.7M 
    [   ]cmake-2.8.10.tar.gz2012-10-31 13:03 5.5M 
    [   ]cmake-2.8.10.zip2012-10-31 13:03 7.4M 
    [TXT]cmake-2.8.11-AIX-powerpc.sh2013-06-07 14:42 21M 
    [   ]cmake-2.8.11-AIX-powerpc.tar.Z2013-06-07 14:42 30M 
    [   ]cmake-2.8.11-AIX-powerpc.tar.gz2013-06-07 14:42 21M 
    [   ]cmake-2.8.11-Darwin-universal.dmg2013-05-15 15:54 42M 
    [   ]cmake-2.8.11-Darwin-universal.tar.Z2013-05-15 15:54 59M 
    [   ]cmake-2.8.11-Darwin-universal.tar.gz2013-05-15 15:54 42M 
    [   ]cmake-2.8.11-Darwin64-universal.dmg2013-05-15 15:54 39M 
    [   ]cmake-2.8.11-Darwin64-universal.tar.Z2013-05-15 15:54 55M 
    [   ]cmake-2.8.11-Darwin64-universal.tar.gz2013-05-15 15:54 39M 
    [TXT]cmake-2.8.11-IRIX64-64.sh2013-05-15 15:53 13M 
    [   ]cmake-2.8.11-IRIX64-64.tar.Z2013-05-15 15:53 19M 
    [   ]cmake-2.8.11-IRIX64-64.tar.gz2013-05-15 15:53 13M 
    [TXT]cmake-2.8.11-IRIX64-n32.sh2013-05-15 15:53 12M 
    [   ]cmake-2.8.11-IRIX64-n32.tar.Z2013-05-15 15:53 18M 
    [   ]cmake-2.8.11-IRIX64-n32.tar.gz2013-05-15 15:53 12M 
    [TXT]cmake-2.8.11-Linux-i386.sh2013-05-15 15:53 23M 
    [   ]cmake-2.8.11-Linux-i386.tar.Z2013-05-15 15:53 31M 
    [   ]cmake-2.8.11-Linux-i386.tar.gz2013-05-15 15:53 23M 
    [TXT]cmake-2.8.11-SHA-256.txt2016-04-13 12:48 2.4K 
    [TXT]cmake-2.8.11-SHA-256.txt.asc2016-04-13 12:48 819  
    [TXT]cmake-2.8.11-rc1-AIX-powerpc.sh2013-03-14 17:16 21M 
    [   ]cmake-2.8.11-rc1-AIX-powerpc.tar.Z2013-03-14 17:16 30M 
    [   ]cmake-2.8.11-rc1-AIX-powerpc.tar.gz2013-03-14 17:16 21M 
    [   ]cmake-2.8.11-rc1-Darwin-universal.dmg2013-03-14 17:16 42M 
    [   ]cmake-2.8.11-rc1-Darwin-universal.tar.Z2013-03-14 17:16 59M 
    [   ]cmake-2.8.11-rc1-Darwin-universal.tar.gz2013-03-14 17:16 41M 
    [   ]cmake-2.8.11-rc1-Darwin64-universal.dmg2013-03-14 17:15 39M 
    [   ]cmake-2.8.11-rc1-Darwin64-universal.tar.Z2013-03-14 17:15 55M 
    [   ]cmake-2.8.11-rc1-Darwin64-universal.tar.gz2013-03-14 17:15 39M 
    [TXT]cmake-2.8.11-rc1-IRIX64-64.sh2013-03-14 17:15 13M 
    [   ]cmake-2.8.11-rc1-IRIX64-64.tar.Z2013-03-14 17:15 19M 
    [   ]cmake-2.8.11-rc1-IRIX64-64.tar.gz2013-03-14 17:15 13M 
    [TXT]cmake-2.8.11-rc1-IRIX64-n32.sh2013-03-14 17:15 12M 
    [   ]cmake-2.8.11-rc1-IRIX64-n32.tar.Z2013-03-14 17:15 18M 
    [   ]cmake-2.8.11-rc1-IRIX64-n32.tar.gz2013-03-14 17:15 12M 
    [TXT]cmake-2.8.11-rc1-Linux-i386.sh2013-03-14 17:15 23M 
    [   ]cmake-2.8.11-rc1-Linux-i386.tar.Z2013-03-14 17:15 31M 
    [   ]cmake-2.8.11-rc1-Linux-i386.tar.gz2013-03-14 17:15 23M 
    [TXT]cmake-2.8.11-rc1-SHA-256.txt2016-04-13 12:48 2.2K 
    [TXT]cmake-2.8.11-rc1-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-2.8.11-rc1-win32-x86.exe2013-03-14 17:15 9.9M 
    [   ]cmake-2.8.11-rc1-win32-x86.zip2013-03-14 17:14 12M 
    [   ]cmake-2.8.11-rc1.tar.Z2013-03-14 17:14 8.8M 
    [   ]cmake-2.8.11-rc1.tar.gz2013-03-14 17:14 5.6M 
    [   ]cmake-2.8.11-rc1.zip2013-03-14 17:14 7.6M 
    [TXT]cmake-2.8.11-rc2-AIX-powerpc.sh2013-04-05 18:01 21M 
    [   ]cmake-2.8.11-rc2-AIX-powerpc.tar.Z2013-04-05 18:01 30M 
    [   ]cmake-2.8.11-rc2-AIX-powerpc.tar.gz2013-04-05 18:01 21M 
    [   ]cmake-2.8.11-rc2-Darwin-universal.dmg2013-04-05 18:01 42M 
    [   ]cmake-2.8.11-rc2-Darwin-universal.tar.Z2013-04-05 18:01 59M 
    [   ]cmake-2.8.11-rc2-Darwin-universal.tar.gz2013-04-05 18:01 42M 
    [   ]cmake-2.8.11-rc2-Darwin64-universal.dmg2013-04-05 18:01 39M 
    [   ]cmake-2.8.11-rc2-Darwin64-universal.tar.Z2013-04-05 18:01 55M 
    [   ]cmake-2.8.11-rc2-Darwin64-universal.tar.gz2013-04-05 18:01 39M 
    [TXT]cmake-2.8.11-rc2-IRIX64-64.sh2013-04-05 18:01 13M 
    [   ]cmake-2.8.11-rc2-IRIX64-64.tar.Z2013-04-05 18:01 19M 
    [   ]cmake-2.8.11-rc2-IRIX64-64.tar.gz2013-04-05 18:01 13M 
    [TXT]cmake-2.8.11-rc2-IRIX64-n32.sh2013-04-05 18:01 12M 
    [   ]cmake-2.8.11-rc2-IRIX64-n32.tar.Z2013-04-05 18:01 18M 
    [   ]cmake-2.8.11-rc2-IRIX64-n32.tar.gz2013-04-05 18:00 12M 
    [TXT]cmake-2.8.11-rc2-Linux-i386.sh2013-04-05 18:00 23M 
    [   ]cmake-2.8.11-rc2-Linux-i386.tar.Z2013-04-05 18:00 31M 
    [   ]cmake-2.8.11-rc2-Linux-i386.tar.gz2013-04-05 18:00 23M 
    [TXT]cmake-2.8.11-rc2-SHA-256.txt2016-04-13 12:48 2.2K 
    [TXT]cmake-2.8.11-rc2-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-2.8.11-rc2-win32-x86.exe2013-04-05 18:00 9.9M 
    [   ]cmake-2.8.11-rc2-win32-x86.zip2013-04-05 18:00 12M 
    [   ]cmake-2.8.11-rc2.tar.Z2013-04-05 18:00 8.8M 
    [   ]cmake-2.8.11-rc2.tar.gz2013-04-05 18:00 5.6M 
    [   ]cmake-2.8.11-rc2.zip2013-04-05 18:00 7.6M 
    [TXT]cmake-2.8.11-rc3-AIX-powerpc.sh2013-04-18 17:33 21M 
    [   ]cmake-2.8.11-rc3-AIX-powerpc.tar.Z2013-04-18 17:33 30M 
    [   ]cmake-2.8.11-rc3-AIX-powerpc.tar.gz2013-04-18 17:33 21M 
    [   ]cmake-2.8.11-rc3-Darwin-universal.dmg2013-04-18 17:33 42M 
    [   ]cmake-2.8.11-rc3-Darwin-universal.tar.Z2013-04-18 17:33 59M 
    [   ]cmake-2.8.11-rc3-Darwin-universal.tar.gz2013-04-18 17:33 42M 
    [   ]cmake-2.8.11-rc3-Darwin64-universal.dmg2013-04-18 17:33 39M 
    [   ]cmake-2.8.11-rc3-Darwin64-universal.tar.Z2013-04-18 17:33 55M 
    [   ]cmake-2.8.11-rc3-Darwin64-universal.tar.gz2013-04-18 17:33 39M 
    [TXT]cmake-2.8.11-rc3-IRIX64-64.sh2013-04-18 17:33 13M 
    [   ]cmake-2.8.11-rc3-IRIX64-64.tar.Z2013-04-18 17:33 19M 
    [   ]cmake-2.8.11-rc3-IRIX64-64.tar.gz2013-04-18 17:33 13M 
    [TXT]cmake-2.8.11-rc3-IRIX64-n32.sh2013-04-18 17:33 12M 
    [   ]cmake-2.8.11-rc3-IRIX64-n32.tar.Z2013-04-18 17:33 18M 
    [   ]cmake-2.8.11-rc3-IRIX64-n32.tar.gz2013-04-18 17:33 12M 
    [TXT]cmake-2.8.11-rc3-Linux-i386.sh2013-04-18 17:32 23M 
    [   ]cmake-2.8.11-rc3-Linux-i386.tar.Z2013-04-18 17:32 31M 
    [   ]cmake-2.8.11-rc3-Linux-i386.tar.gz2013-04-18 17:32 23M 
    [TXT]cmake-2.8.11-rc3-SHA-256.txt2016-04-13 12:48 2.2K 
    [TXT]cmake-2.8.11-rc3-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-2.8.11-rc3-win32-x86.exe2013-04-18 17:32 9.9M 
    [   ]cmake-2.8.11-rc3-win32-x86.zip2013-04-18 17:32 12M 
    [   ]cmake-2.8.11-rc3.tar.Z2013-04-18 17:32 8.8M 
    [   ]cmake-2.8.11-rc3.tar.gz2013-04-18 17:32 5.6M 
    [   ]cmake-2.8.11-rc3.zip2013-04-18 17:32 7.6M 
    [TXT]cmake-2.8.11-rc4-AIX-powerpc.sh2013-05-08 09:54 21M 
    [   ]cmake-2.8.11-rc4-AIX-powerpc.tar.Z2013-05-08 09:54 30M 
    [   ]cmake-2.8.11-rc4-AIX-powerpc.tar.gz2013-05-08 09:54 21M 
    [   ]cmake-2.8.11-rc4-Darwin-universal.dmg2013-05-08 09:54 42M 
    [   ]cmake-2.8.11-rc4-Darwin-universal.tar.Z2013-05-08 09:54 59M 
    [   ]cmake-2.8.11-rc4-Darwin-universal.tar.gz2013-05-08 09:54 42M 
    [   ]cmake-2.8.11-rc4-Darwin64-universal.dmg2013-05-08 09:54 39M 
    [   ]cmake-2.8.11-rc4-Darwin64-universal.tar.Z2013-05-08 09:54 55M 
    [   ]cmake-2.8.11-rc4-Darwin64-universal.tar.gz2013-05-08 09:54 39M 
    [TXT]cmake-2.8.11-rc4-IRIX64-64.sh2013-05-08 09:54 13M 
    [   ]cmake-2.8.11-rc4-IRIX64-64.tar.Z2013-05-08 09:54 19M 
    [   ]cmake-2.8.11-rc4-IRIX64-64.tar.gz2013-05-08 09:53 13M 
    [TXT]cmake-2.8.11-rc4-IRIX64-n32.sh2013-05-08 09:53 12M 
    [   ]cmake-2.8.11-rc4-IRIX64-n32.tar.Z2013-05-08 09:53 18M 
    [   ]cmake-2.8.11-rc4-IRIX64-n32.tar.gz2013-05-08 09:53 12M 
    [TXT]cmake-2.8.11-rc4-Linux-i386.sh2013-05-08 09:53 23M 
    [   ]cmake-2.8.11-rc4-Linux-i386.tar.Z2013-05-08 09:53 31M 
    [   ]cmake-2.8.11-rc4-Linux-i386.tar.gz2013-05-08 09:53 23M 
    [TXT]cmake-2.8.11-rc4-SHA-256.txt2016-04-13 12:48 2.2K 
    [TXT]cmake-2.8.11-rc4-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-2.8.11-rc4-win32-x86.exe2013-05-08 09:53 9.9M 
    [   ]cmake-2.8.11-rc4-win32-x86.zip2013-05-08 09:53 12M 
    [   ]cmake-2.8.11-rc4.tar.Z2013-05-08 09:53 8.9M 
    [   ]cmake-2.8.11-rc4.tar.gz2013-05-08 09:53 5.6M 
    [   ]cmake-2.8.11-rc4.zip2013-05-08 09:53 7.6M 
    [   ]cmake-2.8.11-win32-x86.exe2013-05-15 15:53 9.9M 
    [   ]cmake-2.8.11-win32-x86.zip2013-05-15 15:53 12M 
    [TXT]cmake-2.8.11.1-AIX-powerpc.sh2013-06-07 14:41 21M 
    [   ]cmake-2.8.11.1-AIX-powerpc.tar.Z2013-06-07 14:41 30M 
    [   ]cmake-2.8.11.1-AIX-powerpc.tar.gz2013-06-07 14:41 21M 
    [   ]cmake-2.8.11.1-Darwin-universal.dmg2013-06-07 14:41 42M 
    [   ]cmake-2.8.11.1-Darwin-universal.tar.Z2013-06-07 14:41 59M 
    [   ]cmake-2.8.11.1-Darwin-universal.tar.gz2013-06-07 14:41 42M 
    [   ]cmake-2.8.11.1-Darwin64-universal.dmg2013-06-07 14:41 39M 
    [   ]cmake-2.8.11.1-Darwin64-universal.tar.Z2013-06-07 14:41 55M 
    [   ]cmake-2.8.11.1-Darwin64-universal.tar.gz2013-06-07 14:41 39M 
    [TXT]cmake-2.8.11.1-IRIX64-64.sh2013-06-07 14:41 13M 
    [   ]cmake-2.8.11.1-IRIX64-64.tar.Z2013-06-07 14:41 19M 
    [   ]cmake-2.8.11.1-IRIX64-64.tar.gz2013-06-07 14:41 13M 
    [TXT]cmake-2.8.11.1-IRIX64-n32.sh2013-06-07 14:41 12M 
    [   ]cmake-2.8.11.1-IRIX64-n32.tar.Z2013-06-07 14:41 18M 
    [   ]cmake-2.8.11.1-IRIX64-n32.tar.gz2013-06-07 14:41 12M 
    [TXT]cmake-2.8.11.1-Linux-i386.sh2013-06-07 14:41 23M 
    [   ]cmake-2.8.11.1-Linux-i386.tar.Z2013-06-07 14:40 31M 
    [   ]cmake-2.8.11.1-Linux-i386.tar.gz2013-06-07 14:40 23M 
    [TXT]cmake-2.8.11.1-SHA-256.txt2016-04-13 12:48 2.2K 
    [TXT]cmake-2.8.11.1-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-2.8.11.1-win32-x86.exe2013-06-07 14:40 9.9M 
    [   ]cmake-2.8.11.1-win32-x86.zip2013-06-07 14:40 12M 
    [   ]cmake-2.8.11.1.tar.Z2013-06-07 14:40 8.9M 
    [   ]cmake-2.8.11.1.tar.gz2013-06-07 14:40 5.6M 
    [   ]cmake-2.8.11.1.zip2013-06-07 14:40 7.6M 
    [TXT]cmake-2.8.11.2-AIX-powerpc.sh2013-07-03 11:53 21M 
    [   ]cmake-2.8.11.2-AIX-powerpc.tar.Z2013-07-03 11:53 30M 
    [   ]cmake-2.8.11.2-AIX-powerpc.tar.gz2013-07-03 11:53 21M 
    [   ]cmake-2.8.11.2-Darwin-universal.dmg2013-07-03 11:53 42M 
    [   ]cmake-2.8.11.2-Darwin-universal.tar.Z2013-07-03 11:53 59M 
    [   ]cmake-2.8.11.2-Darwin-universal.tar.gz2013-07-03 11:53 42M 
    [   ]cmake-2.8.11.2-Darwin64-universal.dmg2013-07-03 11:53 39M 
    [   ]cmake-2.8.11.2-Darwin64-universal.tar.Z2013-07-03 11:53 55M 
    [   ]cmake-2.8.11.2-Darwin64-universal.tar.gz2013-07-03 11:53 39M 
    [TXT]cmake-2.8.11.2-IRIX64-64.sh2013-07-03 11:53 13M 
    [   ]cmake-2.8.11.2-IRIX64-64.tar.Z2013-07-03 11:53 19M 
    [   ]cmake-2.8.11.2-IRIX64-64.tar.gz2013-07-03 11:52 13M 
    [TXT]cmake-2.8.11.2-IRIX64-n32.sh2013-07-03 11:52 12M 
    [   ]cmake-2.8.11.2-IRIX64-n32.tar.Z2013-07-03 11:52 18M 
    [   ]cmake-2.8.11.2-IRIX64-n32.tar.gz2013-07-03 11:52 12M 
    [TXT]cmake-2.8.11.2-Linux-i386.sh2013-07-03 11:52 23M 
    [   ]cmake-2.8.11.2-Linux-i386.tar.Z2013-07-03 11:52 31M 
    [   ]cmake-2.8.11.2-Linux-i386.tar.gz2013-07-03 11:52 23M 
    [TXT]cmake-2.8.11.2-SHA-256.txt2016-04-13 12:48 2.2K 
    [TXT]cmake-2.8.11.2-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-2.8.11.2-win32-x86.exe2013-07-03 11:52 9.9M 
    [   ]cmake-2.8.11.2-win32-x86.zip2013-07-03 11:52 12M 
    [   ]cmake-2.8.11.2.tar.Z2013-07-03 11:52 8.9M 
    [   ]cmake-2.8.11.2.tar.gz2013-07-03 11:52 5.6M 
    [   ]cmake-2.8.11.2.zip2013-07-03 11:52 7.6M 
    [   ]cmake-2.8.11.tar.Z2013-05-15 15:53 8.9M 
    [   ]cmake-2.8.11.tar.gz2013-05-15 15:53 5.6M 
    [   ]cmake-2.8.11.zip2013-05-15 15:53 7.6M 
    [TXT]cmake-2.8.12-AIX-powerpc.sh2013-10-07 14:34 22M 
    [   ]cmake-2.8.12-AIX-powerpc.tar.Z2013-10-07 14:34 32M 
    [   ]cmake-2.8.12-AIX-powerpc.tar.gz2013-10-07 14:34 22M 
    [   ]cmake-2.8.12-Darwin-universal.dmg2013-10-07 14:34 43M 
    [   ]cmake-2.8.12-Darwin-universal.tar.Z2013-10-07 14:33 61M 
    [   ]cmake-2.8.12-Darwin-universal.tar.gz2013-10-07 14:33 43M 
    [   ]cmake-2.8.12-Darwin64-universal.dmg2013-10-07 14:33 41M 
    [   ]cmake-2.8.12-Darwin64-universal.tar.Z2013-10-07 14:33 57M 
    [   ]cmake-2.8.12-Darwin64-universal.tar.gz2013-10-07 14:33 40M 
    [TXT]cmake-2.8.12-IRIX64-64.sh2013-10-07 14:33 13M 
    [   ]cmake-2.8.12-IRIX64-64.tar.Z2013-10-07 14:33 20M 
    [   ]cmake-2.8.12-IRIX64-64.tar.gz2013-10-07 14:33 13M 
    [TXT]cmake-2.8.12-IRIX64-n32.sh2013-10-07 14:33 13M 
    [   ]cmake-2.8.12-IRIX64-n32.tar.Z2013-10-07 14:33 19M 
    [   ]cmake-2.8.12-IRIX64-n32.tar.gz2013-10-07 14:33 13M 
    [TXT]cmake-2.8.12-Linux-i386.sh2013-10-07 14:33 24M 
    [   ]cmake-2.8.12-Linux-i386.tar.Z2013-10-07 14:33 33M 
    [   ]cmake-2.8.12-Linux-i386.tar.gz2013-10-07 14:33 24M 
    [TXT]cmake-2.8.12-SHA-256.txt2016-04-13 12:48 2.1K 
    [TXT]cmake-2.8.12-SHA-256.txt.asc2016-04-13 12:48 819  
    [TXT]cmake-2.8.12-rc1-AIX-powerpc.sh2013-08-19 08:54 22M 
    [   ]cmake-2.8.12-rc1-AIX-powerpc.tar.Z2013-08-19 08:54 32M 
    [   ]cmake-2.8.12-rc1-AIX-powerpc.tar.gz2013-08-19 08:54 22M 
    [   ]cmake-2.8.12-rc1-Darwin-universal.dmg2013-08-19 08:54 43M 
    [   ]cmake-2.8.12-rc1-Darwin-universal.tar.Z2013-08-19 08:53 61M 
    [   ]cmake-2.8.12-rc1-Darwin-universal.tar.gz2013-08-19 08:53 43M 
    [   ]cmake-2.8.12-rc1-Darwin64-universal.dmg2013-08-19 08:53 40M 
    [   ]cmake-2.8.12-rc1-Darwin64-universal.tar.Z2013-08-19 08:53 57M 
    [   ]cmake-2.8.12-rc1-Darwin64-universal.tar.gz2013-08-19 08:53 40M 
    [TXT]cmake-2.8.12-rc1-IRIX64-64.sh2013-08-19 08:53 13M 
    [   ]cmake-2.8.12-rc1-IRIX64-64.tar.Z2013-08-19 08:53 20M 
    [   ]cmake-2.8.12-rc1-IRIX64-64.tar.gz2013-08-19 08:53 13M 
    [TXT]cmake-2.8.12-rc1-IRIX64-n32.sh2013-08-19 08:53 13M 
    [   ]cmake-2.8.12-rc1-IRIX64-n32.tar.Z2013-08-19 08:53 19M 
    [   ]cmake-2.8.12-rc1-IRIX64-n32.tar.gz2013-08-19 08:53 13M 
    [TXT]cmake-2.8.12-rc1-Linux-i386.sh2013-08-19 08:52 24M 
    [   ]cmake-2.8.12-rc1-Linux-i386.tar.Z2013-08-19 08:52 33M 
    [   ]cmake-2.8.12-rc1-Linux-i386.tar.gz2013-08-19 08:52 24M 
    [TXT]cmake-2.8.12-rc1-SHA-256.txt2016-04-13 12:48 2.2K 
    [TXT]cmake-2.8.12-rc1-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-2.8.12-rc1-win32-x86.exe2013-08-19 08:52 10M 
    [   ]cmake-2.8.12-rc1-win32-x86.zip2013-08-19 08:52 13M 
    [   ]cmake-2.8.12-rc1.tar.Z2013-08-19 08:52 9.1M 
    [   ]cmake-2.8.12-rc1.tar.gz2013-08-19 08:52 5.8M 
    [   ]cmake-2.8.12-rc1.zip2013-08-19 08:52 8.0M 
    [TXT]cmake-2.8.12-rc2-AIX-powerpc.sh2013-08-30 15:59 22M 
    [   ]cmake-2.8.12-rc2-AIX-powerpc.tar.Z2013-08-30 15:59 32M 
    [   ]cmake-2.8.12-rc2-AIX-powerpc.tar.gz2013-08-30 15:59 22M 
    [   ]cmake-2.8.12-rc2-Darwin-universal.dmg2013-08-30 15:59 43M 
    [   ]cmake-2.8.12-rc2-Darwin-universal.tar.Z2013-08-30 15:59 61M 
    [   ]cmake-2.8.12-rc2-Darwin-universal.tar.gz2013-08-30 15:59 43M 
    [   ]cmake-2.8.12-rc2-Darwin64-universal.dmg2013-08-30 15:59 40M 
    [   ]cmake-2.8.12-rc2-Darwin64-universal.tar.Z2013-08-30 15:59 57M 
    [   ]cmake-2.8.12-rc2-Darwin64-universal.tar.gz2013-08-30 15:58 40M 
    [   ]cmake-2.8.12-rc2-IRIX64-64.tar.Z2013-08-30 15:58 20M 
    [   ]cmake-2.8.12-rc2-IRIX64-64.tar.gz2013-08-30 15:58 13M 
    [TXT]cmake-2.8.12-rc2-IRIX64-n32.sh2013-08-30 15:58 13M 
    [   ]cmake-2.8.12-rc2-IRIX64-n32.tar.Z2013-08-30 15:58 19M 
    [   ]cmake-2.8.12-rc2-IRIX64-n32.tar.gz2013-08-30 15:58 13M 
    [TXT]cmake-2.8.12-rc2-Linux-i386.sh2013-08-30 15:58 24M 
    [   ]cmake-2.8.12-rc2-Linux-i386.tar.Z2013-08-30 15:58 33M 
    [   ]cmake-2.8.12-rc2-Linux-i386.tar.gz2013-08-30 15:58 24M 
    [TXT]cmake-2.8.12-rc2-SHA-256.txt2016-04-13 12:48 2.1K 
    [TXT]cmake-2.8.12-rc2-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-2.8.12-rc2-win32-x86.exe2013-08-30 15:58 10M 
    [   ]cmake-2.8.12-rc2-win32-x86.zip2013-08-30 15:58 13M 
    [   ]cmake-2.8.12-rc2.tar.Z2013-08-30 15:58 9.2M 
    [   ]cmake-2.8.12-rc2.tar.gz2013-08-30 15:58 5.8M 
    [   ]cmake-2.8.12-rc2.zip2013-08-30 15:58 8.0M 
    [TXT]cmake-2.8.12-rc3-AIX-powerpc.sh2013-09-10 15:39 22M 
    [   ]cmake-2.8.12-rc3-AIX-powerpc.tar.Z2013-09-10 15:39 32M 
    [   ]cmake-2.8.12-rc3-AIX-powerpc.tar.gz2013-09-10 15:39 22M 
    [   ]cmake-2.8.12-rc3-Darwin-universal.dmg2013-09-10 15:39 43M 
    [   ]cmake-2.8.12-rc3-Darwin-universal.tar.Z2013-09-10 15:39 61M 
    [   ]cmake-2.8.12-rc3-Darwin-universal.tar.gz2013-09-10 15:39 43M 
    [   ]cmake-2.8.12-rc3-Darwin64-universal.dmg2013-09-10 15:39 41M 
    [   ]cmake-2.8.12-rc3-Darwin64-universal.tar.Z2013-09-10 15:39 57M 
    [   ]cmake-2.8.12-rc3-Darwin64-universal.tar.gz2013-09-10 15:39 40M 
    [TXT]cmake-2.8.12-rc3-Linux-i386.sh2013-09-10 15:39 24M 
    [   ]cmake-2.8.12-rc3-Linux-i386.tar.Z2013-09-10 15:39 33M 
    [   ]cmake-2.8.12-rc3-Linux-i386.tar.gz2013-09-10 15:39 24M 
    [TXT]cmake-2.8.12-rc3-SHA-256.txt2016-04-13 12:48 1.7K 
    [TXT]cmake-2.8.12-rc3-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-2.8.12-rc3-win32-x86.exe2013-09-10 15:39 10M 
    [   ]cmake-2.8.12-rc3-win32-x86.zip2013-09-10 15:38 13M 
    [   ]cmake-2.8.12-rc3.tar.Z2013-09-10 15:38 9.1M 
    [   ]cmake-2.8.12-rc3.tar.gz2013-09-10 15:38 5.8M 
    [   ]cmake-2.8.12-rc3.zip2013-09-10 15:38 8.0M 
    [TXT]cmake-2.8.12-rc4-AIX-powerpc.sh2013-10-01 16:23 22M 
    [   ]cmake-2.8.12-rc4-AIX-powerpc.tar.Z2013-10-01 16:23 32M 
    [   ]cmake-2.8.12-rc4-AIX-powerpc.tar.gz2013-10-01 16:23 22M 
    [   ]cmake-2.8.12-rc4-Darwin-universal.dmg2013-10-01 16:23 43M 
    [   ]cmake-2.8.12-rc4-Darwin-universal.tar.Z2013-10-01 16:23 61M 
    [   ]cmake-2.8.12-rc4-Darwin-universal.tar.gz2013-10-01 16:23 43M 
    [   ]cmake-2.8.12-rc4-Darwin64-universal.dmg2013-10-01 16:23 40M 
    [   ]cmake-2.8.12-rc4-Darwin64-universal.tar.Z2013-10-01 16:23 57M 
    [   ]cmake-2.8.12-rc4-Darwin64-universal.tar.gz2013-10-01 16:23 40M 
    [TXT]cmake-2.8.12-rc4-IRIX64-64.sh2013-10-01 16:23 13M 
    [   ]cmake-2.8.12-rc4-IRIX64-64.tar.Z2013-10-01 16:23 20M 
    [   ]cmake-2.8.12-rc4-IRIX64-64.tar.gz2013-10-01 16:23 13M 
    [TXT]cmake-2.8.12-rc4-IRIX64-n32.sh2013-10-01 16:23 13M 
    [   ]cmake-2.8.12-rc4-IRIX64-n32.tar.Z2013-10-01 16:23 19M 
    [   ]cmake-2.8.12-rc4-IRIX64-n32.tar.gz2013-10-01 16:23 13M 
    [TXT]cmake-2.8.12-rc4-Linux-i386.sh2013-10-01 16:23 24M 
    [   ]cmake-2.8.12-rc4-Linux-i386.tar.Z2013-10-01 16:23 33M 
    [   ]cmake-2.8.12-rc4-Linux-i386.tar.gz2013-10-01 16:23 24M 
    [TXT]cmake-2.8.12-rc4-SHA-256.txt2016-04-13 12:48 2.2K 
    [TXT]cmake-2.8.12-rc4-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-2.8.12-rc4-win32-x86.exe2013-10-01 16:22 10M 
    [   ]cmake-2.8.12-rc4-win32-x86.zip2013-10-01 16:22 13M 
    [   ]cmake-2.8.12-rc4.tar.Z2013-10-01 16:22 9.1M 
    [   ]cmake-2.8.12-rc4.tar.gz2013-10-01 16:22 5.8M 
    [   ]cmake-2.8.12-rc4.zip2013-10-01 16:22 8.0M 
    [   ]cmake-2.8.12-win32-x86.exe2013-10-07 14:33 10M 
    [   ]cmake-2.8.12-win32-x86.zip2013-10-07 14:33 13M 
    [TXT]cmake-2.8.12.1-AIX-powerpc.sh2013-11-06 10:48 22M 
    [   ]cmake-2.8.12.1-AIX-powerpc.tar.Z2013-11-06 10:47 32M 
    [   ]cmake-2.8.12.1-AIX-powerpc.tar.gz2013-11-06 10:47 22M 
    [   ]cmake-2.8.12.1-Darwin-universal.dmg2013-11-06 10:47 43M 
    [   ]cmake-2.8.12.1-Darwin-universal.tar.Z2013-11-06 10:47 61M 
    [   ]cmake-2.8.12.1-Darwin-universal.tar.gz2013-11-06 10:47 43M 
    [   ]cmake-2.8.12.1-Darwin64-universal.dmg2013-11-06 10:47 41M 
    [   ]cmake-2.8.12.1-Darwin64-universal.tar.Z2013-11-06 10:46 57M 
    [   ]cmake-2.8.12.1-Darwin64-universal.tar.gz2013-11-06 10:46 41M 
    [TXT]cmake-2.8.12.1-IRIX64-64.sh2013-11-06 10:46 13M 
    [   ]cmake-2.8.12.1-IRIX64-64.tar.Z2013-11-06 10:46 20M 
    [   ]cmake-2.8.12.1-IRIX64-64.tar.gz2013-11-06 10:46 13M 
    [TXT]cmake-2.8.12.1-IRIX64-n32.sh2013-11-06 10:46 13M 
    [   ]cmake-2.8.12.1-IRIX64-n32.tar.Z2013-11-06 10:46 19M 
    [   ]cmake-2.8.12.1-IRIX64-n32.tar.gz2013-11-06 10:46 13M 
    [TXT]cmake-2.8.12.1-Linux-i386.sh2013-11-06 10:46 24M 
    [   ]cmake-2.8.12.1-Linux-i386.tar.Z2013-11-06 10:46 33M 
    [   ]cmake-2.8.12.1-Linux-i386.tar.gz2013-11-06 10:46 24M 
    [TXT]cmake-2.8.12.1-SHA-256.txt2016-04-13 12:48 2.2K 
    [TXT]cmake-2.8.12.1-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-2.8.12.1-win32-x86.exe2013-11-06 10:46 10M 
    [   ]cmake-2.8.12.1-win32-x86.zip2013-11-06 10:46 13M 
    [   ]cmake-2.8.12.1.tar.Z2013-11-06 10:45 9.1M 
    [   ]cmake-2.8.12.1.tar.gz2013-11-06 10:45 5.8M 
    [   ]cmake-2.8.12.1.zip2013-11-06 10:45 8.0M 
    [TXT]cmake-2.8.12.2-AIX-powerpc.sh2014-01-16 14:49 22M 
    [   ]cmake-2.8.12.2-AIX-powerpc.tar.Z2014-01-16 14:49 32M 
    [   ]cmake-2.8.12.2-AIX-powerpc.tar.gz2014-01-16 14:49 22M 
    [   ]cmake-2.8.12.2-Darwin-universal.dmg2014-01-16 14:48 43M 
    [   ]cmake-2.8.12.2-Darwin-universal.tar.Z2014-01-16 14:48 61M 
    [   ]cmake-2.8.12.2-Darwin-universal.tar.gz2014-01-16 14:48 43M 
    [   ]cmake-2.8.12.2-Darwin64-universal.dmg2014-01-16 14:48 41M 
    [   ]cmake-2.8.12.2-Darwin64-universal.tar.Z2014-01-16 14:48 57M 
    [   ]cmake-2.8.12.2-Darwin64-universal.tar.gz2014-01-16 14:48 41M 
    [TXT]cmake-2.8.12.2-IRIX64-64.sh2014-01-16 14:48 13M 
    [   ]cmake-2.8.12.2-IRIX64-64.tar.Z2014-01-16 14:48 20M 
    [   ]cmake-2.8.12.2-IRIX64-64.tar.gz2014-01-16 14:48 13M 
    [TXT]cmake-2.8.12.2-IRIX64-n32.sh2014-01-16 14:48 13M 
    [   ]cmake-2.8.12.2-IRIX64-n32.tar.Z2014-01-16 14:48 19M 
    [   ]cmake-2.8.12.2-IRIX64-n32.tar.gz2014-01-16 14:48 13M 
    [TXT]cmake-2.8.12.2-Linux-i386.sh2014-01-16 14:48 24M 
    [   ]cmake-2.8.12.2-Linux-i386.tar.Z2014-01-16 14:48 33M 
    [   ]cmake-2.8.12.2-Linux-i386.tar.gz2014-01-16 14:48 24M 
    [TXT]cmake-2.8.12.2-SHA-256.txt2016-04-13 12:48 2.2K 
    [TXT]cmake-2.8.12.2-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-2.8.12.2-win32-x86.exe2014-01-16 14:48 10M 
    [   ]cmake-2.8.12.2-win32-x86.zip2014-01-16 14:48 13M 
    [   ]cmake-2.8.12.2.tar.Z2014-01-16 14:48 9.2M 
    [   ]cmake-2.8.12.2.tar.gz2014-01-16 14:48 5.8M 
    [   ]cmake-2.8.12.2.zip2014-01-16 14:48 8.0M 
    [   ]cmake-2.8.12.tar.Z2013-10-07 14:32 9.2M 
    [   ]cmake-2.8.12.tar.gz2013-10-07 14:32 5.8M 
    [   ]cmake-2.8.12.zip2013-10-07 14:32 8.0M 

    diff --git a/bitbake/lib/bb/tests/fetch-testdata/files/v3.0/index.html b/bitbake/lib/bb/tests/fetch-testdata/files/v3.0/index.html
    deleted file mode 100644
    index 2583dcd711..0000000000
    --- a/bitbake/lib/bb/tests/fetch-testdata/files/v3.0/index.html
    +++ /dev/null
    @@ -1,209 +0,0 @@

    Index of /files/v3.0

    [ICO]  Name    Last modified    Size    Description

    [PARENTDIR]  Parent Directory    -
    [   ]cmake-3.0.0-1-src.tar.bz22014-06-10 13:28 4.2M 
    [   ]cmake-3.0.0-1.tar.bz22014-06-10 13:28 8.0M 
    [   ]cmake-3.0.0-Darwin-universal.dmg2014-06-10 13:28 41M 
    [   ]cmake-3.0.0-Darwin-universal.tar.Z2014-06-10 13:27 58M 
    [   ]cmake-3.0.0-Darwin-universal.tar.gz2014-06-10 13:27 41M 
    [   ]cmake-3.0.0-Darwin64-universal.dmg2014-06-10 13:27 39M 
    [   ]cmake-3.0.0-Darwin64-universal.tar.Z2014-06-10 13:27 54M 
    [   ]cmake-3.0.0-Darwin64-universal.tar.gz2014-06-10 13:27 39M 
    [TXT]cmake-3.0.0-Linux-i386.sh2014-06-10 13:27 22M 
    [   ]cmake-3.0.0-Linux-i386.tar.Z2014-06-10 13:27 31M 
    [   ]cmake-3.0.0-Linux-i386.tar.gz2014-06-10 13:27 22M 
    [TXT]cmake-3.0.0-SHA-256.txt2016-04-13 12:48 1.3K 
    [TXT]cmake-3.0.0-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-3.0.0-rc1-1-src.tar.bz22014-02-28 13:54 4.2M 
    [   ]cmake-3.0.0-rc1-1.tar.bz22014-02-28 13:54 8.0M 
    [TXT]cmake-3.0.0-rc1-AIX-powerpc.sh2014-02-28 13:54 21M 
    [   ]cmake-3.0.0-rc1-AIX-powerpc.tar.Z2014-02-28 13:54 31M 
    [   ]cmake-3.0.0-rc1-AIX-powerpc.tar.gz2014-02-28 13:54 21M 
    [   ]cmake-3.0.0-rc1-Darwin-universal.dmg2014-02-28 13:54 41M 
    [   ]cmake-3.0.0-rc1-Darwin-universal.tar.Z2014-02-28 13:54 58M 
    [   ]cmake-3.0.0-rc1-Darwin-universal.tar.gz2014-02-28 13:54 41M 
    [   ]cmake-3.0.0-rc1-Darwin64-universal.dmg2014-02-28 13:54 39M 
    [   ]cmake-3.0.0-rc1-Darwin64-universal.tar.Z2014-02-28 13:54 54M 
    [   ]cmake-3.0.0-rc1-Darwin64-universal.tar.gz2014-02-28 13:54 39M 
    [TXT]cmake-3.0.0-rc1-IRIX64-64.sh2014-02-28 13:54 13M 
    [   ]cmake-3.0.0-rc1-IRIX64-64.tar.Z2014-02-28 13:54 19M 
    [   ]cmake-3.0.0-rc1-IRIX64-64.tar.gz2014-02-28 13:54 13M 
    [TXT]cmake-3.0.0-rc1-IRIX64-n32.sh2014-02-28 13:54 12M 
    [   ]cmake-3.0.0-rc1-IRIX64-n32.tar.Z2014-02-28 13:54 19M 
    [   ]cmake-3.0.0-rc1-IRIX64-n32.tar.gz2014-02-28 13:54 12M 
    [TXT]cmake-3.0.0-rc1-Linux-i386.sh2014-02-28 13:54 22M 
    [   ]cmake-3.0.0-rc1-Linux-i386.tar.Z2014-02-28 13:54 31M 
    [   ]cmake-3.0.0-rc1-Linux-i386.tar.gz2014-02-28 13:53 22M 
    [TXT]cmake-3.0.0-rc1-SHA-256.txt2016-04-13 12:48 2.2K 
    [TXT]cmake-3.0.0-rc1-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-3.0.0-rc1-win32-x86.exe2014-02-28 13:53 11M 
    [   ]cmake-3.0.0-rc1-win32-x86.zip2014-02-28 13:53 14M 
    [   ]cmake-3.0.0-rc1.tar.Z2014-02-28 13:53 8.5M 
    [   ]cmake-3.0.0-rc1.tar.gz2014-02-28 13:53 5.2M 
    [   ]cmake-3.0.0-rc1.zip2014-02-28 13:53 8.0M 
    [   ]cmake-3.0.0-rc2-1-src.tar.bz22014-03-19 10:29 4.2M 
    [   ]cmake-3.0.0-rc2-1.tar.bz22014-03-19 10:28 8.0M 
    [TXT]cmake-3.0.0-rc2-AIX-powerpc.sh2014-03-19 10:28 21M 
    [   ]cmake-3.0.0-rc2-AIX-powerpc.tar.Z2014-03-19 10:28 31M 
    [   ]cmake-3.0.0-rc2-AIX-powerpc.tar.gz2014-03-19 10:28 21M 
    [   ]cmake-3.0.0-rc2-Darwin-universal.dmg2014-03-19 10:28 41M 
    [   ]cmake-3.0.0-rc2-Darwin-universal.tar.Z2014-03-19 10:28 58M 
    [   ]cmake-3.0.0-rc2-Darwin-universal.tar.gz2014-03-19 10:28 41M 
    [   ]cmake-3.0.0-rc2-Darwin64-universal.dmg2014-03-19 10:28 39M 
    [   ]cmake-3.0.0-rc2-Darwin64-universal.tar.Z2014-03-19 10:28 54M 
    [   ]cmake-3.0.0-rc2-Darwin64-universal.tar.gz2014-03-19 10:28 39M 
    [TXT]cmake-3.0.0-rc2-IRIX64-64.sh2014-03-19 10:28 13M 
    [   ]cmake-3.0.0-rc2-IRIX64-64.tar.Z2014-03-19 10:28 19M 
    [   ]cmake-3.0.0-rc2-IRIX64-64.tar.gz2014-03-19 10:28 13M 
    [TXT]cmake-3.0.0-rc2-IRIX64-n32.sh2014-03-19 10:28 12M 
    [   ]cmake-3.0.0-rc2-IRIX64-n32.tar.Z2014-03-19 10:28 19M 
    [   ]cmake-3.0.0-rc2-IRIX64-n32.tar.gz2014-03-19 10:28 12M 
    [TXT]cmake-3.0.0-rc2-Linux-i386.sh2014-03-19 10:28 22M 
    [   ]cmake-3.0.0-rc2-Linux-i386.tar.Z2014-03-19 10:28 31M 
    [   ]cmake-3.0.0-rc2-Linux-i386.tar.gz2014-03-19 10:28 22M 
    [TXT]cmake-3.0.0-rc2-SHA-256.txt2016-04-13 12:48 2.2K 
    [TXT]cmake-3.0.0-rc2-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-3.0.0-rc2-win32-x86.exe2014-03-19 10:28 11M 
    [   ]cmake-3.0.0-rc2-win32-x86.zip2014-03-19 10:27 14M 
    [   ]cmake-3.0.0-rc2.tar.Z2014-03-19 10:27 8.5M 
    [   ]cmake-3.0.0-rc2.tar.gz2014-03-19 10:27 5.2M 
    [   ]cmake-3.0.0-rc2.zip2014-03-19 10:27 8.0M 
    [   ]cmake-3.0.0-rc3-1-src.tar.bz22014-03-26 13:00 4.2M 
    [   ]cmake-3.0.0-rc3-1.tar.bz22014-03-26 13:00 8.0M 
    [TXT]cmake-3.0.0-rc3-AIX-powerpc.sh2014-03-26 13:00 21M 
    [   ]cmake-3.0.0-rc3-AIX-powerpc.tar.Z2014-03-26 13:00 31M 
    [   ]cmake-3.0.0-rc3-AIX-powerpc.tar.gz2014-03-26 13:00 21M 
    [   ]cmake-3.0.0-rc3-Darwin-universal.dmg2014-03-26 13:00 41M 
    [   ]cmake-3.0.0-rc3-Darwin-universal.tar.Z2014-03-26 13:00 58M 
    [   ]cmake-3.0.0-rc3-Darwin-universal.tar.gz2014-03-26 13:00 41M 
    [   ]cmake-3.0.0-rc3-Darwin64-universal.dmg2014-03-26 12:59 39M 
    [   ]cmake-3.0.0-rc3-Darwin64-universal.tar.Z2014-03-26 12:59 54M 
    [   ]cmake-3.0.0-rc3-Darwin64-universal.tar.gz2014-03-26 12:59 39M 
    [TXT]cmake-3.0.0-rc3-IRIX64-64.sh2014-03-26 12:59 13M 
    [   ]cmake-3.0.0-rc3-IRIX64-64.tar.Z2014-03-26 12:59 19M 
    [   ]cmake-3.0.0-rc3-IRIX64-64.tar.gz2014-03-26 12:59 13M 
    [TXT]cmake-3.0.0-rc3-IRIX64-n32.sh2014-03-26 12:59 12M 
    [   ]cmake-3.0.0-rc3-IRIX64-n32.tar.Z2014-03-26 12:59 19M 
    [   ]cmake-3.0.0-rc3-IRIX64-n32.tar.gz2014-03-26 12:59 12M 
    [TXT]cmake-3.0.0-rc3-SHA-256.txt2016-04-13 12:48 1.9K 
    [TXT]cmake-3.0.0-rc3-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-3.0.0-rc3-win32-x86.exe2014-03-26 12:59 11M 
    [   ]cmake-3.0.0-rc3-win32-x86.zip2014-03-26 12:59 14M 
    [   ]cmake-3.0.0-rc3.tar.Z2014-03-26 12:59 8.4M 
    [   ]cmake-3.0.0-rc3.tar.gz2014-03-26 12:59 5.2M 
    [   ]cmake-3.0.0-rc3.zip2014-03-26 12:59 8.0M 
    [   ]cmake-3.0.0-rc4-1-src.tar.bz22014-04-18 09:52 4.2M 
    [   ]cmake-3.0.0-rc4-1.tar.bz22014-04-18 09:52 8.0M 
    [TXT]cmake-3.0.0-rc4-AIX-powerpc.sh2014-04-18 09:52 22M 
    [   ]cmake-3.0.0-rc4-AIX-powerpc.tar.Z2014-04-18 09:52 31M 
    [   ]cmake-3.0.0-rc4-AIX-powerpc.tar.gz2014-04-18 09:52 22M 
    [   ]cmake-3.0.0-rc4-Darwin-universal.dmg2014-04-18 09:52 41M 
    [   ]cmake-3.0.0-rc4-Darwin-universal.tar.Z2014-04-18 09:52 58M 
    [   ]cmake-3.0.0-rc4-Darwin-universal.tar.gz2014-04-18 09:51 41M 
    [   ]cmake-3.0.0-rc4-Darwin64-universal.dmg2014-04-18 09:51 39M 
    [   ]cmake-3.0.0-rc4-Darwin64-universal.tar.Z2014-04-18 09:51 54M 
    [   ]cmake-3.0.0-rc4-Darwin64-universal.tar.gz2014-04-18 09:51 39M 
    [TXT]cmake-3.0.0-rc4-IRIX64-64.sh2014-04-18 09:51 13M 
    [   ]cmake-3.0.0-rc4-IRIX64-64.tar.Z2014-04-18 09:51 19M 
    [   ]cmake-3.0.0-rc4-IRIX64-64.tar.gz2014-04-18 09:51 13M 
    [TXT]cmake-3.0.0-rc4-IRIX64-n32.sh2014-04-18 09:51 12M 
    [   ]cmake-3.0.0-rc4-IRIX64-n32.tar.Z2014-04-18 09:51 19M 
    [   ]cmake-3.0.0-rc4-IRIX64-n32.tar.gz2014-04-18 09:51 12M 
    [TXT]cmake-3.0.0-rc4-Linux-i386.sh2014-04-18 09:51 22M 
    [   ]cmake-3.0.0-rc4-Linux-i386.tar.Z2014-04-18 09:51 31M 
    [   ]cmake-3.0.0-rc4-Linux-i386.tar.gz2014-04-18 09:51 22M 
    [TXT]cmake-3.0.0-rc4-SHA-256.txt2016-04-13 12:48 2.2K 
    [TXT]cmake-3.0.0-rc4-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-3.0.0-rc4-win32-x86.exe2014-04-18 09:51 11M 
    [   ]cmake-3.0.0-rc4-win32-x86.zip2014-04-18 09:51 14M 
    [   ]cmake-3.0.0-rc4.tar.Z2014-04-18 09:51 8.5M 
    [   ]cmake-3.0.0-rc4.tar.gz2014-04-18 09:51 5.2M 
    [   ]cmake-3.0.0-rc4.zip2014-04-18 09:51 8.0M 
    [   ]cmake-3.0.0-rc5-1-src.tar.bz22014-05-13 14:25 4.2M 
    [   ]cmake-3.0.0-rc5-1.tar.bz22014-05-13 14:25 8.0M 
    [   ]cmake-3.0.0-rc5-Darwin-universal.dmg2014-05-13 14:25 41M 
    [   ]cmake-3.0.0-rc5-Darwin-universal.tar.Z2014-05-13 14:24 58M 
    [   ]cmake-3.0.0-rc5-Darwin-universal.tar.gz2014-05-13 14:24 41M 
    [   ]cmake-3.0.0-rc5-Darwin64-universal.dmg2014-05-13 14:24 39M 
    [   ]cmake-3.0.0-rc5-Darwin64-universal.tar.Z2014-05-13 14:24 54M 
    [   ]cmake-3.0.0-rc5-Darwin64-universal.tar.gz2014-05-13 14:24 39M 
    [TXT]cmake-3.0.0-rc5-Linux-i386.sh2014-05-13 14:24 22M 
    [   ]cmake-3.0.0-rc5-Linux-i386.tar.Z2014-05-13 14:24 31M 
    [   ]cmake-3.0.0-rc5-Linux-i386.tar.gz2014-05-13 14:24 22M 
    [TXT]cmake-3.0.0-rc5-SHA-256.txt2016-04-13 12:48 1.4K 
    [TXT]cmake-3.0.0-rc5-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-3.0.0-rc5-win32-x86.exe2014-05-13 14:24 11M 
    [   ]cmake-3.0.0-rc5-win32-x86.zip2014-05-13 14:24 14M 
    [   ]cmake-3.0.0-rc5.tar.Z2014-05-13 14:24 8.5M 
    [   ]cmake-3.0.0-rc5.tar.gz2014-05-13 14:24 5.2M 
    [   ]cmake-3.0.0-rc5.zip2014-05-13 14:24 8.0M 
    [   ]cmake-3.0.0-rc6-1-src.tar.bz22014-05-22 14:57 4.2M 
    [   ]cmake-3.0.0-rc6-1.tar.bz22014-05-22 14:57 8.0M 
    [   ]cmake-3.0.0-rc6-Darwin-universal.dmg2014-05-22 14:57 41M 
    [   ]cmake-3.0.0-rc6-Darwin-universal.tar.Z2014-05-22 14:56 58M 
    [   ]cmake-3.0.0-rc6-Darwin-universal.tar.gz2014-05-22 14:56 41M 
    [   ]cmake-3.0.0-rc6-Darwin64-universal.dmg2014-05-22 14:56 39M 
    [   ]cmake-3.0.0-rc6-Darwin64-universal.tar.Z2014-05-22 14:56 54M 
    [   ]cmake-3.0.0-rc6-Darwin64-universal.tar.gz2014-05-22 14:56 39M 
    [TXT]cmake-3.0.0-rc6-Linux-i386.sh2014-05-22 14:56 22M 
    [   ]cmake-3.0.0-rc6-Linux-i386.tar.Z2014-05-22 14:56 31M 
    [   ]cmake-3.0.0-rc6-Linux-i386.tar.gz2014-05-22 14:56 22M 
    [TXT]cmake-3.0.0-rc6-SHA-256.txt2016-04-13 12:48 1.4K 
    [TXT]cmake-3.0.0-rc6-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-3.0.0-rc6-win32-x86.exe2014-05-22 14:56 11M 
    [   ]cmake-3.0.0-rc6-win32-x86.zip2014-05-22 14:56 14M 
    [   ]cmake-3.0.0-rc6.tar.Z2014-05-22 14:56 8.5M 
    [   ]cmake-3.0.0-rc6.tar.gz2014-05-22 14:56 5.2M 
    [   ]cmake-3.0.0-rc6.zip2014-05-22 14:56 8.0M 
    [   ]cmake-3.0.0-win32-x86.exe2014-06-10 13:27 11M 
    [   ]cmake-3.0.0-win32-x86.zip2014-06-10 13:27 14M 
    [   ]cmake-3.0.0.tar.Z2014-06-10 13:27 8.5M 
    [   ]cmake-3.0.0.tar.gz2014-06-10 13:27 5.2M 
    [   ]cmake-3.0.0.zip2014-06-10 13:27 7.9M 
    [   ]cmake-3.0.1-1-src.tar.bz22014-09-11 09:19 4.2M 
    [   ]cmake-3.0.1-1.tar.bz22014-09-11 09:19 8.0M 
    [   ]cmake-3.0.1-Darwin-universal.dmg2014-09-11 09:19 41M 
    [   ]cmake-3.0.1-Darwin-universal.tar.Z2014-09-11 09:19 58M 
    [   ]cmake-3.0.1-Darwin-universal.tar.gz2014-09-11 09:19 41M 
    [   ]cmake-3.0.1-Darwin64-universal.dmg2014-09-11 09:18 39M 
    [   ]cmake-3.0.1-Darwin64-universal.tar.Z2014-09-11 09:18 54M 
    [   ]cmake-3.0.1-Darwin64-universal.tar.gz2014-09-11 09:18 39M 
    [TXT]cmake-3.0.1-Linux-i386.sh2014-09-11 09:18 22M 
    [   ]cmake-3.0.1-Linux-i386.tar.Z2014-09-11 09:18 31M 
    [   ]cmake-3.0.1-Linux-i386.tar.gz2014-09-11 09:18 22M 
    [TXT]cmake-3.0.1-SHA-256.txt2016-04-13 12:48 1.3K 
    [TXT]cmake-3.0.1-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-3.0.1-win32-x86.exe2014-09-11 09:18 11M 
    [   ]cmake-3.0.1-win32-x86.zip2014-09-11 09:18 14M 
    [   ]cmake-3.0.1.tar.Z2014-09-11 09:18 8.5M 
    [   ]cmake-3.0.1.tar.gz2014-09-11 09:18 5.2M 
    [   ]cmake-3.0.1.zip2014-09-11 09:18 7.9M 
    [   ]cmake-3.0.2-1-src.tar.bz22014-09-11 12:19 4.2M 
    [   ]cmake-3.0.2-1.tar.bz22014-09-11 12:19 8.0M 
    [   ]cmake-3.0.2-Darwin-universal.dmg2014-09-11 12:19 41M 
    [   ]cmake-3.0.2-Darwin-universal.tar.Z2014-09-11 12:19 58M 
    [   ]cmake-3.0.2-Darwin-universal.tar.gz2014-09-11 12:19 41M 
    [   ]cmake-3.0.2-Darwin64-universal.dmg2014-09-11 12:19 39M 
    [   ]cmake-3.0.2-Darwin64-universal.tar.Z2014-09-11 12:19 54M 
    [   ]cmake-3.0.2-Darwin64-universal.tar.gz2014-09-11 12:18 39M 
    [TXT]cmake-3.0.2-Linux-i386.sh2014-09-11 12:18 22M 
    [   ]cmake-3.0.2-Linux-i386.tar.Z2014-09-11 12:18 31M 
    [   ]cmake-3.0.2-Linux-i386.tar.gz2014-09-11 12:18 22M 
    [TXT]cmake-3.0.2-SHA-256.txt2016-04-13 12:48 1.3K 
    [TXT]cmake-3.0.2-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-3.0.2-win32-x86.exe2014-09-11 12:18 11M 
    [   ]cmake-3.0.2-win32-x86.zip2014-09-11 12:18 14M 
    [   ]cmake-3.0.2.tar.Z2014-09-11 12:18 8.5M 
    [   ]cmake-3.0.2.tar.gz2014-09-11 12:18 5.2M 
    [   ]cmake-3.0.2.zip2014-09-11 12:18 7.9M 

    diff --git a/bitbake/lib/bb/tests/fetch-testdata/files/v3.1/index.html b/bitbake/lib/bb/tests/fetch-testdata/files/v3.1/index.html
    deleted file mode 100644
    index 635d34c096..0000000000
    --- a/bitbake/lib/bb/tests/fetch-testdata/files/v3.1/index.html
    +++ /dev/null
    @@ -1,156 +0,0 @@

    Index of /files/v3.1

    [ICO]  Name    Last modified    Size    Description

    [PARENTDIR]  Parent Directory    -
    [   ]cmake-3.1.0-1-src.tar.bz22014-12-17 13:10 4.5M 
    [   ]cmake-3.1.0-1.tar.bz22014-12-17 13:10 9.0M 
    [   ]cmake-3.1.0-Darwin-universal.dmg2014-12-17 13:10 45M 
    [   ]cmake-3.1.0-Darwin-universal.tar.Z2014-12-17 13:10 63M 
    [   ]cmake-3.1.0-Darwin-universal.tar.gz2014-12-17 13:10 44M 
    [   ]cmake-3.1.0-Darwin64-universal.dmg2014-12-17 13:10 28M 
    [   ]cmake-3.1.0-Darwin64-universal.tar.Z2014-12-17 13:10 40M 
    [   ]cmake-3.1.0-Darwin64-universal.tar.gz2014-12-17 13:10 28M 
    [   ]cmake-3.1.0-Darwin64.dmg2014-12-19 11:38 29M 
    [   ]cmake-3.1.0-Darwin64.tar.Z2014-12-19 11:38 41M 
    [   ]cmake-3.1.0-Darwin64.tar.gz2014-12-19 11:38 29M 
    [TXT]cmake-3.1.0-Linux-i386.sh2014-12-17 13:10 24M 
    [   ]cmake-3.1.0-Linux-i386.tar.Z2014-12-17 13:10 33M 
    [   ]cmake-3.1.0-Linux-i386.tar.gz2014-12-17 13:10 24M 
    [TXT]cmake-3.1.0-Linux-x86_64.sh2014-12-17 13:10 25M 
    [   ]cmake-3.1.0-Linux-x86_64.tar.Z2014-12-17 13:10 35M 
    [   ]cmake-3.1.0-Linux-x86_64.tar.gz2014-12-17 13:10 25M 
    [TXT]cmake-3.1.0-SHA-256.txt2016-04-13 12:48 1.9K 
    [TXT]cmake-3.1.0-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-3.1.0-rc1-1-src.tar.bz22014-10-28 14:09 4.5M 
    [   ]cmake-3.1.0-rc1-1.tar.bz22014-10-28 14:09 9.0M 
    [   ]cmake-3.1.0-rc1-Darwin-universal.dmg2014-10-28 14:09 45M 
    [   ]cmake-3.1.0-rc1-Darwin-universal.tar.Z2014-10-28 14:09 63M 
    [   ]cmake-3.1.0-rc1-Darwin-universal.tar.gz2014-10-28 14:09 44M 
    [   ]cmake-3.1.0-rc1-Darwin64-universal.dmg2014-10-28 14:09 28M 
    [   ]cmake-3.1.0-rc1-Darwin64-universal.tar.Z2014-10-28 14:09 40M 
    [   ]cmake-3.1.0-rc1-Darwin64-universal.tar.gz2014-10-28 14:08 28M 
    [TXT]cmake-3.1.0-rc1-Linux-i386.sh2014-10-28 14:08 24M 
    [   ]cmake-3.1.0-rc1-Linux-i386.tar.Z2014-10-28 14:08 33M 
    [   ]cmake-3.1.0-rc1-Linux-i386.tar.gz2014-10-28 14:08 24M 
    [TXT]cmake-3.1.0-rc1-SHA-256.txt2016-04-13 12:48 1.4K 
    [TXT]cmake-3.1.0-rc1-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-3.1.0-rc1-win32-x86.exe2014-10-28 14:08 12M 
    [   ]cmake-3.1.0-rc1-win32-x86.zip2014-10-28 14:08 15M 
    [   ]cmake-3.1.0-rc1.tar.Z2014-10-28 14:08 9.2M 
    [   ]cmake-3.1.0-rc1.tar.gz2014-10-28 14:08 5.7M 
    [   ]cmake-3.1.0-rc1.zip2014-10-28 14:08 9.0M 
    [   ]cmake-3.1.0-rc2-1-src.tar.bz22014-11-13 11:51 4.5M 
    [   ]cmake-3.1.0-rc2-1.tar.bz22014-11-13 11:51 9.0M 
    [   ]cmake-3.1.0-rc2-Darwin-universal.dmg2014-11-13 11:51 45M 
    [   ]cmake-3.1.0-rc2-Darwin-universal.tar.Z2014-11-13 11:51 63M 
    [   ]cmake-3.1.0-rc2-Darwin-universal.tar.gz2014-11-13 11:50 44M 
    [   ]cmake-3.1.0-rc2-Darwin64-universal.dmg2014-11-13 11:50 28M 
    [   ]cmake-3.1.0-rc2-Darwin64-universal.tar.Z2014-11-13 11:50 40M 
    [   ]cmake-3.1.0-rc2-Darwin64-universal.tar.gz2014-11-13 11:50 28M 
    [TXT]cmake-3.1.0-rc2-Linux-i386.sh2014-11-13 11:50 24M 
    [   ]cmake-3.1.0-rc2-Linux-i386.tar.Z2014-11-13 11:50 33M 
    [   ]cmake-3.1.0-rc2-Linux-i386.tar.gz2014-11-13 11:50 24M 
    [TXT]cmake-3.1.0-rc2-SHA-256.txt2016-04-13 12:48 1.4K 
    [TXT]cmake-3.1.0-rc2-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-3.1.0-rc2-win32-x86.exe2014-11-13 11:50 12M 
    [   ]cmake-3.1.0-rc2-win32-x86.zip2014-11-13 11:50 15M 
    [   ]cmake-3.1.0-rc2.tar.Z2014-11-13 11:50 9.2M 
    [   ]cmake-3.1.0-rc2.tar.gz2014-11-13 11:50 5.7M 
    [   ]cmake-3.1.0-rc2.zip2014-11-13 11:50 9.0M 
    [   ]cmake-3.1.0-rc3-1-src.tar.bz22014-12-09 16:53 4.5M 
    [   ]cmake-3.1.0-rc3-1.tar.bz22014-12-09 16:53 9.0M 
    [   ]cmake-3.1.0-rc3-Darwin-universal.dmg2014-12-09 16:53 45M 
    [   ]cmake-3.1.0-rc3-Darwin-universal.tar.Z2014-12-09 16:52 63M 
    [   ]cmake-3.1.0-rc3-Darwin-universal.tar.gz2014-12-09 16:52 44M 
    [   ]cmake-3.1.0-rc3-Darwin64-universal.dmg2014-12-09 16:52 28M 
    [   ]cmake-3.1.0-rc3-Darwin64-universal.tar.Z2014-12-09 16:52 40M 
    [   ]cmake-3.1.0-rc3-Darwin64-universal.tar.gz2014-12-09 16:52 28M 
    [TXT]cmake-3.1.0-rc3-Linux-i386.sh2014-12-09 16:52 24M 
    [   ]cmake-3.1.0-rc3-Linux-i386.tar.Z2014-12-09 16:52 33M 
    [   ]cmake-3.1.0-rc3-Linux-i386.tar.gz2014-12-09 16:52 24M 
    [TXT]cmake-3.1.0-rc3-Linux-x86_64.sh2014-12-09 16:52 25M 
    [   ]cmake-3.1.0-rc3-Linux-x86_64.tar.Z2014-12-09 16:52 35M 
    [   ]cmake-3.1.0-rc3-Linux-x86_64.tar.gz2014-12-09 16:52 25M 
    [TXT]cmake-3.1.0-rc3-SHA-256.txt2016-04-13 12:48 1.6K 
    [TXT]cmake-3.1.0-rc3-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-3.1.0-rc3-win32-x86.exe2014-12-09 16:52 12M 
    [   ]cmake-3.1.0-rc3-win32-x86.zip2014-12-09 16:52 15M 
    [   ]cmake-3.1.0-rc3.tar.Z2014-12-09 16:52 9.2M 
    [   ]cmake-3.1.0-rc3.tar.gz2014-12-09 16:52 5.7M 
    [   ]cmake-3.1.0-rc3.zip2014-12-09 16:52 9.0M 
    [   ]cmake-3.1.0-win32-x86.exe2014-12-17 13:10 12M 
    [   ]cmake-3.1.0-win32-x86.zip2014-12-17 13:10 15M 
    [   ]cmake-3.1.0.tar.Z2014-12-17 13:10 9.2M 
    [   ]cmake-3.1.0.tar.gz2014-12-17 13:10 5.7M 
    [   ]cmake-3.1.0.zip2014-12-17 13:10 9.0M 
    [   ]cmake-3.1.1-1-src.tar.bz22015-01-22 16:49 4.5M 
    [   ]cmake-3.1.1-1.tar.bz22015-01-22 16:49 9.1M 
    [   ]cmake-3.1.1-Darwin-universal.dmg2015-01-22 16:49 45M 
    [   ]cmake-3.1.1-Darwin-universal.tar.Z2015-01-22 16:49 64M 
    [   ]cmake-3.1.1-Darwin-universal.tar.gz2015-01-22 16:49 44M 
    [   ]cmake-3.1.1-Darwin-x86_64.dmg2015-01-22 16:48 29M 
    [   ]cmake-3.1.1-Darwin-x86_64.tar.Z2015-01-22 16:48 41M 
    [   ]cmake-3.1.1-Darwin-x86_64.tar.gz2015-01-22 16:48 29M 
    [TXT]cmake-3.1.1-Linux-i386.sh2015-01-22 16:48 24M 
    [   ]cmake-3.1.1-Linux-i386.tar.Z2015-01-22 16:48 33M 
    [   ]cmake-3.1.1-Linux-i386.tar.gz2015-01-22 16:48 24M 
    [TXT]cmake-3.1.1-Linux-x86_64.sh2015-01-22 16:48 25M 
    [   ]cmake-3.1.1-Linux-x86_64.tar.Z2015-01-22 16:48 35M 
    [   ]cmake-3.1.1-Linux-x86_64.tar.gz2015-01-22 16:48 25M 
    [TXT]cmake-3.1.1-SHA-256.txt2016-04-13 12:48 1.6K 
    [TXT]cmake-3.1.1-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-3.1.1-win32-x86.exe2015-01-22 16:48 12M 
    [   ]cmake-3.1.1-win32-x86.zip2015-01-22 16:48 15M 
    [   ]cmake-3.1.1.tar.Z2015-01-22 16:48 9.2M 
    [   ]cmake-3.1.1.tar.gz2015-01-22 16:48 5.7M 
    [   ]cmake-3.1.1.zip2015-01-22 16:48 9.0M 
    [   ]cmake-3.1.2-Darwin-universal.dmg2015-02-05 10:06 45M 
    [   ]cmake-3.1.2-Darwin-universal.tar.Z2015-02-05 10:06 64M 
    [   ]cmake-3.1.2-Darwin-universal.tar.gz2015-02-05 10:06 44M 
    [   ]cmake-3.1.2-Darwin-x86_64.dmg2015-02-05 10:06 29M 
    [   ]cmake-3.1.2-Darwin-x86_64.tar.Z2015-02-05 10:06 41M 
    [   ]cmake-3.1.2-Darwin-x86_64.tar.gz2015-02-05 10:06 29M 
    [TXT]cmake-3.1.2-Linux-i386.sh2015-02-05 10:06 24M 
    [   ]cmake-3.1.2-Linux-i386.tar.Z2015-02-05 10:06 33M 
    [   ]cmake-3.1.2-Linux-i386.tar.gz2015-02-05 10:05 24M 
    [TXT]cmake-3.1.2-Linux-x86_64.sh2015-02-05 10:05 25M 
    [   ]cmake-3.1.2-Linux-x86_64.tar.Z2015-02-05 10:05 35M 
    [   ]cmake-3.1.2-Linux-x86_64.tar.gz2015-02-05 10:05 25M 
    [TXT]cmake-3.1.2-SHA-256.txt2016-04-13 12:48 1.6K 
    [TXT]cmake-3.1.2-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-3.1.2-win32-x86.exe2015-02-05 10:05 12M 
    [   ]cmake-3.1.2-win32-x86.zip2015-02-05 10:05 15M 
    [   ]cmake-3.1.2.tar.Z2015-02-05 10:05 9.2M 
    [   ]cmake-3.1.2.tar.gz2015-02-05 10:05 5.7M 
    [   ]cmake-3.1.2.zip2015-02-05 10:05 9.0M 
    [   ]cmake-3.1.3-1-src.tar.bz22015-02-12 17:40 4.5M 
    [   ]cmake-3.1.3-1.tar.bz22015-02-12 17:40 9.0M 
    [   ]cmake-3.1.3-Darwin-universal.dmg2015-02-12 17:40 45M 
    [   ]cmake-3.1.3-Darwin-universal.tar.Z2015-02-12 17:40 64M 
    [   ]cmake-3.1.3-Darwin-universal.tar.gz2015-02-12 17:40 44M 
    [   ]cmake-3.1.3-Darwin-x86_64.dmg2015-02-12 17:40 29M 
    [   ]cmake-3.1.3-Darwin-x86_64.tar.Z2015-02-12 17:40 41M 
    [   ]cmake-3.1.3-Darwin-x86_64.tar.gz2015-02-12 17:39 29M 
    [TXT]cmake-3.1.3-Linux-i386.sh2015-02-12 17:39 24M 
    [   ]cmake-3.1.3-Linux-i386.tar.Z2015-02-12 17:39 33M 
    [   ]cmake-3.1.3-Linux-i386.tar.gz2015-02-12 17:39 24M 
    [TXT]cmake-3.1.3-Linux-x86_64.sh2015-02-12 17:39 25M 
    [   ]cmake-3.1.3-Linux-x86_64.tar.Z2015-02-12 17:39 35M 
    [   ]cmake-3.1.3-Linux-x86_64.tar.gz2015-02-12 17:39 25M 
    [TXT]cmake-3.1.3-SHA-256.txt2016-04-13 12:48 1.6K 
    [TXT]cmake-3.1.3-SHA-256.txt.asc2016-04-13 12:48 819  
    [   ]cmake-3.1.3-win32-x86.exe2015-02-12 17:39 12M 
    [   ]cmake-3.1.3-win32-x86.zip2015-02-12 17:39 15M 
    [   ]cmake-3.1.3.tar.Z2015-02-12 17:39 9.2M 
    [   ]cmake-3.1.3.tar.gz2015-02-12 17:39 5.7M 
    [   ]cmake-3.1.3.zip2015-02-12 17:39 9.0M 

    diff --git a/bitbake/lib/bb/tests/fetch-testdata/files/v3.10/index.html b/bitbake/lib/bb/tests/fetch-testdata/files/v3.10/index.html
    deleted file mode 100644
    index df654a3691..0000000000
    --- a/bitbake/lib/bb/tests/fetch-testdata/files/v3.10/index.html
    +++ /dev/null
    @@ -1,131 +0,0 @@

    Index of /files/v3.10

    [ICO]  Name    Last modified    Size    Description

    [PARENTDIR]  Parent Directory    -
    [   ]cmake-3.10.0-Darwin-x86_64.dmg2017-11-20 16:00 26M 
    [   ]cmake-3.10.0-Darwin-x86_64.tar.gz2017-11-20 16:00 26M 
    [TXT]cmake-3.10.0-Linux-x86_64.sh2017-11-20 16:00 33M 
    [   ]cmake-3.10.0-Linux-x86_64.tar.gz2017-11-20 16:00 33M 
    [TXT]cmake-3.10.0-SHA-256.txt2017-11-20 16:00 1.0K 
    [TXT]cmake-3.10.0-SHA-256.txt.asc2017-11-20 16:00 833  
    [   ]cmake-3.10.0-rc1-Darwin-x86_64.dmg2017-10-05 15:50 26M 
    [   ]cmake-3.10.0-rc1-Darwin-x86_64.tar.gz2017-10-05 15:50 26M 
    [TXT]cmake-3.10.0-rc1-Linux-x86_64.sh2017-10-05 15:50 31M 
    [   ]cmake-3.10.0-rc1-Linux-x86_64.tar.gz2017-10-05 15:50 31M 
    [TXT]cmake-3.10.0-rc1-SHA-256.txt2017-10-05 15:50 1.0K 
    [TXT]cmake-3.10.0-rc1-SHA-256.txt.asc2017-10-05 15:50 833  
    [   ]cmake-3.10.0-rc1-win32-x86.msi2017-10-05 15:50 16M 
    [   ]cmake-3.10.0-rc1-win32-x86.zip2017-10-05 15:50 22M 
    [   ]cmake-3.10.0-rc1-win64-x64.msi2017-10-05 15:50 18M 
    [   ]cmake-3.10.0-rc1-win64-x64.zip2017-10-05 15:50 25M 
    [   ]cmake-3.10.0-rc1.tar.Z2017-10-05 15:50 12M 
    [   ]cmake-3.10.0-rc1.tar.gz2017-10-05 15:50 7.4M 
    [   ]cmake-3.10.0-rc1.zip2017-10-05 15:50 12M 
    [   ]cmake-3.10.0-rc2-Darwin-x86_64.dmg2017-10-12 12:07 26M 
    [   ]cmake-3.10.0-rc2-Darwin-x86_64.tar.gz2017-10-12 12:07 26M 
    [TXT]cmake-3.10.0-rc2-Linux-x86_64.sh2017-10-12 12:07 31M 
    [   ]cmake-3.10.0-rc2-Linux-x86_64.tar.gz2017-10-12 12:07 31M 
    [TXT]cmake-3.10.0-rc2-SHA-256.txt2017-10-12 12:07 1.0K 
    [TXT]cmake-3.10.0-rc2-SHA-256.txt.asc2017-10-12 12:06 833  
    [   ]cmake-3.10.0-rc2-win32-x86.msi2017-10-12 12:06 16M 
    [   ]cmake-3.10.0-rc2-win32-x86.zip2017-10-12 12:06 22M 
    [   ]cmake-3.10.0-rc2-win64-x64.msi2017-10-12 12:06 18M 
    [   ]cmake-3.10.0-rc2-win64-x64.zip2017-10-12 12:06 25M 
    [   ]cmake-3.10.0-rc2.tar.Z2017-10-12 12:06 12M 
    [   ]cmake-3.10.0-rc2.tar.gz2017-10-12 12:06 7.5M 
    [   ]cmake-3.10.0-rc2.zip2017-10-12 12:06 13M 
    [   ]cmake-3.10.0-rc3-Darwin-x86_64.dmg2017-10-19 13:24 26M 
    [   ]cmake-3.10.0-rc3-Darwin-x86_64.tar.gz2017-10-19 13:24 26M 
    [TXT]cmake-3.10.0-rc3-Linux-x86_64.sh2017-10-19 13:24 31M 
    [   ]cmake-3.10.0-rc3-Linux-x86_64.tar.gz2017-10-19 13:24 31M 
    [TXT]cmake-3.10.0-rc3-SHA-256.txt2017-10-19 13:24 1.0K 
    [TXT]cmake-3.10.0-rc3-SHA-256.txt.asc2017-10-19 13:24 833  
    [   ]cmake-3.10.0-rc3-win32-x86.msi2017-10-19 13:24 16M 
    [   ]cmake-3.10.0-rc3-win32-x86.zip2017-10-19 13:24 22M 
    [   ]cmake-3.10.0-rc3-win64-x64.msi2017-10-19 13:24 18M 
    [   ]cmake-3.10.0-rc3-win64-x64.zip2017-10-19 13:24 25M 
    [   ]cmake-3.10.0-rc3.tar.Z2017-10-19 13:24 12M 
    [   ]cmake-3.10.0-rc3.tar.gz2017-10-19 13:24 7.5M 
    [   ]cmake-3.10.0-rc3.zip2017-10-19 13:24 13M 
    [   ]cmake-3.10.0-rc4-Darwin-x86_64.dmg2017-11-01 15:37 26M 
    [   ]cmake-3.10.0-rc4-Darwin-x86_64.tar.gz2017-11-01 15:37 26M 
    [TXT]cmake-3.10.0-rc4-Linux-x86_64.sh2017-11-01 15:37 31M 
    [   ]cmake-3.10.0-rc4-Linux-x86_64.tar.gz2017-11-01 15:37 31M 
    [TXT]cmake-3.10.0-rc4-SHA-256.txt2017-11-01 15:37 1.0K 
    [TXT]cmake-3.10.0-rc4-SHA-256.txt.asc2017-11-01 15:37 833  
    [   ]cmake-3.10.0-rc4-win32-x86.msi2017-11-01 15:37 16M 
    [   ]cmake-3.10.0-rc4-win32-x86.zip2017-11-01 15:37 22M 
    [   ]cmake-3.10.0-rc4-win64-x64.msi2017-11-01 15:37 18M 
    [   ]cmake-3.10.0-rc4-win64-x64.zip2017-11-01 15:37 25M 
    [   ]cmake-3.10.0-rc4.tar.Z2017-11-01 15:37 12M 
    [   ]cmake-3.10.0-rc4.tar.gz2017-11-01 15:37 7.5M 
    [   ]cmake-3.10.0-rc4.zip2017-11-01 15:37 13M 
    [   ]cmake-3.10.0-rc5-Darwin-x86_64.dmg2017-11-10 14:01 26M 
    [   ]cmake-3.10.0-rc5-Darwin-x86_64.tar.gz2017-11-10 14:01 26M 
    [TXT]cmake-3.10.0-rc5-Linux-x86_64.sh2017-11-10 14:01 31M 
    [   ]cmake-3.10.0-rc5-Linux-x86_64.tar.gz2017-11-10 14:01 31M 
    [TXT]cmake-3.10.0-rc5-SHA-256.txt2017-11-10 14:01 1.0K 
    [TXT]cmake-3.10.0-rc5-SHA-256.txt.asc2017-11-10 14:01 833  
    [   ]cmake-3.10.0-rc5-win32-x86.msi2017-11-10 14:01 16M 
    [   ]cmake-3.10.0-rc5-win32-x86.zip2017-11-10 14:01 22M 
    [   ]cmake-3.10.0-rc5-win64-x64.msi2017-11-10 14:01 18M 
    [   ]cmake-3.10.0-rc5-win64-x64.zip2017-11-10 14:01 25M 
    [   ]cmake-3.10.0-rc5.tar.Z2017-11-10 14:01 12M 
    [   ]cmake-3.10.0-rc5.tar.gz2017-11-10 14:01 7.5M 
    [   ]cmake-3.10.0-rc5.zip2017-11-10 14:00 13M 
    [   ]cmake-3.10.0-win32-x86.msi2017-11-20 16:00 15M 
    [   ]cmake-3.10.0-win32-x86.zip2017-11-20 16:00 21M 
    [   ]cmake-3.10.0-win64-x64.msi2017-11-20 16:00 17M 
    [   ]cmake-3.10.0-win64-x64.zip2017-11-20 16:00 24M 
    [   ]cmake-3.10.0.tar.Z2017-11-20 16:00 12M 
    [   ]cmake-3.10.0.tar.gz2017-11-20 16:00 7.5M 
    [   ]cmake-3.10.0.zip2017-11-20 16:00 12M 
    [   ]cmake-3.10.1-Darwin-x86_64.dmg2017-12-14 09:10 26M 
    [   ]cmake-3.10.1-Darwin-x86_64.tar.gz2017-12-14 09:10 26M 
    [TXT]cmake-3.10.1-Linux-x86_64.sh2017-12-14 09:10 33M 
    [   ]cmake-3.10.1-Linux-x86_64.tar.gz2017-12-14 09:10 33M 
    [TXT]cmake-3.10.1-SHA-256.txt2017-12-14 09:10 1.0K 
    [TXT]cmake-3.10.1-SHA-256.txt.asc2017-12-14 09:10 833  
    [   ]cmake-3.10.1-win32-x86.msi2017-12-14 09:10 15M 
    [   ]cmake-3.10.1-win32-x86.zip2017-12-14 09:10 21M 
    [   ]cmake-3.10.1-win64-x64.msi2017-12-14 09:10 18M 
    [   ]cmake-3.10.1-win64-x64.zip2017-12-14 09:09 25M 
    [   ]cmake-3.10.1.tar.Z2017-12-14 09:09 12M 
    [   ]cmake-3.10.1.tar.gz2017-12-14 09:09 7.5M 
    [   ]cmake-3.10.1.zip2017-12-14 09:09 12M 
    [   ]cmake-3.10.2-Darwin-x86_64.dmg2018-01-18 12:09 26M 
    [   ]cmake-3.10.2-Darwin-x86_64.tar.gz2018-01-18 12:09 26M 
    [TXT]cmake-3.10.2-Linux-x86_64.sh2018-01-18 12:09 33M 
    [   ]cmake-3.10.2-Linux-x86_64.tar.gz2018-01-18 12:09 33M 
    [TXT]cmake-3.10.2-SHA-256.txt2018-01-18 12:09 1.0K 
    [TXT]cmake-3.10.2-SHA-256.txt.asc2018-01-18 12:09 833  
    [   ]cmake-3.10.2-win32-x86.msi2018-01-18 12:09 15M 
    [   ]cmake-3.10.2-win32-x86.zip2018-01-18 12:09 21M 
    [   ]cmake-3.10.2-win64-x64.msi2018-01-18 12:09 18M 
    [   ]cmake-3.10.2-win64-x64.zip2018-01-18 12:09 25M 
    [   ]cmake-3.10.2.tar.Z2018-01-18 12:09 12M 
    [   ]cmake-3.10.2.tar.gz2018-01-18 12:09 7.5M 
    [   ]cmake-3.10.2.zip2018-01-18 12:08 12M 
    [   ]cmake-3.10.3-Darwin-x86_64.dmg2018-03-16 09:38 26M 
    [   ]cmake-3.10.3-Darwin-x86_64.tar.gz2018-03-16 09:38 26M 
    [TXT]cmake-3.10.3-Linux-x86_64.sh2018-03-16 09:38 33M 
    [   ]cmake-3.10.3-Linux-x86_64.tar.gz2018-03-16 09:38 33M 
    [TXT]cmake-3.10.3-SHA-256.txt2018-03-16 09:38 1.0K 
    [TXT]cmake-3.10.3-SHA-256.txt.asc2018-03-16 09:38 833  
    [   ]cmake-3.10.3-win32-x86.msi2018-03-16 09:38 15M 
    [   ]cmake-3.10.3-win32-x86.zip2018-03-16 09:38 21M 
    [   ]cmake-3.10.3-win64-x64.msi2018-03-16 09:38 18M 
    [   ]cmake-3.10.3-win64-x64.zip2018-03-16 09:38 25M 
    [   ]cmake-3.10.3.tar.Z2018-03-16 09:38 12M 
    [   ]cmake-3.10.3.tar.gz2018-03-16 09:38 7.5M 
    [   ]cmake-3.10.3.zip2018-03-16 09:38 12M 

    diff --git a/bitbake/lib/bb/tests/fetch-testdata/files/v3.11/index.html b/bitbake/lib/bb/tests/fetch-testdata/files/v3.11/index.html
    deleted file mode 100644
    index 869d7427a1..0000000000
    --- a/bitbake/lib/bb/tests/fetch-testdata/files/v3.11/index.html
    +++ /dev/null
    @@ -1,131 +0,0 @@

    Index of /files/v3.11

    [ICO]  Name    Last modified    Size    Description

    [PARENTDIR]  Parent Directory    -
    [   ]cmake-3.11.0-Darwin-x86_64.dmg2018-03-28 13:40 26M 
    [   ]cmake-3.11.0-Darwin-x86_64.tar.gz2018-03-28 13:40 26M 
    [TXT]cmake-3.11.0-Linux-x86_64.sh2018-03-28 13:40 33M 
    [   ]cmake-3.11.0-Linux-x86_64.tar.gz2018-03-28 13:40 33M 
    [TXT]cmake-3.11.0-SHA-256.txt2018-03-28 13:40 1.0K 
    [TXT]cmake-3.11.0-SHA-256.txt.asc2018-03-28 13:40 833  
    [   ]cmake-3.11.0-rc1-Darwin-x86_64.dmg2018-02-15 11:53 26M 
    [   ]cmake-3.11.0-rc1-Darwin-x86_64.tar.gz2018-02-15 11:53 26M 
    [TXT]cmake-3.11.0-rc1-Linux-x86_64.sh2018-02-15 11:53 33M 
    [   ]cmake-3.11.0-rc1-Linux-x86_64.tar.gz2018-02-15 11:53 33M 
    [TXT]cmake-3.11.0-rc1-SHA-256.txt2018-02-15 11:53 1.0K 
    [TXT]cmake-3.11.0-rc1-SHA-256.txt.asc2018-02-15 11:53 833  
    [   ]cmake-3.11.0-rc1-win32-x86.msi2018-02-15 11:53 16M 
    [   ]cmake-3.11.0-rc1-win32-x86.zip2018-02-15 11:53 22M 
    [   ]cmake-3.11.0-rc1-win64-x64.msi2018-02-15 11:53 19M 
    [   ]cmake-3.11.0-rc1-win64-x64.zip2018-02-15 11:53 26M 
    [   ]cmake-3.11.0-rc1.tar.Z2018-02-15 11:52 12M 
    [   ]cmake-3.11.0-rc1.tar.gz2018-02-15 11:52 7.6M 
    [   ]cmake-3.11.0-rc1.zip2018-02-15 11:52 13M 
    [   ]cmake-3.11.0-rc2-Darwin-x86_64.dmg2018-02-27 11:09 26M 
    [   ]cmake-3.11.0-rc2-Darwin-x86_64.tar.gz2018-02-27 11:09 26M 
    [TXT]cmake-3.11.0-rc2-Linux-x86_64.sh2018-02-27 11:09 33M 
    [   ]cmake-3.11.0-rc2-Linux-x86_64.tar.gz2018-02-27 11:09 33M 
    [TXT]cmake-3.11.0-rc2-SHA-256.txt2018-02-27 11:09 1.0K 
    [TXT]cmake-3.11.0-rc2-SHA-256.txt.asc2018-02-27 11:09 833  
    [   ]cmake-3.11.0-rc2-win32-x86.msi2018-02-27 11:09 16M 
    [   ]cmake-3.11.0-rc2-win32-x86.zip2018-02-27 11:09 22M 
    [   ]cmake-3.11.0-rc2-win64-x64.msi2018-02-27 11:09 19M 
    [   ]cmake-3.11.0-rc2-win64-x64.zip2018-02-27 11:09 26M 
    [   ]cmake-3.11.0-rc2.tar.Z2018-02-27 11:09 12M 
    [   ]cmake-3.11.0-rc2.tar.gz2018-02-27 11:09 7.6M 
    [   ]cmake-3.11.0-rc2.zip2018-02-27 11:09 13M 
    [   ]cmake-3.11.0-rc3-Darwin-x86_64.dmg2018-03-09 13:46 26M 
    [   ]cmake-3.11.0-rc3-Darwin-x86_64.tar.gz2018-03-09 13:46 26M 
    [TXT]cmake-3.11.0-rc3-Linux-x86_64.sh2018-03-09 13:46 33M 
    [   ]cmake-3.11.0-rc3-Linux-x86_64.tar.gz2018-03-09 13:46 33M 
    [TXT]cmake-3.11.0-rc3-SHA-256.txt2018-03-09 13:46 1.0K 
    [TXT]cmake-3.11.0-rc3-SHA-256.txt.asc2018-03-09 13:46 833  
    [   ]cmake-3.11.0-rc3-win32-x86.msi2018-03-09 13:46 16M 
    [   ]cmake-3.11.0-rc3-win32-x86.zip2018-03-09 13:45 22M 
    [   ]cmake-3.11.0-rc3-win64-x64.msi2018-03-09 13:45 19M 
    [   ]cmake-3.11.0-rc3-win64-x64.zip2018-03-09 13:45 26M 
    [   ]cmake-3.11.0-rc3.tar.Z2018-03-09 13:45 12M 
    [   ]cmake-3.11.0-rc3.tar.gz2018-03-09 13:45 7.6M 
    [   ]cmake-3.11.0-rc3.zip2018-03-09 13:45 13M 
    [   ]cmake-3.11.0-rc4-Darwin-x86_64.dmg2018-03-19 11:07 26M 
    [   ]cmake-3.11.0-rc4-Darwin-x86_64.tar.gz2018-03-19 11:07 26M 
    [TXT]cmake-3.11.0-rc4-Linux-x86_64.sh2018-03-19 11:06 33M 
    [   ]cmake-3.11.0-rc4-Linux-x86_64.tar.gz2018-03-19 11:06 33M 
    [TXT]cmake-3.11.0-rc4-SHA-256.txt2018-03-19 11:06 1.0K 
    [TXT]cmake-3.11.0-rc4-SHA-256.txt.asc2018-03-19 11:06 833  
    [   ]cmake-3.11.0-rc4-win32-x86.msi2018-03-19 11:06 16M 
    [   ]cmake-3.11.0-rc4-win32-x86.zip2018-03-19 11:06 22M 
    [   ]cmake-3.11.0-rc4-win64-x64.msi2018-03-19 11:06 19M 
    [   ]cmake-3.11.0-rc4-win64-x64.zip2018-03-19 11:06 26M 
    [   ]cmake-3.11.0-rc4.tar.Z2018-03-19 11:06 12M 
    [   ]cmake-3.11.0-rc4.tar.gz2018-03-19 11:06 7.6M 
    [   ]cmake-3.11.0-rc4.zip2018-03-19 11:06 13M 
    [   ]cmake-3.11.0-win32-x86.msi2018-03-28 13:40 16M 
    [   ]cmake-3.11.0-win32-x86.zip2018-03-28 13:40 22M 
    [   ]cmake-3.11.0-win64-x64.msi2018-03-28 13:40 19M 
    [   ]cmake-3.11.0-win64-x64.zip2018-03-28 13:40 26M 
    [   ]cmake-3.11.0.tar.Z2018-03-28 13:40 12M 
    [   ]cmake-3.11.0.tar.gz2018-03-28 13:40 7.6M 
    [   ]cmake-3.11.0.zip2018-03-28 13:40 13M 
    [   ]cmake-3.11.1-Darwin-x86_64.dmg2018-04-17 11:33 26M 
    [   ]cmake-3.11.1-Darwin-x86_64.tar.gz2018-04-17 11:32 26M 
    [TXT]cmake-3.11.1-Linux-x86_64.sh2018-04-17 11:32 33M 
    [   ]cmake-3.11.1-Linux-x86_64.tar.gz2018-04-17 11:32 33M 
    [TXT]cmake-3.11.1-SHA-256.txt2018-04-17 11:32 1.0K 
    [TXT]cmake-3.11.1-SHA-256.txt.asc2018-04-17 11:32 833  
    [   ]cmake-3.11.1-win32-x86.msi2018-04-17 11:32 16M 
    [   ]cmake-3.11.1-win32-x86.zip2018-04-17 11:32 22M 
    [   ]cmake-3.11.1-win64-x64.msi2018-04-17 11:32 19M 
    [   ]cmake-3.11.1-win64-x64.zip2018-04-17 11:32 26M 
    [   ]cmake-3.11.1.tar.Z2018-04-17 11:32 12M 
    [   ]cmake-3.11.1.tar.gz2018-04-17 11:32 7.6M 
    [   ]cmake-3.11.1.zip2018-04-17 11:32 13M 
    [   ]cmake-3.11.2-Darwin-x86_64.dmg2018-05-17 12:45 26M 
    [   ]cmake-3.11.2-Darwin-x86_64.tar.gz2018-05-17 12:45 26M 
    [TXT]cmake-3.11.2-Linux-x86_64.sh2018-05-17 12:45 33M 
    [   ]cmake-3.11.2-Linux-x86_64.tar.gz2018-05-17 12:45 33M 
    [TXT]cmake-3.11.2-SHA-256.txt2018-05-17 12:45 1.0K 
    [TXT]cmake-3.11.2-SHA-256.txt.asc2018-05-17 12:45 833  
    [   ]cmake-3.11.2-win32-x86.msi2018-05-17 12:45 16M 
    [   ]cmake-3.11.2-win32-x86.zip2018-05-17 12:45 22M 
    [   ]cmake-3.11.2-win64-x64.msi2018-05-17 12:45 18M 
    [   ]cmake-3.11.2-win64-x64.zip2018-05-17 12:44 25M 
    [   ]cmake-3.11.2.tar.Z2018-05-17 12:44 12M 
    [   ]cmake-3.11.2.tar.gz2018-05-17 12:44 7.6M 
    [   ]cmake-3.11.2.zip2018-05-17 12:44 13M 
    [   ]cmake-3.11.3-Darwin-x86_64.dmg2018-05-31 15:53 26M 
    [   ]cmake-3.11.3-Darwin-x86_64.tar.gz2018-05-31 15:53 26M 
    [TXT]cmake-3.11.3-Linux-x86_64.sh2018-05-31 15:53 33M 
    [   ]cmake-3.11.3-Linux-x86_64.tar.gz2018-05-31 15:52 33M 
    [TXT]cmake-3.11.3-SHA-256.txt2018-05-31 15:52 1.0K 
    [TXT]cmake-3.11.3-SHA-256.txt.asc2018-05-31 15:52 833  
    [   ]cmake-3.11.3-win32-x86.msi2018-05-31 15:52 16M 
    [   ]cmake-3.11.3-win32-x86.zip2018-05-31 15:52 22M 
    [   ]cmake-3.11.3-win64-x64.msi2018-05-31 15:52 18M 
    [   ]cmake-3.11.3-win64-x64.zip2018-05-31 15:52 25M 
    [   ]cmake-3.11.3.tar.Z2018-05-31 15:52 12M 
    [   ]cmake-3.11.3.tar.gz2018-05-31 15:52 7.6M 
    [   ]cmake-3.11.3.zip2018-05-31 15:52 13M 
    [   ]cmake-3.11.4-Darwin-x86_64.dmg2018-06-14 13:50 26M 
    [   ]cmake-3.11.4-Darwin-x86_64.tar.gz2018-06-14 13:50 26M 
    [TXT]cmake-3.11.4-Linux-x86_64.sh2018-06-14 13:50 33M 
    [   ]cmake-3.11.4-Linux-x86_64.tar.gz2018-06-14 13:50 33M 
    [TXT]cmake-3.11.4-SHA-256.txt2018-06-14 13:50 1.0K 
    [TXT]cmake-3.11.4-SHA-256.txt.asc2018-06-14 13:50 833  
    [   ]cmake-3.11.4-win32-x86.msi2018-06-14 13:50 16M 
    [   ]cmake-3.11.4-win32-x86.zip2018-06-14 13:50 22M 
    [   ]cmake-3.11.4-win64-x64.msi2018-06-14 13:50 18M 
    [   ]cmake-3.11.4-win64-x64.zip2018-06-14 13:50 25M 
    [   ]cmake-3.11.4.tar.Z2018-06-14 13:50 12M 
    [   ]cmake-3.11.4.tar.gz2018-06-14 13:50 7.6M 
    [   ]cmake-3.11.4.zip2018-06-14 13:50 13M 

    diff --git a/bitbake/lib/bb/tests/fetch-testdata/files/v3.12/index.html b/bitbake/lib/bb/tests/fetch-testdata/files/v3.12/index.html
    deleted file mode 100644
    index 1977e7e07f..0000000000
    --- a/bitbake/lib/bb/tests/fetch-testdata/files/v3.12/index.html
    +++ /dev/null
    @@ -1,118 +0,0 @@

    Index of /files/v3.12

    [ICO]  Name    Last modified    Size    Description

    [PARENTDIR]  Parent Directory    -
    [   ]cmake-3.12.0-Darwin-x86_64.dmg2018-07-17 09:58 27M 
    [   ]cmake-3.12.0-Darwin-x86_64.tar.gz2018-07-17 09:58 27M 
    [TXT]cmake-3.12.0-Linux-x86_64.sh2018-07-17 09:58 33M 
    [   ]cmake-3.12.0-Linux-x86_64.tar.gz2018-07-17 09:58 33M 
    [TXT]cmake-3.12.0-SHA-256.txt2018-07-17 09:58 1.0K 
    [TXT]cmake-3.12.0-SHA-256.txt.asc2018-07-17 09:58 833  
    [   ]cmake-3.12.0-rc1-Darwin-x86_64.dmg2018-06-14 15:01 27M 
    [   ]cmake-3.12.0-rc1-Darwin-x86_64.tar.gz2018-06-14 15:01 27M 
    [TXT]cmake-3.12.0-rc1-Linux-x86_64.sh2018-06-14 15:01 29M 
    [   ]cmake-3.12.0-rc1-Linux-x86_64.tar.gz2018-06-14 15:01 29M 
    [TXT]cmake-3.12.0-rc1-SHA-256.txt2018-06-14 15:01 1.0K 
    [TXT]cmake-3.12.0-rc1-SHA-256.txt.asc2018-06-14 15:01 833  
    [   ]cmake-3.12.0-rc1-win32-x86.msi2018-06-14 15:01 16M 
    [   ]cmake-3.12.0-rc1-win32-x86.zip2018-06-14 15:01 23M 
    [   ]cmake-3.12.0-rc1-win64-x64.msi2018-06-14 15:01 18M 
    [   ]cmake-3.12.0-rc1-win64-x64.zip2018-06-14 15:01 25M 
    [   ]cmake-3.12.0-rc1.tar.Z2018-06-14 15:01 13M 
    [   ]cmake-3.12.0-rc1.tar.gz2018-06-14 15:01 7.7M 
    [   ]cmake-3.12.0-rc1.zip2018-06-14 15:01 13M 
    [   ]cmake-3.12.0-rc2-Darwin-x86_64.dmg2018-06-29 13:57 27M 
    [   ]cmake-3.12.0-rc2-Darwin-x86_64.tar.gz2018-06-29 13:57 27M 
    [TXT]cmake-3.12.0-rc2-Linux-x86_64.sh2018-06-29 13:57 29M 
    [   ]cmake-3.12.0-rc2-Linux-x86_64.tar.gz2018-06-29 13:57 29M 
    [TXT]cmake-3.12.0-rc2-SHA-256.txt2018-06-29 13:57 1.0K 
    [TXT]cmake-3.12.0-rc2-SHA-256.txt.asc2018-06-29 13:57 833  
    [   ]cmake-3.12.0-rc2-win32-x86.msi2018-06-29 13:56 16M 
    [   ]cmake-3.12.0-rc2-win32-x86.zip2018-06-29 13:56 23M 
    [   ]cmake-3.12.0-rc2-win64-x64.msi2018-06-29 13:56 18M 
    [   ]cmake-3.12.0-rc2-win64-x64.zip2018-06-29 13:56 25M 
    [   ]cmake-3.12.0-rc2.tar.Z2018-06-29 13:56 13M 
    [   ]cmake-3.12.0-rc2.tar.gz2018-06-29 13:56 7.7M 
    [   ]cmake-3.12.0-rc2.zip2018-06-29 13:56 13M 
    [   ]cmake-3.12.0-rc3-Darwin-x86_64.dmg2018-07-09 11:38 27M 
    [   ]cmake-3.12.0-rc3-Darwin-x86_64.tar.gz2018-07-09 11:38 27M 
    [TXT]cmake-3.12.0-rc3-Linux-x86_64.sh2018-07-09 11:38 33M 
    [   ]cmake-3.12.0-rc3-Linux-x86_64.tar.gz2018-07-09 11:38 33M 
    [TXT]cmake-3.12.0-rc3-SHA-256.txt2018-07-09 11:38 1.0K 
    [TXT]cmake-3.12.0-rc3-SHA-256.txt.asc2018-07-09 11:38 833  
    [   ]cmake-3.12.0-rc3-win32-x86.msi2018-07-09 11:38 16M 
    [   ]cmake-3.12.0-rc3-win32-x86.zip2018-07-09 11:38 23M 
    [   ]cmake-3.12.0-rc3-win64-x64.msi2018-07-09 11:37 18M 
    [   ]cmake-3.12.0-rc3-win64-x64.zip2018-07-09 11:37 25M 
    [   ]cmake-3.12.0-rc3.tar.Z2018-07-09 11:37 13M 
    [   ]cmake-3.12.0-rc3.tar.gz2018-07-09 11:37 7.7M 
    [   ]cmake-3.12.0-rc3.zip2018-07-09 11:37 13M 
    [   ]cmake-3.12.0-win32-x86.msi2018-07-17 09:58 16M 
    [   ]cmake-3.12.0-win32-x86.zip2018-07-17 09:58 23M 
    [   ]cmake-3.12.0-win64-x64.msi2018-07-17 09:58 18M 
    [   ]cmake-3.12.0-win64-x64.zip2018-07-17 09:58 25M 
    [   ]cmake-3.12.0.tar.Z2018-07-17 09:58 13M 
    [   ]cmake-3.12.0.tar.gz2018-07-17 09:58 7.7M 
    [   ]cmake-3.12.0.zip2018-07-17 09:57 13M 
    [   ]cmake-3.12.1-Darwin-x86_64.dmg2018-08-09 11:21 27M 
    [   ]cmake-3.12.1-Darwin-x86_64.tar.gz2018-08-09 11:21 27M 
    [TXT]cmake-3.12.1-Linux-x86_64.sh2018-08-09 11:21 33M 
    [   ]cmake-3.12.1-Linux-x86_64.tar.gz2018-08-09 11:21 33M 
    [TXT]cmake-3.12.1-SHA-256.txt2018-08-09 11:20 1.0K 
    [TXT]cmake-3.12.1-SHA-256.txt.asc2018-08-09 11:20 833  
    [   ]cmake-3.12.1-win32-x86.msi2018-08-09 11:20 16M 
    [   ]cmake-3.12.1-win32-x86.zip2018-08-09 11:20 23M 
    [   ]cmake-3.12.1-win64-x64.msi2018-08-09 11:20 18M 
    [   ]cmake-3.12.1-win64-x64.zip2018-08-09 11:20 25M 
    [   ]cmake-3.12.1.tar.Z2018-08-09 11:20 13M 
    [   ]cmake-3.12.1.tar.gz2018-08-09 11:20 7.7M 
    [   ]cmake-3.12.1.zip2018-08-09 11:20 13M 
    [   ]cmake-3.12.2-Darwin-x86_64.dmg2018-09-07 12:51 28M 
    [   ]cmake-3.12.2-Darwin-x86_64.tar.gz2018-09-07 12:51 28M 
    [TXT]cmake-3.12.2-Linux-x86_64.sh2018-09-07 12:51 33M 
    [   ]cmake-3.12.2-Linux-x86_64.tar.gz2018-09-07 12:51 33M 
    [TXT]cmake-3.12.2-SHA-256.txt2018-09-07 12:51 1.0K 
    [TXT]cmake-3.12.2-SHA-256.txt.asc2018-09-07 12:51 833  
    [   ]cmake-3.12.2-win32-x86.msi2018-09-07 12:51 16M 
    [   ]cmake-3.12.2-win32-x86.zip2018-09-07 12:51 22M 
    [   ]cmake-3.12.2-win64-x64.msi2018-09-07 12:51 18M 
    [   ]cmake-3.12.2-win64-x64.zip2018-09-07 12:51 25M 
    [   ]cmake-3.12.2.tar.Z2018-09-07 12:51 13M 
    [   ]cmake-3.12.2.tar.gz2018-09-07 12:51 8.0M 
    [   ]cmake-3.12.2.zip2018-09-07 12:51 13M 
    [   ]cmake-3.12.3-Darwin-x86_64.dmg2018-10-03 10:46 28M 
    [   ]cmake-3.12.3-Darwin-x86_64.tar.gz2018-10-03 10:46 28M 
    [TXT]cmake-3.12.3-Linux-x86_64.sh2018-10-03 10:45 33M 
    [   ]cmake-3.12.3-Linux-x86_64.tar.gz2018-10-03 10:45 33M 
    [TXT]cmake-3.12.3-SHA-256.txt2018-10-03 10:45 1.0K 
    [TXT]cmake-3.12.3-SHA-256.txt.asc2018-10-03 10:45 833  
    [   ]cmake-3.12.3-win32-x86.msi2018-10-03 10:45 16M 
    [   ]cmake-3.12.3-win32-x86.zip2018-10-03 10:45 22M 
    [   ]cmake-3.12.3-win64-x64.msi2018-10-03 10:45 18M 
    [   ]cmake-3.12.3-win64-x64.zip2018-10-03 10:45 25M 
    [   ]cmake-3.12.3.tar.Z2018-10-03 10:45 13M 
    [   ]cmake-3.12.3.tar.gz2018-10-03 10:45 8.0M 
    [   ]cmake-3.12.3.zip2018-10-03 10:45 13M 
    [   ]cmake-3.12.4-Darwin-x86_64.dmg2018-11-02 13:51 28M 
    [   ]cmake-3.12.4-Darwin-x86_64.tar.gz2018-11-02 13:51 28M 
    [TXT]cmake-3.12.4-Linux-x86_64.sh2018-11-02 13:51 33M 
    [   ]cmake-3.12.4-Linux-x86_64.tar.gz2018-11-02 13:51 33M 
    [TXT]cmake-3.12.4-SHA-256.txt2018-11-02 13:51 1.0K 
    [TXT]cmake-3.12.4-SHA-256.txt.asc2018-11-02 13:51 833  
    [   ]cmake-3.12.4-win32-x86.msi2018-11-02 13:51 16M 
    [   ]cmake-3.12.4-win32-x86.zip2018-11-02 13:51 22M 
    [   ]cmake-3.12.4-win64-x64.msi2018-11-02 13:51 18M 
    [   ]cmake-3.12.4-win64-x64.zip2018-11-02 13:51 25M 
    [   ]cmake-3.12.4.tar.Z2018-11-02 13:51 13M 
    [   ]cmake-3.12.4.tar.gz2018-11-02 13:51 8.0M 
    [   ]cmake-3.12.4.zip2018-11-02 13:51 13M 

    diff --git a/bitbake/lib/bb/tests/fetch-testdata/files/v3.13/index.html b/bitbake/lib/bb/tests/fetch-testdata/files/v3.13/index.html
    deleted file mode 100644
    index b50a791b80..0000000000
    --- a/bitbake/lib/bb/tests/fetch-testdata/files/v3.13/index.html
    +++ /dev/null
    @@ -1,131 +0,0 @@

    Index of /files/v3.13

    [ICO]  Name    Last modified    Size    Description

    [PARENTDIR]  Parent Directory    -
    [   ]cmake-3.13.0-Darwin-x86_64.dmg2018-11-20 14:05 31M 
    [   ]cmake-3.13.0-Darwin-x86_64.tar.gz2018-11-20 14:05 31M 
    [TXT]cmake-3.13.0-Linux-x86_64.sh2018-11-20 14:05 37M 
    [   ]cmake-3.13.0-Linux-x86_64.tar.gz2018-11-20 14:05 37M 
    [TXT]cmake-3.13.0-SHA-256.txt2018-11-20 14:05 1.0K 
    [TXT]cmake-3.13.0-SHA-256.txt.asc2018-11-20 14:05 833  
    [   ]cmake-3.13.0-rc1-Darwin-x86_64.dmg2018-10-09 11:32 29M 
    [   ]cmake-3.13.0-rc1-Darwin-x86_64.tar.gz2018-10-09 11:32 28M 
    [TXT]cmake-3.13.0-rc1-Linux-x86_64.sh2018-10-09 11:32 34M 
    [   ]cmake-3.13.0-rc1-Linux-x86_64.tar.gz2018-10-09 11:32 34M 
    [TXT]cmake-3.13.0-rc1-SHA-256.txt2018-10-09 11:32 1.0K 
    [TXT]cmake-3.13.0-rc1-SHA-256.txt.asc2018-10-09 11:32 833  
    [   ]cmake-3.13.0-rc1-win32-x86.msi2018-10-09 11:32 16M 
    [   ]cmake-3.13.0-rc1-win32-x86.zip2018-10-09 11:32 23M 
    [   ]cmake-3.13.0-rc1-win64-x64.msi2018-10-09 11:32 19M 
    [   ]cmake-3.13.0-rc1-win64-x64.zip2018-10-09 11:32 26M 
    [   ]cmake-3.13.0-rc1.tar.Z2018-10-09 11:32 13M 
    [   ]cmake-3.13.0-rc1.tar.gz2018-10-09 11:32 8.2M 
    [   ]cmake-3.13.0-rc1.zip2018-10-09 11:32 13M 
    [   ]cmake-3.13.0-rc2-Darwin-x86_64.dmg2018-10-25 10:28 31M 
    [   ]cmake-3.13.0-rc2-Darwin-x86_64.tar.gz2018-10-25 10:28 31M 
    [TXT]cmake-3.13.0-rc2-Linux-x86_64.sh2018-10-25 10:27 37M 
    [   ]cmake-3.13.0-rc2-Linux-x86_64.tar.gz2018-10-25 10:27 37M 
    [TXT]cmake-3.13.0-rc2-SHA-256.txt2018-10-25 10:27 1.0K 
    [TXT]cmake-3.13.0-rc2-SHA-256.txt.asc2018-10-25 10:27 833  
    [   ]cmake-3.13.0-rc2-win32-x86.msi2018-10-25 10:27 19M 
    [   ]cmake-3.13.0-rc2-win32-x86.zip2018-10-25 10:27 25M 
    [   ]cmake-3.13.0-rc2-win64-x64.msi2018-10-25 10:27 21M 
    [   ]cmake-3.13.0-rc2-win64-x64.zip2018-10-25 10:27 28M 
    [   ]cmake-3.13.0-rc2.tar.Z2018-10-25 10:27 13M 
    [   ]cmake-3.13.0-rc2.tar.gz2018-10-25 10:27 8.2M 
    [   ]cmake-3.13.0-rc2.zip2018-10-25 10:27 13M 
    [   ]cmake-3.13.0-rc3-Darwin-x86_64.dmg2018-11-07 12:11 31M 
    [   ]cmake-3.13.0-rc3-Darwin-x86_64.tar.gz2018-11-07 12:11 31M 
    [TXT]cmake-3.13.0-rc3-Linux-x86_64.sh2018-11-07 12:11 37M 
    [   ]cmake-3.13.0-rc3-Linux-x86_64.tar.gz2018-11-07 12:11 37M 
    [TXT]cmake-3.13.0-rc3-SHA-256.txt2018-11-07 12:11 1.0K 
    [TXT]cmake-3.13.0-rc3-SHA-256.txt.asc2018-11-07 12:11 833  
    [   ]cmake-3.13.0-rc3-win32-x86.msi2018-11-07 12:11 19M 
    [   ]cmake-3.13.0-rc3-win32-x86.zip2018-11-07 12:11 25M 
    [   ]cmake-3.13.0-rc3-win64-x64.msi2018-11-07 12:11 21M 
    [   ]cmake-3.13.0-rc3-win64-x64.zip2018-11-07 12:11 28M 
    [   ]cmake-3.13.0-rc3.tar.Z2018-11-07 12:11 13M 
    [   ]cmake-3.13.0-rc3.tar.gz2018-11-07 12:11 8.2M 
    [   ]cmake-3.13.0-rc3.zip2018-11-07 12:11 13M 
    [   ]cmake-3.13.0-win32-x86.msi2018-11-20 14:05 19M 
    [   ]cmake-3.13.0-win32-x86.zip2018-11-20 14:05 25M 
    [   ]cmake-3.13.0-win64-x64.msi2018-11-20 14:05 21M 
    [   ]cmake-3.13.0-win64-x64.zip2018-11-20 14:05 28M 
    [   ]cmake-3.13.0.tar.Z2018-11-20 14:04 13M 
    [   ]cmake-3.13.0.tar.gz2018-11-20 14:04 8.2M 
    [   ]cmake-3.13.0.zip2018-11-20 14:04 13M 
    [   ]cmake-3.13.1-Darwin-x86_64.dmg2018-11-28 08:50 31M 
    [   ]cmake-3.13.1-Darwin-x86_64.tar.gz2018-11-28 08:50 31M 
    [TXT]cmake-3.13.1-Linux-x86_64.sh2018-11-28 08:50 37M 
    [   ]cmake-3.13.1-Linux-x86_64.tar.gz2018-11-28 08:50 37M 
    [TXT]cmake-3.13.1-SHA-256.txt2018-11-28 08:50 1.0K 
    [TXT]cmake-3.13.1-SHA-256.txt.asc2018-11-28 08:50 833  
    [   ]cmake-3.13.1-win32-x86.msi2018-11-28 08:50 19M 
    [   ]cmake-3.13.1-win32-x86.zip2018-11-28 08:50 25M 
    [   ]cmake-3.13.1-win64-x64.msi2018-11-28 08:50 21M 
    [   ]cmake-3.13.1-win64-x64.zip2018-11-28 08:50 28M 
    [   ]cmake-3.13.1.tar.Z2018-11-28 08:50 13M 
    [   ]cmake-3.13.1.tar.gz2018-11-28 08:50 8.2M 
    [   ]cmake-3.13.1.zip2018-11-28 08:50 13M 
    [   ]cmake-3.13.2-Darwin-x86_64.dmg2018-12-13 08:41 31M 
    [   ]cmake-3.13.2-Darwin-x86_64.tar.gz2018-12-13 08:41 31M 
    [TXT]cmake-3.13.2-Linux-x86_64.sh2018-12-13 08:41 37M 
    [   ]cmake-3.13.2-Linux-x86_64.tar.gz2018-12-13 08:41 37M 
    [TXT]cmake-3.13.2-SHA-256.txt2018-12-13 08:41 1.0K 
    [TXT]cmake-3.13.2-SHA-256.txt.asc2018-12-13 08:41 833  
    [   ]cmake-3.13.2-win32-x86.msi2018-12-13 08:41 19M 
    [   ]cmake-3.13.2-win32-x86.zip2018-12-13 08:41 25M 
    [   ]cmake-3.13.2-win64-x64.msi2018-12-13 08:41 21M 
    [   ]cmake-3.13.2-win64-x64.zip2018-12-13 08:41 28M 
    [   ]cmake-3.13.2.tar.Z2018-12-13 08:41 13M 
    [   ]cmake-3.13.2.tar.gz2018-12-13 08:41 8.2M 
    [   ]cmake-3.13.2.zip2018-12-13 08:41 13M 
    [   ]cmake-3.13.3-Darwin-x86_64.dmg2019-01-14 09:26 31M 
    [   ]cmake-3.13.3-Darwin-x86_64.tar.gz2019-01-14 09:26 31M 
    [TXT]cmake-3.13.3-Linux-x86_64.sh2019-01-14 09:25 37M 
    [   ]cmake-3.13.3-Linux-x86_64.tar.gz2019-01-14 09:25 37M 
    [TXT]cmake-3.13.3-SHA-256.txt2019-01-14 09:25 1.0K 
    [TXT]cmake-3.13.3-SHA-256.txt.asc2019-01-14 09:25 833  
    [   ]cmake-3.13.3-win32-x86.msi2019-01-14 09:25 19M 
    [   ]cmake-3.13.3-win32-x86.zip2019-01-14 09:25 25M 
    [   ]cmake-3.13.3-win64-x64.msi2019-01-14 09:25 21M 
    [   ]cmake-3.13.3-win64-x64.zip2019-01-14 09:25 28M 
    [   ]cmake-3.13.3.tar.Z2019-01-14 09:25 13M 
    [   ]cmake-3.13.3.tar.gz2019-01-14 09:25 8.2M 
    [   ]cmake-3.13.3.zip2019-01-14 09:25 13M 
    [   ]cmake-3.13.4-Darwin-x86_64.dmg2019-02-01 13:20 31M 
    [   ]cmake-3.13.4-Darwin-x86_64.tar.gz2019-02-01 13:20 31M 
    [TXT]cmake-3.13.4-Linux-x86_64.sh2019-02-01 13:20 37M 
    [   ]cmake-3.13.4-Linux-x86_64.tar.gz2019-02-01 13:20 37M 
    [TXT]cmake-3.13.4-SHA-256.txt2019-02-01 13:20 1.0K 
    [TXT]cmake-3.13.4-SHA-256.txt.asc2019-02-01 13:20 833  
    [   ]cmake-3.13.4-win32-x86.msi2019-02-01 13:20 19M 
    [   ]cmake-3.13.4-win32-x86.zip2019-02-01 13:20 25M 
    [   ]cmake-3.13.4-win64-x64.msi2019-02-01 13:20 21M 
    [   ]cmake-3.13.4-win64-x64.zip2019-02-01 13:20 28M 
    [   ]cmake-3.13.4.tar.Z2019-02-01 13:20 13M 
    [   ]cmake-3.13.4.tar.gz2019-02-01 13:20 8.2M 
    [   ]cmake-3.13.4.zip2019-02-01 13:20 13M 
    [   ]cmake-3.13.5-Darwin-x86_64.dmg2019-05-14 12:44 31M 
    [   ]cmake-3.13.5-Darwin-x86_64.tar.gz2019-05-14 12:44 31M 
    [TXT]cmake-3.13.5-Linux-x86_64.sh2019-05-14 12:44 37M 
    [   ]cmake-3.13.5-Linux-x86_64.tar.gz2019-05-14 12:44 37M 
    [TXT]cmake-3.13.5-SHA-256.txt2019-05-14 12:43 1.0K 
    [TXT]cmake-3.13.5-SHA-256.txt.asc2019-05-14 12:43 833  
    [   ]cmake-3.13.5-win32-x86.msi2019-05-14 12:43 19M 
    [   ]cmake-3.13.5-win32-x86.zip2019-05-14 12:43 25M 
    [   ]cmake-3.13.5-win64-x64.msi2019-05-14 12:43 21M 
    [   ]cmake-3.13.5-win64-x64.zip2019-05-14 12:43 28M 
    [   ]cmake-3.13.5.tar.Z2019-05-14 12:43 13M 
    [   ]cmake-3.13.5.tar.gz2019-05-14 12:43 8.2M 
    [   ]cmake-3.13.5.zip2019-05-14 12:43 13M 

    diff --git a/bitbake/lib/bb/tests/fetch-testdata/files/v3.14/index.html b/bitbake/lib/bb/tests/fetch-testdata/files/v3.14/index.html
    deleted file mode 100644
    index bec9c14d45..0000000000
    --- a/bitbake/lib/bb/tests/fetch-testdata/files/v3.14/index.html
    +++ /dev/null
    @@ -1,170 +0,0 @@

    Index of /files/v3.14

    [ICO]  Name    Last modified    Size    Description

    [PARENTDIR]  Parent Directory    -
    [   ]cmake-3.14.0-Darwin-x86_64.dmg2019-03-14 11:37 33M 
    [   ]cmake-3.14.0-Darwin-x86_64.tar.gz2019-03-14 11:37 32M 
    [TXT]cmake-3.14.0-Linux-x86_64.sh2019-03-14 11:37 35M 
    [   ]cmake-3.14.0-Linux-x86_64.tar.gz2019-03-14 11:37 35M 
    [TXT]cmake-3.14.0-SHA-256.txt2019-03-22 11:03 1.0K 
    [TXT]cmake-3.14.0-SHA-256.txt.asc2019-03-22 11:03 833  
    [   ]cmake-3.14.0-rc1-Darwin-x86_64.dmg2019-02-07 10:36 33M 
    [   ]cmake-3.14.0-rc1-Darwin-x86_64.tar.gz2019-02-07 10:36 32M 
    [TXT]cmake-3.14.0-rc1-Linux-x86_64.sh2019-02-07 10:36 35M 
    [   ]cmake-3.14.0-rc1-Linux-x86_64.tar.gz2019-02-07 10:36 35M 
    [TXT]cmake-3.14.0-rc1-SHA-256.txt2019-02-07 10:36 1.0K 
    [TXT]cmake-3.14.0-rc1-SHA-256.txt.asc2019-02-07 10:36 833  
    [   ]cmake-3.14.0-rc1-win32-x86.msi2019-02-07 10:36 19M 
    [   ]cmake-3.14.0-rc1-win32-x86.zip2019-02-07 10:36 26M 
    [   ]cmake-3.14.0-rc1-win64-x64.msi2019-02-07 10:36 22M 
    [   ]cmake-3.14.0-rc1-win64-x64.zip2019-02-07 10:36 29M 
    [   ]cmake-3.14.0-rc1.tar.Z2019-02-07 10:36 14M 
    [   ]cmake-3.14.0-rc1.tar.gz2019-02-07 10:35 8.4M 
    [   ]cmake-3.14.0-rc1.zip2019-02-07 10:35 14M 
    [   ]cmake-3.14.0-rc2-Darwin-x86_64.dmg2019-02-15 10:04 33M 
    [   ]cmake-3.14.0-rc2-Darwin-x86_64.tar.gz2019-02-15 10:04 32M 
    [TXT]cmake-3.14.0-rc2-Linux-x86_64.sh2019-02-15 10:04 35M 
    [   ]cmake-3.14.0-rc2-Linux-x86_64.tar.gz2019-02-15 10:04 35M 
    [TXT]cmake-3.14.0-rc2-SHA-256.txt2019-02-15 10:04 1.0K 
    [TXT]cmake-3.14.0-rc2-SHA-256.txt.asc2019-02-15 10:04 833  
    [   ]cmake-3.14.0-rc2-win32-x86.msi2019-02-15 10:04 19M 
    [   ]cmake-3.14.0-rc2-win32-x86.zip2019-02-15 10:04 26M 
    [   ]cmake-3.14.0-rc2-win64-x64.msi2019-02-15 10:03 22M 
    [   ]cmake-3.14.0-rc2-win64-x64.zip2019-02-15 10:03 29M 
    [   ]cmake-3.14.0-rc2.tar.Z2019-02-15 10:03 14M 
    [   ]cmake-3.14.0-rc2.tar.gz2019-02-15 10:03 8.4M 
    [   ]cmake-3.14.0-rc2.zip2019-02-15 10:03 14M 
    [   ]cmake-3.14.0-rc3-Darwin-x86_64.dmg2019-03-01 11:20 33M 
    [   ]cmake-3.14.0-rc3-Darwin-x86_64.tar.gz2019-03-01 11:20 32M 
    [TXT]cmake-3.14.0-rc3-Linux-x86_64.sh2019-03-01 11:20 35M 
    [   ]cmake-3.14.0-rc3-Linux-x86_64.tar.gz2019-03-01 11:20 35M 
    [TXT]cmake-3.14.0-rc3-SHA-256.txt2019-03-01 11:20 1.0K 
    [TXT]cmake-3.14.0-rc3-SHA-256.txt.asc2019-03-01 11:20 833  
    [   ]cmake-3.14.0-rc3-win32-x86.msi2019-03-01 11:20 19M 
    [   ]cmake-3.14.0-rc3-win32-x86.zip2019-03-01 11:20 26M 
    [   ]cmake-3.14.0-rc3-win64-x64.msi2019-03-01 11:20 22M 
    [   ]cmake-3.14.0-rc3-win64-x64.zip2019-03-01 11:20 29M 
    [   ]cmake-3.14.0-rc3.tar.Z2019-03-01 11:20 14M 
    [   ]cmake-3.14.0-rc3.tar.gz2019-03-01 11:19 8.4M 
    [   ]cmake-3.14.0-rc3.zip2019-03-01 11:19 14M 
    [   ]cmake-3.14.0-rc4-Darwin-x86_64.dmg2019-03-08 11:09 33M 
    [   ]cmake-3.14.0-rc4-Darwin-x86_64.tar.gz2019-03-08 11:09 32M 
    [TXT]cmake-3.14.0-rc4-Linux-x86_64.sh2019-03-08 11:09 35M 
    [   ]cmake-3.14.0-rc4-Linux-x86_64.tar.gz2019-03-08 11:09 35M 
    [TXT]cmake-3.14.0-rc4-SHA-256.txt2019-03-08 11:09 1.0K 
    [TXT]cmake-3.14.0-rc4-SHA-256.txt.asc2019-03-08 11:09 833  
    [   ]cmake-3.14.0-rc4-win32-x86.msi2019-03-08 11:08 20M 
    [   ]cmake-3.14.0-rc4-win32-x86.zip2019-03-08 11:08 26M 
    [   ]cmake-3.14.0-rc4-win64-x64.msi2019-03-08 11:08 22M 
    [   ]cmake-3.14.0-rc4-win64-x64.zip2019-03-08 11:08 30M 
    [   ]cmake-3.14.0-rc4.tar.Z2019-03-08 11:08 14M 
    [   ]cmake-3.14.0-rc4.tar.gz2019-03-08 11:08 8.4M 
    [   ]cmake-3.14.0-rc4.zip2019-03-08 11:08 14M 
    [   ]cmake-3.14.0-win32-x86.msi2019-03-14 11:36 20M 
    [   ]cmake-3.14.0-win32-x86.zip2019-03-14 11:36 26M 
    [   ]cmake-3.14.0-win64-x64.msi2019-03-22 11:03 22M 
    [   ]cmake-3.14.0-win64-x64.zip2019-03-22 11:03 30M 
    [   ]cmake-3.14.0.tar.Z2019-03-14 11:36 14M 
    [   ]cmake-3.14.0.tar.gz2019-03-14 11:36 8.4M 
    [   ]cmake-3.14.0.zip2019-03-14 11:36 14M 
    [   ]cmake-3.14.1-Darwin-x86_64.dmg2019-03-29 12:20 33M 
    [   ]cmake-3.14.1-Darwin-x86_64.tar.gz2019-03-29 12:20 32M 
    [TXT]cmake-3.14.1-Linux-x86_64.sh2019-03-29 12:20 35M 
    [   ]cmake-3.14.1-Linux-x86_64.tar.gz2019-03-29 12:19 35M 
    [TXT]cmake-3.14.1-SHA-256.txt2019-03-29 12:19 1.0K 
    [TXT]cmake-3.14.1-SHA-256.txt.asc2019-03-29 12:19 833  
    [   ]cmake-3.14.1-win32-x86.msi2019-03-29 12:19 20M 
    [   ]cmake-3.14.1-win32-x86.zip2019-03-29 12:19 26M 
    [   ]cmake-3.14.1-win64-x64.msi2019-03-29 12:19 22M 
    [   ]cmake-3.14.1-win64-x64.zip2019-03-29 12:19 30M 
    [   ]cmake-3.14.1.tar.Z2019-03-29 12:19 14M 
    [   ]cmake-3.14.1.tar.gz2019-03-29 12:19 8.4M 
    [   ]cmake-3.14.1.zip2019-03-29 12:19 14M 
    [   ]cmake-3.14.2-Darwin-x86_64.dmg2019-04-12 10:19 33M 
    [   ]cmake-3.14.2-Darwin-x86_64.tar.gz2019-04-12 10:19 32M 
    [TXT]cmake-3.14.2-Linux-x86_64.sh2019-04-12 10:19 35M 
    [   ]cmake-3.14.2-Linux-x86_64.tar.gz2019-04-12 10:19 35M 
    [TXT]cmake-3.14.2-SHA-256.txt2019-04-12 10:19 1.0K 
    [TXT]cmake-3.14.2-SHA-256.txt.asc2019-04-12 10:19 833  
    [   ]cmake-3.14.2-win32-x86.msi2019-04-12 10:19 20M 
    [   ]cmake-3.14.2-win32-x86.zip2019-04-12 10:19 26M 
    [   ]cmake-3.14.2-win64-x64.msi2019-04-12 10:19 22M 
    [   ]cmake-3.14.2-win64-x64.zip2019-04-12 10:19 30M 
    [   ]cmake-3.14.2.tar.Z2019-04-12 10:19 14M 
    [   ]cmake-3.14.2.tar.gz2019-04-12 10:19 8.4M 
    [   ]cmake-3.14.2.zip2019-04-12 10:19 14M 
    [   ]cmake-3.14.3-Darwin-x86_64.dmg2019-04-22 10:40 33M 
    [   ]cmake-3.14.3-Darwin-x86_64.tar.gz2019-04-22 10:39 32M 
    [TXT]cmake-3.14.3-Linux-x86_64.sh2019-04-22 10:39 35M 
    [   ]cmake-3.14.3-Linux-x86_64.tar.gz2019-04-22 10:39 35M 
    [TXT]cmake-3.14.3-SHA-256.txt2019-04-22 10:39 1.0K 
    [TXT]cmake-3.14.3-SHA-256.txt.asc2019-04-22 10:39 833  
    [   ]cmake-3.14.3-win32-x86.msi2019-04-22 10:39 20M 
    [   ]cmake-3.14.3-win32-x86.zip2019-04-22 10:39 26M 
    [   ]cmake-3.14.3-win64-x64.msi2019-04-22 10:39 22M 
    [   ]cmake-3.14.3-win64-x64.zip2019-04-22 10:39 30M 
    [   ]cmake-3.14.3.tar.Z2019-04-22 10:39 14M 
    [   ]cmake-3.14.3.tar.gz2019-04-22 10:39 8.4M 
    [   ]cmake-3.14.3.zip2019-04-22 10:39 14M 
    [   ]cmake-3.14.4-Darwin-x86_64.dmg2019-05-14 14:00 33M 
    [   ]cmake-3.14.4-Darwin-x86_64.tar.gz2019-05-14 14:00 32M 
    [TXT]cmake-3.14.4-Linux-x86_64.sh2019-05-14 14:00 35M 
    [   ]cmake-3.14.4-Linux-x86_64.tar.gz2019-05-14 13:59 35M 
    [TXT]cmake-3.14.4-SHA-256.txt2019-05-14 13:59 1.0K 
    [TXT]cmake-3.14.4-SHA-256.txt.asc2019-05-14 13:59 833  
    [   ]cmake-3.14.4-win32-x86.msi2019-05-14 13:59 20M 
    [   ]cmake-3.14.4-win32-x86.zip2019-05-14 13:59 26M 
    [   ]cmake-3.14.4-win64-x64.msi2019-05-14 13:59 22M 
    [   ]cmake-3.14.4-win64-x64.zip2019-05-14 13:59 30M 
    [   ]cmake-3.14.4.tar.Z2019-05-14 13:59 14M 
    [   ]cmake-3.14.4.tar.gz2019-05-14 13:59 8.4M 
    [   ]cmake-3.14.4.zip2019-05-14 13:59 14M 
    [   ]cmake-3.14.5-Darwin-x86_64.dmg2019-05-31 12:39 33M 
    [   ]cmake-3.14.5-Darwin-x86_64.tar.gz2019-05-31 12:39 32M 
    [TXT]cmake-3.14.5-Linux-x86_64.sh2019-05-31 12:39 35M 
    [   ]cmake-3.14.5-Linux-x86_64.tar.gz2019-05-31 12:39 35M 
    [TXT]cmake-3.14.5-SHA-256.txt2019-05-31 12:39 1.0K 
    [TXT]cmake-3.14.5-SHA-256.txt.asc2019-05-31 12:39 833  
    [   ]cmake-3.14.5-win32-x86.msi2019-05-31 12:39 20M 
    [   ]cmake-3.14.5-win32-x86.zip2019-05-31 12:39 26M 
    [   ]cmake-3.14.5-win64-x64.msi2019-05-31 12:39 22M 
    [   ]cmake-3.14.5-win64-x64.zip2019-05-31 12:39 30M 
    [   ]cmake-3.14.5.tar.Z2019-05-31 12:39 14M 
    [   ]cmake-3.14.5.tar.gz2019-05-31 12:39 8.4M 
    [   ]cmake-3.14.5.zip2019-05-31 12:40 14M 
    [   ]cmake-3.14.6-Darwin-x86_64.dmg2019-07-16 09:33 33M 
    [   ]cmake-3.14.6-Darwin-x86_64.tar.gz2019-07-16 09:33 32M 
    [TXT]cmake-3.14.6-Linux-x86_64.sh2019-07-16 09:33 35M 
    [   ]cmake-3.14.6-Linux-x86_64.tar.gz2019-07-16 09:33 35M 
    [TXT]cmake-3.14.6-SHA-256.txt2019-07-16 09:33 1.0K 
    [TXT]cmake-3.14.6-SHA-256.txt.asc2019-07-16 09:33 833  
    [   ]cmake-3.14.6-win32-x86.msi2019-07-16 09:34 20M 
    [   ]cmake-3.14.6-win32-x86.zip2019-07-16 09:34 26M 
    [   ]cmake-3.14.6-win64-x64.msi2019-07-16 09:34 22M 
    [   ]cmake-3.14.6-win64-x64.zip2019-07-16 09:34 30M 
    [   ]cmake-3.14.6.tar.Z2019-07-16 09:34 14M 
    [   ]cmake-3.14.6.tar.gz2019-07-16 09:34 8.5M 
    [   ]cmake-3.14.6.zip2019-07-16 09:34 14M 
    [   ]cmake-3.14.7-Darwin-x86_64.dmg2019-10-02 10:48 33M 
    [   ]cmake-3.14.7-Darwin-x86_64.tar.gz2019-10-02 10:48 32M 
    [TXT]cmake-3.14.7-Linux-x86_64.sh2019-10-02 10:48 35M 
    [   ]cmake-3.14.7-Linux-x86_64.tar.gz2019-10-02 10:48 35M 
    [TXT]cmake-3.14.7-SHA-256.txt2019-10-02 10:48 1.0K 
    [TXT]cmake-3.14.7-SHA-256.txt.asc2019-10-02 10:48 833  
    [   ]cmake-3.14.7-win32-x86.msi2019-10-02 10:48 20M 
    [   ]cmake-3.14.7-win32-x86.zip2019-10-02 10:48 26M 
    [   ]cmake-3.14.7-win64-x64.msi2019-10-02 10:48 21M 
    [   ]cmake-3.14.7-win64-x64.zip2019-10-02 10:48 29M 
    [   ]cmake-3.14.7.tar.Z2019-10-02 10:48 14M 
    [   ]cmake-3.14.7.tar.gz2019-10-02 10:48 8.5M 
    [   ]cmake-3.14.7.zip2019-10-02 10:48 14M 

    diff --git a/bitbake/lib/bb/tests/fetch-testdata/files/v3.15/index.html b/bitbake/lib/bb/tests/fetch-testdata/files/v3.15/index.html
    deleted file mode 100644
    index 92726767db..0000000000
    --- a/bitbake/lib/bb/tests/fetch-testdata/files/v3.15/index.html
    +++ /dev/null
    @@ -1,157 +0,0 @@

    Index of /files/v3.15

    Name  Last modified  Size  Description

    Parent Directory  -
    cmake-3.15.0-Darwin-x86_64.dmg  2019-07-17 10:38  33M
    cmake-3.15.0-Darwin-x86_64.tar.gz  2019-07-17 10:38  33M
    cmake-3.15.0-Linux-x86_64.sh  2019-07-17 10:38  37M
    cmake-3.15.0-Linux-x86_64.tar.gz  2019-07-17 10:38  37M
    cmake-3.15.0-SHA-256.txt  2019-07-17 10:38  1.0K
    cmake-3.15.0-SHA-256.txt.asc  2019-07-17 10:38  833
    cmake-3.15.0-rc1-Darwin-x86_64.dmg  2019-06-04 14:22  33M
    cmake-3.15.0-rc1-Darwin-x86_64.tar.gz  2019-06-04 14:22  33M
    cmake-3.15.0-rc1-Linux-x86_64.sh  2019-06-04 14:22  37M
    cmake-3.15.0-rc1-Linux-x86_64.tar.gz  2019-06-04 14:22  37M
    cmake-3.15.0-rc1-SHA-256.txt  2019-06-04 14:22  1.0K
    cmake-3.15.0-rc1-SHA-256.txt.asc  2019-06-04 14:22  833
    cmake-3.15.0-rc1-win32-x86.msi  2019-06-04 14:23  20M
    cmake-3.15.0-rc1-win32-x86.zip  2019-06-04 14:23  28M
    cmake-3.15.0-rc1-win64-x64.msi  2019-06-04 14:23  23M
    cmake-3.15.0-rc1-win64-x64.zip  2019-06-04 14:23  31M
    cmake-3.15.0-rc1.tar.Z  2019-06-04 14:23  14M
    cmake-3.15.0-rc1.tar.gz  2019-06-04 14:22  8.8M
    cmake-3.15.0-rc1.zip  2019-06-04 14:23  15M
    cmake-3.15.0-rc2-Darwin-x86_64.dmg  2019-06-19 10:04  33M
    cmake-3.15.0-rc2-Darwin-x86_64.tar.gz  2019-06-19 10:04  33M
    cmake-3.15.0-rc2-Linux-x86_64.sh  2019-06-19 10:04  37M
    cmake-3.15.0-rc2-Linux-x86_64.tar.gz  2019-06-19 10:04  37M
    cmake-3.15.0-rc2-SHA-256.txt  2019-06-19 10:04  1.0K
    cmake-3.15.0-rc2-SHA-256.txt.asc  2019-06-19 10:04  833
    cmake-3.15.0-rc2-win32-x86.msi  2019-06-19 10:04  20M
    cmake-3.15.0-rc2-win32-x86.zip  2019-06-19 10:04  28M
    cmake-3.15.0-rc2-win64-x64.msi  2019-06-19 10:04  23M
    cmake-3.15.0-rc2-win64-x64.zip  2019-06-19 10:04  31M
    cmake-3.15.0-rc2.tar.Z  2019-06-19 10:04  14M
    cmake-3.15.0-rc2.tar.gz  2019-06-19 10:04  8.8M
    cmake-3.15.0-rc2.zip  2019-06-19 10:04  15M
    cmake-3.15.0-rc3-Darwin-x86_64.dmg  2019-06-27 11:33  33M
    cmake-3.15.0-rc3-Darwin-x86_64.tar.gz  2019-06-27 11:33  33M
    cmake-3.15.0-rc3-Linux-x86_64.sh  2019-06-27 11:33  37M
    cmake-3.15.0-rc3-Linux-x86_64.tar.gz  2019-06-27 11:33  37M
    cmake-3.15.0-rc3-SHA-256.txt  2019-06-27 11:33  1.0K
    cmake-3.15.0-rc3-SHA-256.txt.asc  2019-06-27 11:33  833
    cmake-3.15.0-rc3-win32-x86.msi  2019-06-27 11:33  20M
    cmake-3.15.0-rc3-win32-x86.zip  2019-06-27 11:33  28M
    cmake-3.15.0-rc3-win64-x64.msi  2019-06-27 11:33  23M
    cmake-3.15.0-rc3-win64-x64.zip  2019-06-27 11:33  31M
    cmake-3.15.0-rc3.tar.Z  2019-06-27 11:33  14M
    cmake-3.15.0-rc3.tar.gz  2019-06-27 11:33  8.8M
    cmake-3.15.0-rc3.zip  2019-06-27 11:33  15M
    cmake-3.15.0-rc4-Darwin-x86_64.dmg  2019-07-10 15:06  33M
    cmake-3.15.0-rc4-Darwin-x86_64.tar.gz  2019-07-10 15:07  33M
    cmake-3.15.0-rc4-Linux-x86_64.sh  2019-07-10 15:07  37M
    cmake-3.15.0-rc4-Linux-x86_64.tar.gz  2019-07-10 15:07  37M
    cmake-3.15.0-rc4-SHA-256.txt  2019-07-10 15:07  1.0K
    cmake-3.15.0-rc4-SHA-256.txt.asc  2019-07-10 15:07  833
    cmake-3.15.0-rc4-win32-x86.msi  2019-07-10 15:07  20M
    cmake-3.15.0-rc4-win32-x86.zip  2019-07-10 15:07  28M
    cmake-3.15.0-rc4-win64-x64.msi  2019-07-10 15:07  23M
    cmake-3.15.0-rc4-win64-x64.zip  2019-07-10 15:07  31M
    cmake-3.15.0-rc4.tar.Z  2019-07-10 15:07  14M
    cmake-3.15.0-rc4.tar.gz  2019-07-10 15:07  8.8M
    cmake-3.15.0-rc4.zip  2019-07-10 15:07  15M
    cmake-3.15.0-win32-x86.msi  2019-07-17 10:38  20M
    cmake-3.15.0-win32-x86.zip  2019-07-17 10:38  27M
    cmake-3.15.0-win64-x64.msi  2019-07-17 10:38  23M
    cmake-3.15.0-win64-x64.zip  2019-07-17 10:38  31M
    cmake-3.15.0.tar.Z  2019-07-17 10:38  14M
    cmake-3.15.0.tar.gz  2019-07-17 10:38  8.8M
    cmake-3.15.0.zip  2019-07-17 10:38  15M
    cmake-3.15.1-Darwin-x86_64.dmg  2019-07-26 10:14  33M
    cmake-3.15.1-Darwin-x86_64.tar.gz  2019-07-26 10:14  33M
    cmake-3.15.1-Linux-x86_64.sh  2019-07-26 10:14  37M
    cmake-3.15.1-Linux-x86_64.tar.gz  2019-07-26 10:14  37M
    cmake-3.15.1-SHA-256.txt  2019-07-26 10:14  1.0K
    cmake-3.15.1-SHA-256.txt.asc  2019-07-26 10:14  833
    cmake-3.15.1-win32-x86.msi  2019-07-26 10:14  20M
    cmake-3.15.1-win32-x86.zip  2019-07-26 10:14  27M
    cmake-3.15.1-win64-x64.msi  2019-07-26 10:14  23M
    cmake-3.15.1-win64-x64.zip  2019-07-26 10:14  31M
    cmake-3.15.1.tar.Z  2019-07-26 10:14  14M
    cmake-3.15.1.tar.gz  2019-07-26 10:14  8.8M
    cmake-3.15.1.zip  2019-07-26 10:15  15M
    cmake-3.15.2-Darwin-x86_64.dmg  2019-08-07 15:05  33M
    cmake-3.15.2-Darwin-x86_64.tar.gz  2019-08-07 15:05  33M
    cmake-3.15.2-Linux-x86_64.sh  2019-08-07 15:05  37M
    cmake-3.15.2-Linux-x86_64.tar.gz  2019-08-07 15:05  37M
    cmake-3.15.2-SHA-256.txt  2019-08-07 15:05  1.0K
    cmake-3.15.2-SHA-256.txt.asc  2019-08-07 15:05  833
    cmake-3.15.2-win32-x86.msi  2019-08-07 15:05  20M
    cmake-3.15.2-win32-x86.zip  2019-08-07 15:05  27M
    cmake-3.15.2-win64-x64.msi  2019-08-07 15:05  23M
    cmake-3.15.2-win64-x64.zip  2019-08-07 15:05  31M
    cmake-3.15.2.tar.Z  2019-08-07 15:05  14M
    cmake-3.15.2.tar.gz  2019-08-07 15:05  8.8M
    cmake-3.15.2.zip  2019-08-07 15:05  15M
    cmake-3.15.3-Darwin-x86_64.dmg  2019-09-04 11:13  33M
    cmake-3.15.3-Darwin-x86_64.tar.gz  2019-09-04 11:13  33M
    cmake-3.15.3-Linux-x86_64.sh  2019-09-04 11:13  37M
    cmake-3.15.3-Linux-x86_64.tar.gz  2019-09-04 11:13  37M
    cmake-3.15.3-SHA-256.txt  2019-09-04 11:13  1.0K
    cmake-3.15.3-SHA-256.txt.asc  2019-09-04 11:13  833
    cmake-3.15.3-win32-x86.msi  2019-09-04 11:13  20M
    cmake-3.15.3-win32-x86.zip  2019-09-04 11:13  27M
    cmake-3.15.3-win64-x64.msi  2019-09-04 11:13  23M
    cmake-3.15.3-win64-x64.zip  2019-09-04 11:13  31M
    cmake-3.15.3.tar.Z  2019-09-04 11:13  14M
    cmake-3.15.3.tar.gz  2019-09-04 11:13  8.8M
    cmake-3.15.3.zip  2019-09-04 11:13  15M
    cmake-3.15.4-Darwin-x86_64.dmg  2019-10-02 10:45  33M
    cmake-3.15.4-Darwin-x86_64.tar.gz  2019-10-02 10:45  33M
    cmake-3.15.4-Linux-x86_64.sh  2019-10-02 10:45  37M
    cmake-3.15.4-Linux-x86_64.tar.gz  2019-10-02 10:45  37M
    cmake-3.15.4-SHA-256.txt  2019-10-02 10:45  1.0K
    cmake-3.15.4-SHA-256.txt.asc  2019-10-02 10:45  833
    cmake-3.15.4-win32-x86.msi  2019-10-02 10:45  20M
    cmake-3.15.4-win32-x86.zip  2019-10-02 10:45  27M
    cmake-3.15.4-win64-x64.msi  2019-10-02 10:45  22M
    cmake-3.15.4-win64-x64.zip  2019-10-02 10:45  30M
    cmake-3.15.4.tar.Z  2019-10-02 10:45  14M
    cmake-3.15.4.tar.gz  2019-10-02 10:45  8.8M
    cmake-3.15.4.zip  2019-10-02 10:45  15M
    cmake-3.15.5-Darwin-x86_64.dmg  2019-10-30 10:49  33M
    cmake-3.15.5-Darwin-x86_64.tar.gz  2019-10-30 10:49  33M
    cmake-3.15.5-Linux-x86_64.sh  2019-10-30 10:49  37M
    cmake-3.15.5-Linux-x86_64.tar.gz  2019-10-30 10:49  37M
    cmake-3.15.5-SHA-256.txt  2019-10-30 10:49  1.0K
    cmake-3.15.5-SHA-256.txt.asc  2019-10-30 10:49  833
    cmake-3.15.5-win32-x86.msi  2019-10-30 10:49  20M
    cmake-3.15.5-win32-x86.zip  2019-10-30 10:49  27M
    cmake-3.15.5-win64-x64.msi  2019-10-30 10:49  22M
    cmake-3.15.5-win64-x64.zip  2019-10-30 10:49  30M
    cmake-3.15.5.tar.Z  2019-10-30 10:49  14M
    cmake-3.15.5.tar.gz  2019-10-30 10:49  8.8M
    cmake-3.15.5.zip  2019-10-30 10:49  15M
    cmake-3.15.6-Darwin-x86_64.dmg  2019-12-16 11:30  34M
    cmake-3.15.6-Darwin-x86_64.tar.gz  2019-12-16 11:30  33M
    cmake-3.15.6-Linux-x86_64.sh  2019-12-16 11:30  37M
    cmake-3.15.6-Linux-x86_64.tar.gz  2019-12-16 11:30  37M
    cmake-3.15.6-SHA-256.txt  2019-12-16 11:30  1.0K
    cmake-3.15.6-SHA-256.txt.asc  2019-12-16 11:30  833
    cmake-3.15.6-win32-x86.msi  2019-12-16 11:30  21M
    cmake-3.15.6-win32-x86.zip  2019-12-16 11:30  28M
    cmake-3.15.6-win64-x64.msi  2019-12-16 11:30  22M
    cmake-3.15.6-win64-x64.zip  2019-12-16 11:30  30M
    cmake-3.15.6.tar.Z  2019-12-16 11:30  14M
    cmake-3.15.6.tar.gz  2019-12-16 11:30  8.8M
    cmake-3.15.6.zip  2019-12-16 11:30  15M

    diff --git a/bitbake/lib/bb/tests/fetch-testdata/files/v3.16/index.html b/bitbake/lib/bb/tests/fetch-testdata/files/v3.16/index.html
    deleted file mode 100644
    index 74f9b40277..0000000000
    --- a/bitbake/lib/bb/tests/fetch-testdata/files/v3.16/index.html
    +++ /dev/null
    @@ -1,86 +0,0 @@

    Index of /files/v3.16

    Name  Last modified  Size  Description

    Parent Directory  -
    cmake-3.16.0-Darwin-x86_64.dmg  2019-11-26 10:27  35M
    cmake-3.16.0-Darwin-x86_64.tar.gz  2019-11-26 10:27  34M
    cmake-3.16.0-Linux-x86_64.sh  2019-11-26 10:27  38M
    cmake-3.16.0-Linux-x86_64.tar.gz  2019-11-26 10:27  38M
    cmake-3.16.0-SHA-256.txt  2019-11-26 10:27  932
    cmake-3.16.0-SHA-256.txt.asc  2019-11-26 10:27  833
    cmake-3.16.0-rc1-Darwin-x86_64.dmg  2019-10-10 14:18  34M
    cmake-3.16.0-rc1-Darwin-x86_64.tar.gz  2019-10-10 14:18  34M
    cmake-3.16.0-rc1-Linux-x86_64.sh  2019-10-10 14:18  38M
    cmake-3.16.0-rc1-Linux-x86_64.tar.gz  2019-10-10 14:18  38M
    cmake-3.16.0-rc1-SHA-256.txt  2019-10-10 14:18  972
    cmake-3.16.0-rc1-SHA-256.txt.asc  2019-10-10 14:18  833
    cmake-3.16.0-rc1-win32-x86.msi  2019-10-10 14:18  21M
    cmake-3.16.0-rc1-win32-x86.zip  2019-10-10 14:18  28M
    cmake-3.16.0-rc1-win64-x64.msi  2019-10-10 14:18  23M
    cmake-3.16.0-rc1-win64-x64.zip  2019-10-10 14:18  31M
    cmake-3.16.0-rc1.tar.gz  2019-10-10 14:18  8.7M
    cmake-3.16.0-rc1.zip  2019-10-10 14:18  14M
    cmake-3.16.0-rc2-Darwin-x86_64.dmg  2019-10-18 10:47  34M
    cmake-3.16.0-rc2-Darwin-x86_64.tar.gz  2019-10-18 10:47  34M
    cmake-3.16.0-rc2-Linux-x86_64.sh  2019-10-18 10:47  38M
    cmake-3.16.0-rc2-Linux-x86_64.tar.gz  2019-10-18 10:47  38M
    cmake-3.16.0-rc2-SHA-256.txt  2019-10-18 10:47  972
    cmake-3.16.0-rc2-SHA-256.txt.asc  2019-10-18 10:47  833
    cmake-3.16.0-rc2-win32-x86.msi  2019-10-18 10:47  21M
    cmake-3.16.0-rc2-win32-x86.zip  2019-10-18 10:47  28M
    cmake-3.16.0-rc2-win64-x64.msi  2019-10-18 10:47  23M
    cmake-3.16.0-rc2-win64-x64.zip  2019-10-18 10:47  31M
    cmake-3.16.0-rc2.tar.gz  2019-10-18 10:47  8.7M
    cmake-3.16.0-rc2.zip  2019-10-18 10:47  14M
    cmake-3.16.0-rc3-Darwin-x86_64.dmg  2019-10-31 12:09  34M
    cmake-3.16.0-rc3-Darwin-x86_64.tar.gz  2019-10-31 12:09  34M
    cmake-3.16.0-rc3-Linux-x86_64.sh  2019-10-31 12:09  38M
    cmake-3.16.0-rc3-Linux-x86_64.tar.gz  2019-10-31 12:10  38M
    cmake-3.16.0-rc3-SHA-256.txt  2019-10-31 12:10  972
    cmake-3.16.0-rc3-SHA-256.txt.asc  2019-10-31 12:10  833
    cmake-3.16.0-rc3-win32-x86.msi  2019-10-31 12:10  21M
    cmake-3.16.0-rc3-win32-x86.zip  2019-10-31 12:10  28M
    cmake-3.16.0-rc3-win64-x64.msi  2019-10-31 12:10  23M
    cmake-3.16.0-rc3-win64-x64.zip  2019-10-31 12:10  31M
    cmake-3.16.0-rc3.tar.gz  2019-10-31 12:10  8.7M
    cmake-3.16.0-rc3.zip  2019-10-31 12:10  14M
    cmake-3.16.0-rc4-Darwin-x86_64.dmg  2019-11-18 17:01  35M
    cmake-3.16.0-rc4-Darwin-x86_64.tar.gz  2019-11-18 17:01  34M
    cmake-3.16.0-rc4-Linux-x86_64.sh  2019-11-18 17:01  38M
    cmake-3.16.0-rc4-Linux-x86_64.tar.gz  2019-11-18 17:01  38M
    cmake-3.16.0-rc4-SHA-256.txt  2019-11-18 17:01  972
    cmake-3.16.0-rc4-SHA-256.txt.asc  2019-11-18 17:01  833
    cmake-3.16.0-rc4-win32-x86.msi  2019-11-18 17:01  21M
    cmake-3.16.0-rc4-win32-x86.zip  2019-11-18 17:01  28M
    cmake-3.16.0-rc4-win64-x64.msi  2019-11-18 17:01  23M
    cmake-3.16.0-rc4-win64-x64.zip  2019-11-18 17:01  31M
    cmake-3.16.0-rc4.tar.gz  2019-11-18 17:01  8.7M
    cmake-3.16.0-rc4.zip  2019-11-18 17:01  14M
    cmake-3.16.0-win32-x86.msi  2019-11-26 10:27  21M
    cmake-3.16.0-win32-x86.zip  2019-11-26 10:27  28M
    cmake-3.16.0-win64-x64.msi  2019-11-26 10:27  23M
    cmake-3.16.0-win64-x64.zip  2019-11-26 10:27  31M
    cmake-3.16.0.tar.gz  2019-11-26 10:27  8.7M
    cmake-3.16.0.zip  2019-11-26 10:27  14M
    cmake-3.16.1-Darwin-x86_64.dmg  2019-12-10 10:42  35M
    cmake-3.16.1-Darwin-x86_64.tar.gz  2019-12-10 10:42  34M
    cmake-3.16.1-Linux-x86_64.sh  2019-12-10 10:42  38M
    cmake-3.16.1-Linux-x86_64.tar.gz  2019-12-10 10:42  38M
    cmake-3.16.1-SHA-256.txt  2019-12-10 10:42  932
    cmake-3.16.1-SHA-256.txt.asc  2019-12-10 10:42  833
    cmake-3.16.1-win32-x86.msi  2019-12-10 10:42  21M
    cmake-3.16.1-win32-x86.zip  2019-12-10 10:42  28M
    cmake-3.16.1-win64-x64.msi  2019-12-10 10:42  23M
    cmake-3.16.1-win64-x64.zip  2019-12-10 10:42  31M
    cmake-3.16.1.tar.gz  2019-12-10 10:42  8.7M
    cmake-3.16.1.zip  2019-12-10 10:42  14M

    diff --git a/bitbake/lib/bb/tests/fetch-testdata/files/v3.2/index.html b/bitbake/lib/bb/tests/fetch-testdata/files/v3.2/index.html
    deleted file mode 100644
    index a8c6c7f266..0000000000
    --- a/bitbake/lib/bb/tests/fetch-testdata/files/v3.2/index.html
    +++ /dev/null
    @@ -1,132 +0,0 @@

    Index of /files/v3.2

    Name  Last modified  Size  Description

    Parent Directory  -
    cmake-3.2.0-1-src.tar.bz2  2015-03-10 09:02  4.9M
    cmake-3.2.0-1.tar.bz2  2015-03-10 09:02  9.4M
    cmake-3.2.0-Darwin-universal.dmg  2015-03-10 09:02  47M
    cmake-3.2.0-Darwin-universal.tar.Z  2015-03-10 09:02  66M
    cmake-3.2.0-Darwin-universal.tar.gz  2015-03-10 09:01  46M
    cmake-3.2.0-Darwin-x86_64.dmg  2015-03-10 09:01  27M
    cmake-3.2.0-Darwin-x86_64.tar.Z  2015-03-10 09:01  38M
    cmake-3.2.0-Darwin-x86_64.tar.gz  2015-03-10 09:01  26M
    cmake-3.2.0-Linux-i386.sh  2015-03-10 09:01  25M
    cmake-3.2.0-Linux-i386.tar.Z  2015-03-10 09:01  36M
    cmake-3.2.0-Linux-i386.tar.gz  2015-03-10 09:01  25M
    cmake-3.2.0-Linux-x86_64.sh  2015-03-10 09:01  26M
    cmake-3.2.0-Linux-x86_64.tar.Z  2015-03-10 09:01  36M
    cmake-3.2.0-Linux-x86_64.tar.gz  2015-03-10 09:01  26M
    cmake-3.2.0-SHA-256.txt  2016-04-13 12:48  1.6K
    cmake-3.2.0-SHA-256.txt.asc  2016-04-13 12:48  819
    cmake-3.2.0-rc1-Darwin-universal.dmg  2015-02-13 15:03  47M
    cmake-3.2.0-rc1-Darwin-universal.tar.Z  2015-02-13 15:03  66M
    cmake-3.2.0-rc1-Darwin-universal.tar.gz  2015-02-13 15:03  46M
    cmake-3.2.0-rc1-Darwin-x86_64.dmg  2015-02-13 15:03  30M
    cmake-3.2.0-rc1-Darwin-x86_64.tar.Z  2015-02-13 15:03  42M
    cmake-3.2.0-rc1-Darwin-x86_64.tar.gz  2015-02-13 15:03  29M
    cmake-3.2.0-rc1-Linux-i386.sh  2015-02-13 15:03  25M
    cmake-3.2.0-rc1-Linux-i386.tar.Z  2015-02-13 15:03  36M
    cmake-3.2.0-rc1-Linux-i386.tar.gz  2015-02-13 15:03  25M
    cmake-3.2.0-rc1-Linux-x86_64.sh  2015-02-13 15:03  26M
    cmake-3.2.0-rc1-Linux-x86_64.tar.Z  2015-02-13 15:03  36M
    cmake-3.2.0-rc1-Linux-x86_64.tar.gz  2015-02-13 15:02  26M
    cmake-3.2.0-rc1-SHA-256.txt  2016-04-13 12:48  1.6K
    cmake-3.2.0-rc1-SHA-256.txt.asc  2016-04-13 12:48  819
    cmake-3.2.0-rc1-win32-x86.exe  2015-02-13 15:02  12M
    cmake-3.2.0-rc1-win32-x86.zip  2015-02-13 15:02  16M
    cmake-3.2.0-rc1.tar.Z  2015-02-13 15:02  9.9M
    cmake-3.2.0-rc1.tar.gz  2015-02-13 15:02  6.1M
    cmake-3.2.0-rc1.zip  2015-02-13 15:02  9.8M
    cmake-3.2.0-rc2-Darwin-universal.dmg  2015-02-24 08:51  47M
    cmake-3.2.0-rc2-Darwin-universal.tar.Z  2015-02-24 08:51  66M
    cmake-3.2.0-rc2-Darwin-universal.tar.gz  2015-02-24 08:51  46M
    cmake-3.2.0-rc2-Darwin-x86_64.dmg  2015-02-24 08:51  27M
    cmake-3.2.0-rc2-Darwin-x86_64.tar.Z  2015-02-24 08:51  38M
    cmake-3.2.0-rc2-Darwin-x86_64.tar.gz  2015-02-24 08:51  26M
    cmake-3.2.0-rc2-Linux-i386.sh  2015-02-24 08:50  25M
    cmake-3.2.0-rc2-Linux-i386.tar.Z  2015-02-24 08:50  36M
    cmake-3.2.0-rc2-Linux-i386.tar.gz  2015-02-24 08:50  25M
    cmake-3.2.0-rc2-Linux-x86_64.sh  2015-02-24 08:50  26M
    cmake-3.2.0-rc2-Linux-x86_64.tar.Z  2015-02-24 08:50  36M
    cmake-3.2.0-rc2-Linux-x86_64.tar.gz  2015-02-24 08:50  26M
    cmake-3.2.0-rc2-SHA-256.txt  2016-04-13 12:48  1.6K
    cmake-3.2.0-rc2-SHA-256.txt.asc  2016-04-13 12:48  819
    cmake-3.2.0-rc2-win32-x86.exe  2015-02-24 08:50  11M
    cmake-3.2.0-rc2-win32-x86.zip  2015-02-24 08:50  15M
    cmake-3.2.0-rc2.tar.Z  2015-02-24 08:50  10M
    cmake-3.2.0-rc2.tar.gz  2015-02-24 08:50  6.1M
    cmake-3.2.0-rc2.zip  2015-02-24 08:50  9.8M
    cmake-3.2.0-win32-x86.exe  2015-03-10 09:01  11M
    cmake-3.2.0-win32-x86.zip  2015-03-10 09:01  15M
    cmake-3.2.0.tar.Z  2015-03-10 09:01  9.9M
    cmake-3.2.0.tar.gz  2015-03-10 09:01  6.1M
    cmake-3.2.0.zip  2015-03-10 09:01  9.7M
    cmake-3.2.1-1-src.tar.bz2  2015-03-11 09:10  4.9M
    cmake-3.2.1-1.tar.bz2  2015-03-11 09:10  9.5M
    cmake-3.2.1-Darwin-universal.dmg  2015-03-11 09:10  47M
    cmake-3.2.1-Darwin-universal.tar.Z  2015-03-11 09:10  66M
    cmake-3.2.1-Darwin-universal.tar.gz  2015-03-11 09:10  46M
    cmake-3.2.1-Darwin-x86_64.dmg  2015-03-11 09:10  27M
    cmake-3.2.1-Darwin-x86_64.tar.Z  2015-03-11 09:10  38M
    cmake-3.2.1-Darwin-x86_64.tar.gz  2015-03-11 09:10  26M
    cmake-3.2.1-Linux-i386.sh  2015-03-11 09:10  25M
    cmake-3.2.1-Linux-i386.tar.Z  2015-03-11 09:09  36M
    cmake-3.2.1-Linux-i386.tar.gz  2015-03-11 09:09  25M
    cmake-3.2.1-Linux-x86_64.sh  2015-03-11 09:09  26M
    cmake-3.2.1-Linux-x86_64.tar.Z  2015-03-11 09:09  36M
    cmake-3.2.1-Linux-x86_64.tar.gz  2015-03-11 09:09  26M
    cmake-3.2.1-SHA-256.txt  2016-04-13 12:48  1.6K
    cmake-3.2.1-SHA-256.txt.asc  2016-04-13 12:48  819
    cmake-3.2.1-win32-x86.exe  2015-03-11 09:09  11M
    cmake-3.2.1-win32-x86.zip  2015-03-11 09:09  15M
    cmake-3.2.1.tar.Z  2015-03-11 09:09  10M
    cmake-3.2.1.tar.gz  2015-03-11 09:09  6.1M
    cmake-3.2.1.zip  2015-03-11 09:09  9.7M
    cmake-3.2.2-Darwin-universal.dmg  2015-04-14 13:45  47M
    cmake-3.2.2-Darwin-universal.tar.Z  2015-04-14 13:45  66M
    cmake-3.2.2-Darwin-universal.tar.gz  2015-04-14 13:45  46M
    cmake-3.2.2-Darwin-x86_64.dmg  2015-04-14 13:45  27M
    cmake-3.2.2-Darwin-x86_64.tar.Z  2015-04-14 13:45  38M
    cmake-3.2.2-Darwin-x86_64.tar.gz  2015-04-14 13:45  26M
    cmake-3.2.2-Linux-i386.sh  2015-04-14 13:44  25M
    cmake-3.2.2-Linux-i386.tar.Z  2015-04-14 13:44  36M
    cmake-3.2.2-Linux-i386.tar.gz  2015-04-14 13:44  25M
    cmake-3.2.2-Linux-x86_64.sh  2015-04-14 13:44  26M
    cmake-3.2.2-Linux-x86_64.tar.Z  2015-04-14 13:44  36M
    cmake-3.2.2-Linux-x86_64.tar.gz  2015-04-14 13:44  26M
    cmake-3.2.2-SHA-256.txt  2016-04-13 12:48  1.6K
    cmake-3.2.2-SHA-256.txt.asc  2016-04-13 12:48  819
    cmake-3.2.2-win32-x86.exe  2015-04-14 13:44  11M
    cmake-3.2.2-win32-x86.zip  2015-04-14 13:44  15M
    cmake-3.2.2.tar.Z  2015-04-14 13:44  9.9M
    cmake-3.2.2.tar.gz  2015-04-14 13:44  6.1M
    cmake-3.2.2.zip  2015-04-14 13:44  9.7M
    cmake-3.2.3-Darwin-universal.dmg  2015-06-01 17:04  47M
    cmake-3.2.3-Darwin-universal.tar.Z  2015-06-01 17:04  66M
    cmake-3.2.3-Darwin-universal.tar.gz  2015-06-01 17:04  46M
    cmake-3.2.3-Darwin-x86_64.dmg  2015-06-01 17:04  27M
    cmake-3.2.3-Darwin-x86_64.tar.Z  2015-06-01 17:04  38M
    cmake-3.2.3-Darwin-x86_64.tar.gz  2015-06-01 17:04  26M
    cmake-3.2.3-Linux-i386.sh  2015-06-01 17:04  25M
    cmake-3.2.3-Linux-i386.tar.Z  2015-06-01 17:03  36M
    cmake-3.2.3-Linux-i386.tar.gz  2015-06-01 17:03  25M
    cmake-3.2.3-Linux-x86_64.sh  2015-06-01 17:03  26M
    cmake-3.2.3-Linux-x86_64.tar.Z  2015-06-01 17:03  36M
    cmake-3.2.3-Linux-x86_64.tar.gz  2015-06-01 17:03  26M
    cmake-3.2.3-SHA-256.txt  2016-04-13 12:48  1.6K
    cmake-3.2.3-SHA-256.txt.asc  2016-04-13 12:48  819
    cmake-3.2.3-win32-x86.exe  2015-06-01 17:03  11M
    cmake-3.2.3-win32-x86.zip  2015-06-01 17:03  15M
    cmake-3.2.3.tar.Z  2015-06-01 17:03  9.9M
    cmake-3.2.3.tar.gz  2015-06-01 17:03  6.1M
    cmake-3.2.3.zip  2015-06-01 17:03  9.7M

    diff --git a/bitbake/lib/bb/tests/fetch-testdata/files/v3.3/index.html b/bitbake/lib/bb/tests/fetch-testdata/files/v3.3/index.html
    deleted file mode 100644
    index d053736514..0000000000
    --- a/bitbake/lib/bb/tests/fetch-testdata/files/v3.3/index.html
    +++ /dev/null
    @@ -1,163 +0,0 @@

    Index of /files/v3.3

    Name  Last modified  Size  Description

    Parent Directory  -
    cmake-3.3.0-1-src.tar.bz2  2015-07-23 16:39  5.0M
    cmake-3.3.0-1.tar.bz2  2015-07-23 16:39  9.8M
    cmake-3.3.0-Darwin-universal.dmg  2015-07-23 16:39  48M
    cmake-3.3.0-Darwin-universal.tar.Z  2015-07-23 16:39  68M
    cmake-3.3.0-Darwin-universal.tar.gz  2015-07-23 16:39  47M
    cmake-3.3.0-Darwin-x86_64.dmg  2015-07-23 16:39  22M
    cmake-3.3.0-Darwin-x86_64.tar.Z  2015-07-23 16:39  31M
    cmake-3.3.0-Darwin-x86_64.tar.gz  2015-07-23 16:38  21M
    cmake-3.3.0-Linux-i386.sh  2015-07-23 16:38  26M
    cmake-3.3.0-Linux-i386.tar.Z  2015-07-23 16:38  37M
    cmake-3.3.0-Linux-i386.tar.gz  2015-07-23 16:38  26M
    cmake-3.3.0-Linux-x86_64.sh  2015-07-23 16:38  27M
    cmake-3.3.0-Linux-x86_64.tar.Z  2015-07-23 16:38  37M
    cmake-3.3.0-Linux-x86_64.tar.gz  2015-07-23 16:38  27M
    cmake-3.3.0-SHA-256.txt  2015-08-13 14:42  1.6K
    cmake-3.3.0-SHA-256.txt.asc  2015-08-13 14:42  819
    cmake-3.3.0-rc1-1-src.tar.bz2  2015-06-05 09:01  9.8M
    cmake-3.3.0-rc1-1.patch  2015-06-05 09:01  0
    cmake-3.3.0-rc1-1.sh  2015-06-05 09:01  1.5K
    cmake-3.3.0-rc1-1.tar.bz2  2015-06-05 09:01  9.8M
    cmake-3.3.0-rc1-Darwin-universal.dmg  2015-06-05 09:01  48M
    cmake-3.3.0-rc1-Darwin-universal.tar.Z  2015-06-05 09:01  68M
    cmake-3.3.0-rc1-Darwin-universal.tar.gz  2015-06-05 09:01  47M
    cmake-3.3.0-rc1-Darwin-x86_64.dmg  2015-06-05 09:01  27M
    cmake-3.3.0-rc1-Darwin-x86_64.tar.Z  2015-06-05 09:01  38M
    cmake-3.3.0-rc1-Darwin-x86_64.tar.gz  2015-06-05 09:01  27M
    cmake-3.3.0-rc1-Linux-i386.sh  2015-06-05 09:00  26M
    cmake-3.3.0-rc1-Linux-i386.tar.Z  2015-06-05 09:00  36M
    cmake-3.3.0-rc1-Linux-i386.tar.gz  2015-06-05 09:00  26M
    cmake-3.3.0-rc1-Linux-x86_64.sh  2015-06-05 09:00  26M
    cmake-3.3.0-rc1-Linux-x86_64.tar.Z  2015-06-05 09:00  37M
    cmake-3.3.0-rc1-Linux-x86_64.tar.gz  2015-06-05 09:00  26M
    cmake-3.3.0-rc1-SHA-256.txt  2016-04-13 12:48  1.6K
    cmake-3.3.0-rc1-SHA-256.txt.asc  2016-04-13 12:48  819
    cmake-3.3.0-rc1-win32-x86.exe  2015-06-05 09:00  12M
    cmake-3.3.0-rc1-win32-x86.zip  2015-06-05 09:00  16M
    cmake-3.3.0-rc1.tar.Z  2015-06-05 09:00  10M
    cmake-3.3.0-rc1.tar.gz  2015-06-05 09:00  6.3M
    cmake-3.3.0-rc1.zip  2015-06-05 09:00  10M
    cmake-3.3.0-rc2-1-src.tar.bz2  2015-06-10 15:27  5.0M
    cmake-3.3.0-rc2-1.tar.bz2  2015-06-10 15:27  9.8M
    cmake-3.3.0-rc2-Darwin-universal.dmg  2015-06-10 15:27  48M
    cmake-3.3.0-rc2-Darwin-universal.tar.Z  2015-06-10 15:27  68M
    cmake-3.3.0-rc2-Darwin-universal.tar.gz  2015-06-10 15:27  47M
    cmake-3.3.0-rc2-Darwin-x86_64.dmg  2015-06-10 15:27  21M
    cmake-3.3.0-rc2-Darwin-x86_64.tar.Z  2015-06-10 15:27  31M
    cmake-3.3.0-rc2-Darwin-x86_64.tar.gz  2015-06-10 15:27  21M
    cmake-3.3.0-rc2-Linux-i386.sh  2015-06-10 15:27  26M
    cmake-3.3.0-rc2-Linux-i386.tar.Z  2015-06-10 15:27  36M
    cmake-3.3.0-rc2-Linux-i386.tar.gz  2015-06-10 15:27  26M
    cmake-3.3.0-rc2-Linux-x86_64.sh  2015-06-10 15:27  26M
    cmake-3.3.0-rc2-Linux-x86_64.tar.Z  2015-06-10 15:27  37M
    cmake-3.3.0-rc2-Linux-x86_64.tar.gz  2015-06-10 15:27  26M
    cmake-3.3.0-rc2-SHA-256.txt  2016-04-13 12:48  1.6K
    cmake-3.3.0-rc2-SHA-256.txt.asc  2016-04-13 12:48  819
    cmake-3.3.0-rc2-win32-x86.exe  2015-06-10 15:27  12M
    cmake-3.3.0-rc2-win32-x86.zip  2015-06-10 15:27  16M
    cmake-3.3.0-rc2.tar.Z  2015-06-10 15:27  10M
    cmake-3.3.0-rc2.tar.gz  2015-06-10 15:27  6.3M
    cmake-3.3.0-rc2.zip  2015-06-10 15:27  10M
    cmake-3.3.0-rc3-1-src.tar.bz2  2015-06-26 13:34  5.0M
    cmake-3.3.0-rc3-1.tar.bz2  2015-06-26 13:34  9.8M
    cmake-3.3.0-rc3-Darwin-universal.dmg  2015-06-26 13:34  48M
    cmake-3.3.0-rc3-Darwin-universal.tar.Z  2015-06-26 13:34  68M
    cmake-3.3.0-rc3-Darwin-universal.tar.gz  2015-06-26 13:34  47M
    cmake-3.3.0-rc3-Darwin-x86_64.dmg  2015-06-26 13:34  22M
    cmake-3.3.0-rc3-Darwin-x86_64.tar.Z  2015-06-26 13:34  31M
    cmake-3.3.0-rc3-Darwin-x86_64.tar.gz  2015-06-26 13:34  21M
    cmake-3.3.0-rc3-Linux-i386.sh  2015-06-26 13:34  26M
    cmake-3.3.0-rc3-Linux-i386.tar.Z  2015-06-26 13:34  37M
    cmake-3.3.0-rc3-Linux-i386.tar.gz  2015-06-26 13:34  26M
    cmake-3.3.0-rc3-Linux-x86_64.sh  2015-06-26 13:34  27M
    cmake-3.3.0-rc3-Linux-x86_64.tar.Z  2015-06-26 13:34  37M
    cmake-3.3.0-rc3-Linux-x86_64.tar.gz  2015-06-26 13:34  27M
    cmake-3.3.0-rc3-SHA-256.txt  2016-04-13 12:48  1.6K
    cmake-3.3.0-rc3-SHA-256.txt.asc  2016-04-13 12:48  819
    cmake-3.3.0-rc3-win32-x86.exe  2015-06-26 13:34  12M
    cmake-3.3.0-rc3-win32-x86.zip  2015-06-26 13:34  16M
    cmake-3.3.0-rc3.tar.Z  2015-06-26 13:34  10M
    cmake-3.3.0-rc3.tar.gz  2015-06-26 13:34  6.3M
    cmake-3.3.0-rc3.zip  2015-06-26 13:34  10M
    cmake-3.3.0-rc4-1-src.tar.bz2  2015-07-13 15:56  5.0M
    cmake-3.3.0-rc4-1.tar.bz2  2015-07-13 15:56  9.8M
    cmake-3.3.0-rc4-Darwin-universal.dmg  2015-07-13 15:56  48M
    cmake-3.3.0-rc4-Darwin-universal.tar.Z  2015-07-13 15:56  68M
    cmake-3.3.0-rc4-Darwin-universal.tar.gz  2015-07-13 15:55  47M
    cmake-3.3.0-rc4-Darwin-x86_64.dmg  2015-07-13 15:55  22M
    cmake-3.3.0-rc4-Darwin-x86_64.tar.Z  2015-07-13 15:55  31M
    cmake-3.3.0-rc4-Darwin-x86_64.tar.gz  2015-07-13 15:55  21M
    cmake-3.3.0-rc4-Linux-i386.sh  2015-07-13 15:55  26M
    cmake-3.3.0-rc4-Linux-i386.tar.Z  2015-07-13 15:55  37M
    cmake-3.3.0-rc4-Linux-i386.tar.gz  2015-07-13 15:55  26M
    cmake-3.3.0-rc4-Linux-x86_64.sh  2015-07-13 15:55  27M
    cmake-3.3.0-rc4-Linux-x86_64.tar.Z  2015-07-13 15:55  37M
    cmake-3.3.0-rc4-Linux-x86_64.tar.gz  2015-07-13 15:55  27M
    cmake-3.3.0-rc4-SHA-256.txt  2016-04-13 12:48  1.6K
    cmake-3.3.0-rc4-SHA-256.txt.asc  2016-04-13 12:48  819
    cmake-3.3.0-rc4-win32-x86.exe  2015-07-13 15:55  12M
    cmake-3.3.0-rc4-win32-x86.zip  2015-07-13 15:55  16M
    cmake-3.3.0-rc4.tar.Z  2015-07-13 15:55  10M
    cmake-3.3.0-rc4.tar.gz  2015-07-13 15:55  6.3M
    cmake-3.3.0-rc4.zip  2015-07-13 15:55  10M
    cmake-3.3.0-win32-x86.exe  2015-07-23 16:38  12M
    cmake-3.3.0-win32-x86.zip  2015-07-23 16:38  16M
    cmake-3.3.0.tar.Z  2015-07-23 16:38  10M
    cmake-3.3.0.tar.gz  2015-07-23 16:38  6.3M
    cmake-3.3.0.zip  2015-07-23 16:38  10M
    cmake-3.3.1-1-src.tar.bz2  2015-08-13 15:55  5.0M
    cmake-3.3.1-1.tar.bz2  2015-08-13 15:55  9.8M
    cmake-3.3.1-Darwin-universal.dmg  2015-08-13 15:55  48M
    cmake-3.3.1-Darwin-universal.tar.Z  2015-08-13 15:55  68M
    cmake-3.3.1-Darwin-universal.tar.gz  2015-08-13 15:55  47M
    cmake-3.3.1-Darwin-x86_64.dmg  2015-08-13 15:55  22M
    cmake-3.3.1-Darwin-x86_64.tar.Z  2015-08-13 15:55  31M
    cmake-3.3.1-Darwin-x86_64.tar.gz  2015-08-13 15:55  21M
    cmake-3.3.1-Linux-i386.sh  2015-08-13 15:55  26M
    cmake-3.3.1-Linux-i386.tar.Z  2015-08-13 15:55  37M
    cmake-3.3.1-Linux-i386.tar.gz  2015-08-13 15:55  26M
    cmake-3.3.1-Linux-x86_64.sh  2015-08-13 15:55  27M
    cmake-3.3.1-Linux-x86_64.tar.Z  2015-08-13 15:55  37M
    cmake-3.3.1-Linux-x86_64.tar.gz  2015-08-13 15:55  27M
    cmake-3.3.1-SHA-256.txt  2015-08-13 15:55  1.3K
    cmake-3.3.1-SHA-256.txt.asc  2015-08-13 15:55  819
    cmake-3.3.1-win32-x86.exe  2015-08-13 15:55  12M
    cmake-3.3.1-win32-x86.zip  2015-08-13 15:55  16M
    cmake-3.3.1.tar.Z  2015-08-13 15:54  10M
    cmake-3.3.1.tar.gz  2015-08-13 15:54  6.3M
    cmake-3.3.1.zip  2015-08-13 15:54  10M
    cmake-3.3.2-1-src.tar.bz2  2015-09-17 14:36  5.0M
    cmake-3.3.2-1.tar.bz2  2015-09-17 14:36  9.8M
    cmake-3.3.2-Darwin-universal.dmg  2015-09-17 14:36  48M
    cmake-3.3.2-Darwin-universal.tar.Z  2015-09-17 14:36  68M
    cmake-3.3.2-Darwin-universal.tar.gz  2015-09-17 14:36  47M
    cmake-3.3.2-Darwin-x86_64.dmg  2015-09-17 14:35  22M
    cmake-3.3.2-Darwin-x86_64.tar.Z  2015-09-17 14:35  31M
    cmake-3.3.2-Darwin-x86_64.tar.gz  2015-09-17 14:35  21M
    cmake-3.3.2-Linux-i386.sh  2015-09-17 14:35  26M
    cmake-3.3.2-Linux-i386.tar.Z  2015-09-17 14:35  37M
    cmake-3.3.2-Linux-i386.tar.gz  2015-09-17 14:35  26M
    cmake-3.3.2-Linux-x86_64.sh  2015-09-17 14:35  27M
    cmake-3.3.2-Linux-x86_64.tar.Z  2015-09-17 14:35  37M
    cmake-3.3.2-Linux-x86_64.tar.gz  2015-09-17 14:35  27M
    cmake-3.3.2-SHA-256.txt  2015-09-17 14:35  1.6K
    cmake-3.3.2-SHA-256.txt.asc  2015-09-17 14:35  819
    cmake-3.3.2-win32-x86.exe  2015-09-17 14:35  12M
    cmake-3.3.2-win32-x86.zip  2015-09-17 14:35  16M
    cmake-3.3.2.tar.Z  2015-09-17 14:35  10M
    cmake-3.3.2.tar.gz  2015-09-17 14:35  6.3M
    cmake-3.3.2.zip  2015-09-17 14:35  10M

    diff --git a/bitbake/lib/bb/tests/fetch-testdata/files/v3.4/index.html b/bitbake/lib/bb/tests/fetch-testdata/files/v3.4/index.html
    deleted file mode 100644
    index e7b56da758..0000000000
    --- a/bitbake/lib/bb/tests/fetch-testdata/files/v3.4/index.html
    +++ /dev/null
    @@ -1,127 +0,0 @@

    Index of /files/v3.4

    Name  Last modified  Size  Description

    Parent Directory  -
    cmake-3.4.0-Darwin-x86_64.dmg  2015-11-12 13:42  22M
    cmake-3.4.0-Darwin-x86_64.tar.Z  2015-11-12 13:42  31M
    cmake-3.4.0-Darwin-x86_64.tar.gz  2015-11-12 13:42  22M
    cmake-3.4.0-Linux-i386.sh  2015-11-12 13:42  26M
    cmake-3.4.0-Linux-i386.tar.Z  2015-11-12 13:42  37M
    cmake-3.4.0-Linux-i386.tar.gz  2015-11-12 13:42  26M
    cmake-3.4.0-Linux-x86_64.sh  2015-11-12 13:42  27M
    cmake-3.4.0-Linux-x86_64.tar.Z  2015-11-12 13:42  38M
    cmake-3.4.0-Linux-x86_64.tar.gz  2015-11-12 13:42  27M
    cmake-3.4.0-SHA-256.txt  2015-11-12 13:42  1.3K
    cmake-3.4.0-SHA-256.txt.asc  2015-11-12 13:42  819
    cmake-3.4.0-rc1-Darwin-x86_64.dmg  2015-10-06 11:02  22M
    cmake-3.4.0-rc1-Darwin-x86_64.tar.Z  2015-10-06 11:02  31M
    cmake-3.4.0-rc1-Darwin-x86_64.tar.gz  2015-10-06 11:02  22M
    cmake-3.4.0-rc1-Linux-i386.sh  2015-10-06 11:02  26M
    cmake-3.4.0-rc1-Linux-i386.tar.Z  2015-10-06 11:02  37M
    cmake-3.4.0-rc1-Linux-i386.tar.gz  2015-10-06 11:01  26M
    cmake-3.4.0-rc1-Linux-x86_64.sh  2015-10-06 11:01  27M
    cmake-3.4.0-rc1-Linux-x86_64.tar.Z  2015-10-06 11:01  38M
    cmake-3.4.0-rc1-Linux-x86_64.tar.gz  2015-10-06 11:01  27M
    cmake-3.4.0-rc1-SHA-256.txt  2015-10-06 11:01  1.3K
    cmake-3.4.0-rc1-SHA-256.txt.asc  2015-10-06 11:01  819
    cmake-3.4.0-rc1-win32-x86.exe  2015-10-06 11:01  13M
    cmake-3.4.0-rc1-win32-x86.zip  2015-10-06 11:01  16M
    cmake-3.4.0-rc1.tar.Z  2015-10-06 11:01  10M
    cmake-3.4.0-rc1.tar.gz  2015-10-06 11:01  6.4M
    cmake-3.4.0-rc1.zip  2015-10-06 11:01  11M
    cmake-3.4.0-rc2-Darwin-x86_64.dmg  2015-10-21 16:27  22M
    cmake-3.4.0-rc2-Darwin-x86_64.tar.Z  2015-10-21 16:27  31M
    cmake-3.4.0-rc2-Darwin-x86_64.tar.gz  2015-10-21 16:27  22M
    cmake-3.4.0-rc2-Linux-i386.sh  2015-10-21 16:27  26M
    cmake-3.4.0-rc2-Linux-i386.tar.Z  2015-10-21 16:27  37M
    cmake-3.4.0-rc2-Linux-i386.tar.gz  2015-10-21 16:27  26M
    cmake-3.4.0-rc2-Linux-x86_64.sh  2015-10-21 16:27  27M
    cmake-3.4.0-rc2-Linux-x86_64.tar.Z  2015-10-21 16:27  38M
    cmake-3.4.0-rc2-Linux-x86_64.tar.gz  2015-10-21 16:27  27M
    cmake-3.4.0-rc2-SHA-256.txt  2015-10-21 16:26  1.3K
    cmake-3.4.0-rc2-SHA-256.txt.asc  2015-10-21 16:26  819
    cmake-3.4.0-rc2-win32-x86.exe  2015-10-21 16:26  13M
    cmake-3.4.0-rc2-win32-x86.zip  2015-10-21 16:26  16M
    cmake-3.4.0-rc2.tar.Z  2015-10-21 16:26  10M
    cmake-3.4.0-rc2.tar.gz  2015-10-21 16:26  6.4M
    cmake-3.4.0-rc2.zip  2015-10-21 16:26  11M
    cmake-3.4.0-rc3-Darwin-x86_64.dmg  2015-11-03 11:09  22M
    cmake-3.4.0-rc3-Darwin-x86_64.tar.Z  2015-11-03 11:09  31M
    cmake-3.4.0-rc3-Darwin-x86_64.tar.gz  2015-11-03 11:09  22M
    cmake-3.4.0-rc3-Linux-i386.sh  2015-11-03 11:09  26M
    cmake-3.4.0-rc3-Linux-i386.tar.Z  2015-11-03 11:09  37M
    cmake-3.4.0-rc3-Linux-i386.tar.gz  2015-11-03 11:09  26M
    cmake-3.4.0-rc3-Linux-x86_64.sh  2015-11-03 11:09  27M
    cmake-3.4.0-rc3-Linux-x86_64.tar.Z  2015-11-03 11:09  38M
    cmake-3.4.0-rc3-Linux-x86_64.tar.gz  2015-11-03 11:09  27M
    cmake-3.4.0-rc3-SHA-256.txt  2015-11-03 11:09  1.3K
    cmake-3.4.0-rc3-SHA-256.txt.asc  2015-11-03 11:09  819
    cmake-3.4.0-rc3-win32-x86.exe  2015-11-03 11:09  13M
    cmake-3.4.0-rc3-win32-x86.zip  2015-11-03 11:09  16M
    cmake-3.4.0-rc3.tar.Z  2015-11-03 11:09  10M
    cmake-3.4.0-rc3.tar.gz  2015-11-03 11:08  6.4M
    cmake-3.4.0-rc3.zip  2015-11-03 11:08  11M
    cmake-3.4.0-win32-x86.exe  2015-11-12 13:42  13M
    cmake-3.4.0-win32-x86.zip  2015-11-12 13:42  16M
    cmake-3.4.0.tar.Z  2015-11-12 13:42  10M
    cmake-3.4.0.tar.gz  2015-11-12 13:42  6.4M
    cmake-3.4.0.zip  2015-11-12 13:42  10M
    cmake-3.4.1-Darwin-x86_64.dmg  2015-12-02 14:42  22M
    cmake-3.4.1-Darwin-x86_64.tar.Z  2015-12-02 14:42  31M
    cmake-3.4.1-Darwin-x86_64.tar.gz  2015-12-02 14:42  22M
    cmake-3.4.1-Linux-i386.sh  2015-12-02 14:42  26M
    cmake-3.4.1-Linux-i386.tar.Z  2015-12-02 14:42  37M
    cmake-3.4.1-Linux-i386.tar.gz  2015-12-02 14:42  26M
    cmake-3.4.1-Linux-x86_64.sh  2015-12-02 14:42  27M
    cmake-3.4.1-Linux-x86_64.tar.Z  2015-12-02 14:42  38M
    cmake-3.4.1-Linux-x86_64.tar.gz  2015-12-02 14:42  27M
    cmake-3.4.1-SHA-256.txt  2015-12-02 14:42  1.3K
    cmake-3.4.1-SHA-256.txt.asc  2015-12-02 14:42  819
    cmake-3.4.1-win32-x86.exe  2015-12-02 14:42  13M
    cmake-3.4.1-win32-x86.zip  2015-12-02 14:42  16M
    cmake-3.4.1.tar.Z  2015-12-02 14:42  10M
    cmake-3.4.1.tar.gz  2015-12-02 14:42  6.4M
    cmake-3.4.1.zip  2015-12-02 14:41  10M
    cmake-3.4.2-Darwin-x86_64.dmg  2016-01-19 14:58  22M
    cmake-3.4.2-Darwin-x86_64.tar.Z  2016-01-19 14:58  31M
    cmake-3.4.2-Darwin-x86_64.tar.gz  2016-01-19 14:58  22M
    cmake-3.4.2-Linux-i386.sh  2016-01-19 14:58  26M
    cmake-3.4.2-Linux-i386.tar.Z  2016-01-19 14:58  37M
    cmake-3.4.2-Linux-i386.tar.gz  2016-01-19 14:58  26M
    cmake-3.4.2-Linux-x86_64.sh  2016-01-19 14:58  27M
    cmake-3.4.2-Linux-x86_64.tar.Z  2016-01-19 14:58  38M
    cmake-3.4.2-Linux-x86_64.tar.gz  2016-01-19 14:58  27M
    cmake-3.4.2-SHA-256.txt  2016-01-19 14:58  1.3K
    cmake-3.4.2-SHA-256.txt.asc  2016-01-19 14:58  819
    cmake-3.4.2-win32-x86.exe  2016-01-19 14:58  13M
    cmake-3.4.2-win32-x86.zip  2016-01-19 14:58  16M
    cmake-3.4.2.tar.Z  2016-01-19 14:58  10M
    cmake-3.4.2.tar.gz  2016-01-19 14:58  6.4M
    cmake-3.4.2.zip  2016-01-19 14:58  10M
    cmake-3.4.3-Darwin-x86_64.dmg  2016-01-25 14:29  22M
    cmake-3.4.3-Darwin-x86_64.tar.Z  2016-01-25 14:29  31M
    cmake-3.4.3-Darwin-x86_64.tar.gz  2016-01-25 14:29  22M
    cmake-3.4.3-Linux-i386.sh  2016-01-25 14:29  26M
    cmake-3.4.3-Linux-i386.tar.Z  2016-01-25 14:29  37M
    cmake-3.4.3-Linux-i386.tar.gz  2016-01-25 14:29  26M
    cmake-3.4.3-Linux-x86_64.sh  2016-01-25 14:29  27M
    cmake-3.4.3-Linux-x86_64.tar.Z  2016-01-25 14:29  38M
    cmake-3.4.3-Linux-x86_64.tar.gz  2016-01-25 14:29  27M
    cmake-3.4.3-SHA-256.txt  2016-01-25 14:29  1.3K
    cmake-3.4.3-SHA-256.txt.asc  2016-01-25 14:29  819
    cmake-3.4.3-win32-x86.exe  2016-01-25 14:29  13M
    cmake-3.4.3-win32-x86.zip  2016-01-25 14:29  16M
    cmake-3.4.3.tar.Z  2016-01-25 14:29  10M
    cmake-3.4.3.tar.gz  2016-01-25 14:29  6.4M
    cmake-3.4.3.zip  2016-01-25 14:29  10M
    cygwin/  2016-01-25 14:34  -

    diff --git a/bitbake/lib/bb/tests/fetch-testdata/files/v3.5/index.html b/bitbake/lib/bb/tests/fetch-testdata/files/v3.5/index.html
    deleted file mode 100644
    index 03d4f7c151..0000000000
    --- a/bitbake/lib/bb/tests/fetch-testdata/files/v3.5/index.html
    +++ /dev/null
    @@ -1,111 +0,0 @@

    Index of /files/v3.5

    Name  Last modified  Size  Description

    Parent Directory  -
    cmake-3.5.0-Darwin-x86_64.dmg  2016-03-08 11:17  22M
    cmake-3.5.0-Darwin-x86_64.tar.Z  2016-03-08 11:17  31M
    cmake-3.5.0-Darwin-x86_64.tar.gz  2016-03-08 11:17  22M
    cmake-3.5.0-Linux-i386.sh  2016-03-08 11:17  27M
    cmake-3.5.0-Linux-i386.tar.Z  2016-03-08 11:17  38M
    cmake-3.5.0-Linux-i386.tar.gz  2016-03-08 11:17  27M
    cmake-3.5.0-Linux-x86_64.sh  2016-03-08 11:17  27M
    cmake-3.5.0-Linux-x86_64.tar.Z  2016-03-08 11:17  38M
    cmake-3.5.0-Linux-x86_64.tar.gz  2016-03-08 11:17  27M
    cmake-3.5.0-SHA-256.txt  2016-03-08 11:17  1.3K
    cmake-3.5.0-SHA-256.txt.asc  2016-03-08 11:16  819
    cmake-3.5.0-rc1-Darwin-x86_64.dmg  2016-02-02 15:51  22M
    cmake-3.5.0-rc1-Darwin-x86_64.tar.Z  2016-02-02 15:51  31M
    cmake-3.5.0-rc1-Darwin-x86_64.tar.gz  2016-02-02 15:51  22M
    cmake-3.5.0-rc1-Linux-i386.sh  2016-02-02 15:51  27M
    cmake-3.5.0-rc1-Linux-i386.tar.Z  2016-02-02 15:51  38M
    cmake-3.5.0-rc1-Linux-i386.tar.gz  2016-02-02 15:51  27M
    cmake-3.5.0-rc1-Linux-x86_64.sh  2016-02-02 15:51  27M
    cmake-3.5.0-rc1-Linux-x86_64.tar.Z  2016-02-02 15:51  38M
    cmake-3.5.0-rc1-Linux-x86_64.tar.gz  2016-02-02 15:51  27M
    cmake-3.5.0-rc1-SHA-256.txt  2016-02-03 13:29  1.3K
    cmake-3.5.0-rc1-SHA-256.txt.asc  2016-02-03 13:29  819
    cmake-3.5.0-rc1-win32-x86.msi  2016-02-03 13:29  15M
    cmake-3.5.0-rc1-win32-x86.zip  2016-02-03 13:29  20M
    cmake-3.5.0-rc1.tar.Z  2016-02-02 15:51  11M
    cmake-3.5.0-rc1.tar.gz  2016-02-02 15:51  6.5M
    cmake-3.5.0-rc1.zip  2016-02-02 15:51  11M
    cmake-3.5.0-rc2-Darwin-x86_64.dmg  2016-02-10 15:03  22M
    cmake-3.5.0-rc2-Darwin-x86_64.tar.Z  2016-02-10 15:03  31M
    cmake-3.5.0-rc2-Darwin-x86_64.tar.gz  2016-02-10 15:03  22M
    cmake-3.5.0-rc2-Linux-i386.sh  2016-02-10 15:03  27M
    cmake-3.5.0-rc2-Linux-i386.tar.Z  2016-02-10 15:03  38M
    cmake-3.5.0-rc2-Linux-i386.tar.gz  2016-02-10 15:03  27M
    cmake-3.5.0-rc2-Linux-x86_64.sh  2016-02-10 15:03  27M
    cmake-3.5.0-rc2-Linux-x86_64.tar.Z  2016-02-10 15:03  38M
    cmake-3.5.0-rc2-Linux-x86_64.tar.gz  2016-02-10 15:03  27M
    cmake-3.5.0-rc2-SHA-256.txt  2016-02-10 15:03  1.3K
    cmake-3.5.0-rc2-SHA-256.txt.asc  2016-02-10 15:03  819
    cmake-3.5.0-rc2-win32-x86.msi  2016-02-10 15:03  15M
    cmake-3.5.0-rc2-win32-x86.zip  2016-02-10 15:03  20M
    cmake-3.5.0-rc2.tar.Z  2016-02-10 15:02  11M
    cmake-3.5.0-rc2.tar.gz  2016-02-10 15:02  6.5M
    cmake-3.5.0-rc2.zip  2016-02-10 15:02  11M
    cmake-3.5.0-rc3-Darwin-x86_64.dmg  2016-02-18 15:41  22M
    cmake-3.5.0-rc3-Darwin-x86_64.tar.Z  2016-02-18 15:41  31M
    cmake-3.5.0-rc3-Darwin-x86_64.tar.gz  2016-02-18 15:41  22M
    cmake-3.5.0-rc3-Linux-i386.sh  2016-02-18 15:41  27M
    cmake-3.5.0-rc3-Linux-i386.tar.Z  2016-02-18 15:41  38M
    cmake-3.5.0-rc3-Linux-i386.tar.gz  2016-02-18 15:41  27M
    cmake-3.5.0-rc3-Linux-x86_64.sh  2016-02-18 15:41  27M
    cmake-3.5.0-rc3-Linux-x86_64.tar.Z  2016-02-18 15:41  38M
    cmake-3.5.0-rc3-Linux-x86_64.tar.gz  2016-02-18 15:41  27M
    cmake-3.5.0-rc3-SHA-256.txt  2016-02-18 15:41  1.3K
    cmake-3.5.0-rc3-SHA-256.txt.asc  2016-02-18 15:41  819
    cmake-3.5.0-rc3-win32-x86.msi  2016-02-18 15:41  15M
    cmake-3.5.0-rc3-win32-x86.zip  2016-02-18 15:41  20M
    cmake-3.5.0-rc3.tar.Z  2016-02-18 15:41  11M
    cmake-3.5.0-rc3.tar.gz  2016-02-18 15:40  6.5M
    cmake-3.5.0-rc3.zip  2016-02-18 15:40  11M
    cmake-3.5.0-win32-x86.msi  2016-03-08 11:16  15M
    cmake-3.5.0-win32-x86.zip  2016-03-08 11:16  20M
    cmake-3.5.0.tar.Z  2016-03-08 11:16  11M
    cmake-3.5.0.tar.gz  2016-03-08 11:16  6.5M
    cmake-3.5.0.zip  2016-03-08 11:16  11M
    cmake-3.5.1-Darwin-x86_64.dmg  2016-03-24 16:00  22M
    cmake-3.5.1-Darwin-x86_64.tar.Z  2016-03-24 16:00  31M
    cmake-3.5.1-Darwin-x86_64.tar.gz  2016-03-24 16:00  22M
    cmake-3.5.1-Linux-i386.sh  2016-03-24 16:00  27M
    cmake-3.5.1-Linux-i386.tar.Z  2016-03-24 16:00  38M
    cmake-3.5.1-Linux-i386.tar.gz  2016-03-24 16:00  27M
    cmake-3.5.1-Linux-x86_64.sh  2016-03-24 16:00  27M
    cmake-3.5.1-Linux-x86_64.tar.Z  2016-03-24 16:00  38M
    cmake-3.5.1-Linux-x86_64.tar.gz  2016-03-24 16:00  27M
    cmake-3.5.1-SHA-256.txt  2016-03-24 16:00  1.3K
    cmake-3.5.1-SHA-256.txt.asc  2016-03-24 16:00  819
    cmake-3.5.1-win32-x86.msi  2016-03-24 16:00  15M
    cmake-3.5.1-win32-x86.zip  2016-03-24 16:00  20M
    cmake-3.5.1.tar.Z  2016-03-24 16:00  11M
    cmake-3.5.1.tar.gz  2016-03-24 16:00  6.5M
    cmake-3.5.1.zip  2016-03-24 16:00  11M
    cmake-3.5.2-Darwin-x86_64.dmg  2016-04-15 13:40  22M
    cmake-3.5.2-Darwin-x86_64.tar.Z  2016-04-15 13:40  31M
    cmake-3.5.2-Darwin-x86_64.tar.gz  2016-04-15 13:40  22M
    cmake-3.5.2-Linux-i386.sh  2016-04-15 13:40  27M
    cmake-3.5.2-Linux-i386.tar.Z  2016-04-15 13:40  38M
    cmake-3.5.2-Linux-i386.tar.gz  2016-04-15 13:40  27M
    cmake-3.5.2-Linux-x86_64.sh  2016-04-15 13:40  27M
    cmake-3.5.2-Linux-x86_64.tar.Z  2016-04-15 13:40  38M
    cmake-3.5.2-Linux-x86_64.tar.gz  2016-04-15 13:40  27M
    cmake-3.5.2-SHA-256.txt  2016-04-15 13:40  1.3K
    cmake-3.5.2-SHA-256.txt.asc  2016-04-15 13:40  819
    cmake-3.5.2-win32-x86.msi  2016-04-15 13:40  15M
    cmake-3.5.2-win32-x86.zip  2016-04-15 13:40  20M
    cmake-3.5.2.tar.Z  2016-04-15 13:40  11M
    cmake-3.5.2.tar.gz  2016-04-15 13:40  6.5M
    cmake-3.5.2.zip  2016-04-15 13:40  11M
    cygwin/  2016-04-15 13:42  -

    diff --git a/bitbake/lib/bb/tests/fetch-testdata/files/v3.6/index.html b/bitbake/lib/bb/tests/fetch-testdata/files/v3.6/index.html
    deleted file mode 100644
    index 4fca3a5fcd..0000000000
    --- a/bitbake/lib/bb/tests/fetch-testdata/files/v3.6/index.html
    +++ /dev/null
    @@ -1,159 +0,0 @@

    Index of /files/v3.6

    Name  Last modified  Size  Description

    Parent Directory  -
    cmake-3.6.0-Darwin-x86_64.dmg  2016-07-07 13:05  25M
    cmake-3.6.0-Darwin-x86_64.tar.Z  2016-07-07 13:05  36M
    cmake-3.6.0-Darwin-x86_64.tar.gz  2016-07-07 13:05  25M
    cmake-3.6.0-Linux-i386.sh  2016-07-07 13:05  27M
    cmake-3.6.0-Linux-i386.tar.Z  2016-07-07 13:05  38M
    cmake-3.6.0-Linux-i386.tar.gz  2016-07-07 13:05  27M
    cmake-3.6.0-Linux-x86_64.sh  2016-07-07 13:05  27M
    cmake-3.6.0-Linux-x86_64.tar.Z  2016-07-07 13:05  38M
    cmake-3.6.0-Linux-x86_64.tar.gz  2016-07-07 13:05  27M
    cmake-3.6.0-SHA-256.txt  2016-07-07 13:05  1.4K
    cmake-3.6.0-SHA-256.txt.asc  2016-07-07 13:05  819
    cmake-3.6.0-rc1-Darwin-x86_64.dmg  2016-06-03 14:50  25M
    cmake-3.6.0-rc1-Darwin-x86_64.tar.Z  2016-06-03 14:50  36M
    cmake-3.6.0-rc1-Darwin-x86_64.tar.gz  2016-06-03 14:50  25M
    cmake-3.6.0-rc1-Linux-i386.sh  2016-06-03 14:50  27M
    cmake-3.6.0-rc1-Linux-i386.tar.Z  2016-06-03 14:50  38M
    cmake-3.6.0-rc1-Linux-i386.tar.gz  2016-06-03 14:50  27M
    cmake-3.6.0-rc1-Linux-x86_64.sh  2016-06-03 14:50  27M
    cmake-3.6.0-rc1-Linux-x86_64.tar.Z  2016-06-03 14:50  38M
    cmake-3.6.0-rc1-Linux-x86_64.tar.gz  2016-06-03 14:49  27M
    cmake-3.6.0-rc1-SHA-256.txt  2016-06-03 14:49  1.5K
    cmake-3.6.0-rc1-SHA-256.txt.asc  2016-06-03 14:49  819
    cmake-3.6.0-rc1-win32-x86.msi  2016-06-03 14:49  15M
    cmake-3.6.0-rc1-win32-x86.zip  2016-06-03 14:49  21M
    cmake-3.6.0-rc1-win64-x64.msi  2016-06-03 14:49  15M
    cmake-3.6.0-rc1-win64-x64.zip  2016-06-03 14:49  19M
    cmake-3.6.0-rc1.tar.Z  2016-06-03 14:49  11M
    cmake-3.6.0-rc1.tar.gz  2016-06-03 14:49  6.6M
    cmake-3.6.0-rc1.zip  2016-06-03 14:49  11M
    cmake-3.6.0-rc2-Darwin-x86_64.dmg  2016-06-13 14:29  25M
    cmake-3.6.0-rc2-Darwin-x86_64.tar.Z  2016-06-13 14:29  36M
    cmake-3.6.0-rc2-Darwin-x86_64.tar.gz  2016-06-13 14:29  25M
    cmake-3.6.0-rc2-Linux-i386.sh  2016-06-13 14:29  27M
    cmake-3.6.0-rc2-Linux-i386.tar.Z  2016-06-13 14:29  38M
    cmake-3.6.0-rc2-Linux-i386.tar.gz  2016-06-13 14:29  27M
    cmake-3.6.0-rc2-Linux-x86_64.sh  2016-06-13 14:29  27M
    cmake-3.6.0-rc2-Linux-x86_64.tar.Z  2016-06-13 14:29  38M
    cmake-3.6.0-rc2-Linux-x86_64.tar.gz  2016-06-13 14:29  27M
    cmake-3.6.0-rc2-SHA-256.txt  2016-06-13 14:29  1.5K
    cmake-3.6.0-rc2-SHA-256.txt.asc  2016-06-13 14:29  819
    cmake-3.6.0-rc2-win32-x86.msi  2016-06-13 14:29  15M
    cmake-3.6.0-rc2-win32-x86.zip  2016-06-13 14:29  21M
    cmake-3.6.0-rc2-win64-x64.msi  2016-06-13 14:29  15M
    cmake-3.6.0-rc2-win64-x64.zip  2016-06-13 14:29  19M
    cmake-3.6.0-rc2.tar.Z  2016-06-13 14:29  11M
    cmake-3.6.0-rc2.tar.gz  2016-06-13 14:28  6.6M
    cmake-3.6.0-rc2.zip  2016-06-13 14:28  11M
    cmake-3.6.0-rc3-Darwin-x86_64.dmg  2016-06-22 13:58  25M
    cmake-3.6.0-rc3-Darwin-x86_64.tar.Z  2016-06-22 13:58  36M
    cmake-3.6.0-rc3-Darwin-x86_64.tar.gz  2016-06-22 13:57  25M
    cmake-3.6.0-rc3-Linux-i386.sh  2016-06-22 13:57  27M
    cmake-3.6.0-rc3-Linux-i386.tar.Z  2016-06-22 13:57  38M
    cmake-3.6.0-rc3-Linux-i386.tar.gz  2016-06-22 13:57  27M
    cmake-3.6.0-rc3-Linux-x86_64.sh  2016-06-22 13:57  27M
    cmake-3.6.0-rc3-Linux-x86_64.tar.Z  2016-06-22 13:57  38M
    cmake-3.6.0-rc3-Linux-x86_64.tar.gz  2016-06-22 13:57  27M
    cmake-3.6.0-rc3-SHA-256.txt  2016-06-22 13:57  1.5K
    cmake-3.6.0-rc3-SHA-256.txt.asc  2016-06-22 13:57  819
    cmake-3.6.0-rc3-win32-x86.msi  2016-06-22 13:57  15M
    cmake-3.6.0-rc3-win32-x86.zip  2016-06-22 13:57  21M
    cmake-3.6.0-rc3-win64-x64.msi  2016-06-22 13:57  15M
    cmake-3.6.0-rc3-win64-x64.zip  2016-06-22 13:57  19M
    cmake-3.6.0-rc3.tar.Z  2016-06-22 13:57  11M
    cmake-3.6.0-rc3.tar.gz  2016-06-22 13:57  6.6M
    cmake-3.6.0-rc3.zip  2016-06-22 13:57  11M
    cmake-3.6.0-rc4-Darwin-x86_64.dmg  2016-06-29 14:50  25M
    cmake-3.6.0-rc4-Darwin-x86_64.tar.Z  2016-06-29 14:50  36M
    cmake-3.6.0-rc4-Darwin-x86_64.tar.gz  2016-06-29 14:50  25M
    cmake-3.6.0-rc4-Linux-i386.sh  2016-06-29 14:50  27M
    cmake-3.6.0-rc4-Linux-i386.tar.Z  2016-06-29 14:50  38M
    cmake-3.6.0-rc4-Linux-i386.tar.gz  2016-06-29 14:50  27M
    cmake-3.6.0-rc4-Linux-x86_64.sh  2016-06-29 14:50  27M
    cmake-3.6.0-rc4-Linux-x86_64.tar.Z  2016-06-29 14:49  38M
    cmake-3.6.0-rc4-Linux-x86_64.tar.gz  2016-06-29 14:49  27M
    cmake-3.6.0-rc4-SHA-256.txt  2016-06-29 14:49  1.5K
    cmake-3.6.0-rc4-SHA-256.txt.asc  2016-06-29 14:49  819
    cmake-3.6.0-rc4-win32-x86.msi  2016-06-29 14:49  15M
    cmake-3.6.0-rc4-win32-x86.zip  2016-06-29 14:49  21M
    cmake-3.6.0-rc4-win64-x64.msi  2016-06-29 14:49  15M
    cmake-3.6.0-rc4-win64-x64.zip  2016-06-29 14:49  19M
    cmake-3.6.0-rc4.tar.Z  2016-06-29 14:49  11M
    cmake-3.6.0-rc4.tar.gz  2016-06-29 14:49  6.6M
    cmake-3.6.0-rc4.zip  2016-06-29 14:49  11M
    cmake-3.6.0-win32-x86.msi  2016-07-07 13:04  15M
    cmake-3.6.0-win32-x86.zip  2016-07-07 13:04  20M
    cmake-3.6.0-win64-x64.msi  2016-07-07 13:04  15M
    cmake-3.6.0-win64-x64.zip  2016-07-07 13:04  19M
    cmake-3.6.0.tar.Z  2016-07-07 13:04  11M
    cmake-3.6.0.tar.gz  2016-07-07 13:04  6.6M
    cmake-3.6.0.zip  2016-07-07 13:04  11M
    cmake-3.6.1-Darwin-x86_64.dmg  2016-07-22 10:58  25M
    cmake-3.6.1-Darwin-x86_64.tar.Z  2016-07-22 10:58  36M
    cmake-3.6.1-Darwin-x86_64.tar.gz  2016-07-22 10:58  25M
    cmake-3.6.1-Linux-i386.sh  2016-07-22 10:58  27M
    cmake-3.6.1-Linux-i386.tar.Z  2016-07-22 10:58  38M
    cmake-3.6.1-Linux-i386.tar.gz  2016-07-22 10:58  27M
    cmake-3.6.1-Linux-x86_64.sh  2016-07-22 10:58  27M
    cmake-3.6.1-Linux-x86_64.tar.Z  2016-07-22 10:58  38M
    cmake-3.6.1-Linux-x86_64.tar.gz  2016-07-22 10:58  27M
    cmake-3.6.1-SHA-256.txt  2016-07-22 10:58  1.4K
    cmake-3.6.1-SHA-256.txt.asc  2016-07-22 10:58  819
    cmake-3.6.1-win32-x86.msi  2016-07-22 10:58  15M
    cmake-3.6.1-win32-x86.zip  2016-07-22 10:58  20M
    cmake-3.6.1-win64-x64.msi  2016-07-22 10:58  15M
    cmake-3.6.1-win64-x64.zip  2016-07-22 10:57  19M
    cmake-3.6.1.tar.Z  2016-07-22 10:57  11M
    cmake-3.6.1.tar.gz  2016-07-22 10:57  6.6M
    cmake-3.6.1.zip  2016-07-22 10:57  11M
    cmake-3.6.2-Darwin-x86_64.dmg  2016-09-07 14:29  25M
    cmake-3.6.2-Darwin-x86_64.tar.Z  2016-09-07 14:29  36M
    cmake-3.6.2-Darwin-x86_64.tar.gz  2016-09-07 14:29  25M
    cmake-3.6.2-Linux-i386.sh  2016-09-07 14:29  27M
    cmake-3.6.2-Linux-i386.tar.Z  2016-09-07 14:29  38M
    cmake-3.6.2-Linux-i386.tar.gz  2016-09-07 14:28  27M
    cmake-3.6.2-Linux-x86_64.sh  2016-09-07 14:28  27M
    cmake-3.6.2-Linux-x86_64.tar.Z  2016-09-07 14:28  38M
    cmake-3.6.2-Linux-x86_64.tar.gz  2016-09-07 14:28  27M
    cmake-3.6.2-SHA-256.txt  2016-09-07 14:28  1.4K
    cmake-3.6.2-SHA-256.txt.asc  2016-09-07 14:28  819
    cmake-3.6.2-win32-x86.msi  2016-09-07 14:28  15M
    cmake-3.6.2-win32-x86.zip  2016-09-07 14:28  20M
    cmake-3.6.2-win64-x64.msi  2016-09-07 14:28  15M
    cmake-3.6.2-win64-x64.zip  2016-09-07 14:28  19M
    cmake-3.6.2.tar.Z  2016-09-07 14:28  11M
    cmake-3.6.2.tar.gz  2016-09-07 14:28  6.6M
    cmake-3.6.2.zip  2016-09-07 14:28  11M
    cmake-3.6.3-Darwin-x86_64.dmg  2016-11-03 12:13  25M
    cmake-3.6.3-Darwin-x86_64.tar.Z  2016-11-03 12:13  36M
    cmake-3.6.3-Darwin-x86_64.tar.gz  2016-11-03 12:13  25M
    [TXT]cmake-3.6.3-Linux-i386.sh2016-11-03 12:13 27M 
    [   ]cmake-3.6.3-Linux-i386.tar.Z2016-11-03 12:13 38M 
    [   ]cmake-3.6.3-Linux-i386.tar.gz2016-11-03 12:13 27M 
    [TXT]cmake-3.6.3-Linux-x86_64.sh2016-11-03 12:13 27M 
    [   ]cmake-3.6.3-Linux-x86_64.tar.Z2016-11-03 12:13 38M 
    [   ]cmake-3.6.3-Linux-x86_64.tar.gz2016-11-03 12:13 27M 
    [TXT]cmake-3.6.3-SHA-256.txt2016-11-03 12:13 1.4K 
    [TXT]cmake-3.6.3-SHA-256.txt.asc2016-11-03 12:13 801  
    [   ]cmake-3.6.3-win32-x86.msi2016-11-03 12:13 15M 
    [   ]cmake-3.6.3-win32-x86.zip2016-11-03 12:13 20M 
    [   ]cmake-3.6.3-win64-x64.msi2016-11-03 12:12 17M 
    [   ]cmake-3.6.3-win64-x64.zip2016-11-03 12:12 23M 
    [   ]cmake-3.6.3.tar.Z2016-11-03 12:12 11M 
    [   ]cmake-3.6.3.tar.gz2016-11-03 12:12 6.6M 
    [   ]cmake-3.6.3.zip2016-11-03 12:12 11M 
    [DIR]cygwin/2016-09-07 14:44 -  
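
    The pages above are fixture data for bitbake's fetch tests: an upstream-version check scrapes exactly this kind of Apache "fancy index" to discover the newest release. As a minimal standalone sketch (an illustration, not bitbake's actual fetcher code), the versions can be pulled out with a regex and compared numerically per component, so that e.g. 3.10 sorts after 3.9:

        import re

        # Minimal standalone sketch (not bitbake's fetcher code): collect release
        # versions from an Apache "fancy index" page and keep the newest one.
        # Anchoring ".tar.gz" right after the dotted version skips -rcN tarballs.
        VERSION_RE = re.compile(r'cmake-(\d+)\.(\d+)\.(\d+)\.tar\.gz')

        def latest_release(index_html):
            versions = {tuple(map(int, m.groups())) for m in VERSION_RE.finditer(index_html)}
            if not versions:
                raise ValueError("no release tarballs found in listing")
            # Tuple comparison is numeric per component, so (3, 10, 0) > (3, 9, 6);
            # comparing raw strings would get that wrong ("3.10" < "3.9").
            return ".".join(map(str, max(versions)))

        # The regex only needs the filename itself, so fused name/date columns
        # like the rows above do not break it:
        print(latest_release("cmake-3.6.0.tar.gz2016-07-07 cmake-3.6.1.tar.gz2016-07-22"))  # 3.6.1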

    - diff --git a/bitbake/lib/bb/tests/fetch-testdata/files/v3.7/index.html b/bitbake/lib/bb/tests/fetch-testdata/files/v3.7/index.html deleted file mode 100644 index 4812f93f9f..0000000000 --- a/bitbake/lib/bb/tests/fetch-testdata/files/v3.7/index.html +++ /dev/null @@ -1,92 +0,0 @@ - - - - Index of /files/v3.7 - - -

    Index of /files/v3.7

    [ICO]  Name  Last modified  Size  Description

    [PARENTDIR]  Parent Directory  -
    [   ]  cmake-3.7.0-Darwin-x86_64.dmg  2016-11-11 14:01  26M
    [   ]  cmake-3.7.0-Darwin-x86_64.tar.gz  2016-11-11 14:01  25M
    [TXT]  cmake-3.7.0-Linux-x86_64.sh  2016-11-11 14:01  29M
    [   ]  cmake-3.7.0-Linux-x86_64.tar.gz  2016-11-11 14:01  29M
    [TXT]  cmake-3.7.0-SHA-256.txt  2016-11-11 14:01  1.0K
    [TXT]  cmake-3.7.0-SHA-256.txt.asc  2016-11-11 14:01  801
    [   ]  cmake-3.7.0-rc1-Darwin-x86_64.dmg  2016-10-04 15:23  26M
    [   ]  cmake-3.7.0-rc1-Darwin-x86_64.tar.gz  2016-10-04 15:23  25M
    [TXT]  cmake-3.7.0-rc1-Linux-x86_64.sh  2016-10-04 15:23  29M
    [   ]  cmake-3.7.0-rc1-Linux-x86_64.tar.gz  2016-10-04 15:23  29M
    [TXT]  cmake-3.7.0-rc1-SHA-256.txt  2016-10-04 15:23  1.0K
    [TXT]  cmake-3.7.0-rc1-SHA-256.txt.asc  2016-10-04 15:23  801
    [   ]  cmake-3.7.0-rc1-win32-x86.msi  2016-10-04 15:23  15M
    [   ]  cmake-3.7.0-rc1-win32-x86.zip  2016-10-04 15:23  21M
    [   ]  cmake-3.7.0-rc1-win64-x64.msi  2016-10-04 15:23  17M
    [   ]  cmake-3.7.0-rc1-win64-x64.zip  2016-10-04 15:23  24M
    [   ]  cmake-3.7.0-rc1.tar.Z  2016-10-04 15:23  11M
    [   ]  cmake-3.7.0-rc1.tar.gz  2016-10-04 15:23  7.0M
    [   ]  cmake-3.7.0-rc1.zip  2016-10-04 15:23  12M
    [   ]  cmake-3.7.0-rc2-Darwin-x86_64.dmg  2016-10-19 15:24  26M
    [   ]  cmake-3.7.0-rc2-Darwin-x86_64.tar.gz  2016-10-19 15:24  25M
    [TXT]  cmake-3.7.0-rc2-Linux-x86_64.sh  2016-10-19 15:24  29M
    [   ]  cmake-3.7.0-rc2-Linux-x86_64.tar.gz  2016-10-19 15:24  29M
    [TXT]  cmake-3.7.0-rc2-SHA-256.txt  2016-10-19 15:24  1.0K
    [TXT]  cmake-3.7.0-rc2-SHA-256.txt.asc  2016-10-19 15:24  801
    [   ]  cmake-3.7.0-rc2-win32-x86.msi  2016-10-19 15:23  15M
    [   ]  cmake-3.7.0-rc2-win32-x86.zip  2016-10-19 15:23  21M
    [   ]  cmake-3.7.0-rc2-win64-x64.msi  2016-10-19 15:23  17M
    [   ]  cmake-3.7.0-rc2-win64-x64.zip  2016-10-19 15:23  24M
    [   ]  cmake-3.7.0-rc2.tar.Z  2016-10-19 15:23  11M
    [   ]  cmake-3.7.0-rc2.tar.gz  2016-10-19 15:23  7.0M
    [   ]  cmake-3.7.0-rc2.zip  2016-10-19 15:23  12M
    [   ]  cmake-3.7.0-rc3-Darwin-x86_64.dmg  2016-11-04 15:26  26M
    [   ]  cmake-3.7.0-rc3-Darwin-x86_64.tar.gz  2016-11-04 15:26  25M
    [TXT]  cmake-3.7.0-rc3-Linux-x86_64.sh  2016-11-04 15:26  29M
    [   ]  cmake-3.7.0-rc3-Linux-x86_64.tar.gz  2016-11-04 15:26  29M
    [TXT]  cmake-3.7.0-rc3-SHA-256.txt  2016-11-04 15:26  1.0K
    [TXT]  cmake-3.7.0-rc3-SHA-256.txt.asc  2016-11-04 15:26  801
    [   ]  cmake-3.7.0-rc3-win32-x86.msi  2016-11-04 15:26  15M
    [   ]  cmake-3.7.0-rc3-win32-x86.zip  2016-11-04 15:26  21M
    [   ]  cmake-3.7.0-rc3-win64-x64.msi  2016-11-04 15:26  17M
    [   ]  cmake-3.7.0-rc3-win64-x64.zip  2016-11-04 15:26  24M
    [   ]  cmake-3.7.0-rc3.tar.Z  2016-11-04 15:26  11M
    [   ]  cmake-3.7.0-rc3.tar.gz  2016-11-04 15:26  7.0M
    [   ]  cmake-3.7.0-rc3.zip  2016-11-04 15:26  12M
    [   ]  cmake-3.7.0-win32-x86.msi  2016-11-11 14:01  15M
    [   ]  cmake-3.7.0-win32-x86.zip  2016-11-11 14:01  21M
    [   ]  cmake-3.7.0-win64-x64.msi  2016-11-11 14:01  17M
    [   ]  cmake-3.7.0-win64-x64.zip  2016-11-11 14:00  24M
    [   ]  cmake-3.7.0.tar.Z  2016-11-11 14:00  11M
    [   ]  cmake-3.7.0.tar.gz  2016-11-11 14:00  7.0M
    [   ]  cmake-3.7.0.zip  2016-11-11 14:00  11M
    [   ]  cmake-3.7.1-Darwin-x86_64.dmg  2016-11-30 14:25  26M
    [   ]  cmake-3.7.1-Darwin-x86_64.tar.gz  2016-11-30 14:25  25M
    [TXT]  cmake-3.7.1-Linux-x86_64.sh  2016-11-30 14:25  29M
    [   ]  cmake-3.7.1-Linux-x86_64.tar.gz  2016-11-30 14:25  29M
    [TXT]  cmake-3.7.1-SHA-256.txt  2016-11-30 14:25  1.0K
    [TXT]  cmake-3.7.1-SHA-256.txt.asc  2016-11-30 14:25  833
    [   ]  cmake-3.7.1-win32-x86.msi  2016-11-30 14:25  15M
    [   ]  cmake-3.7.1-win32-x86.zip  2016-11-30 14:25  21M
    [   ]  cmake-3.7.1-win64-x64.msi  2016-11-30 14:24  17M
    [   ]  cmake-3.7.1-win64-x64.zip  2016-11-30 14:24  24M
    [   ]  cmake-3.7.1.tar.Z  2016-11-30 14:24  11M
    [   ]  cmake-3.7.1.tar.gz  2016-11-30 14:24  7.0M
    [   ]  cmake-3.7.1.zip  2016-11-30 14:24  11M
    [   ]  cmake-3.7.2-Darwin-x86_64.dmg  2017-01-13 14:13  26M
    [   ]  cmake-3.7.2-Darwin-x86_64.tar.gz  2017-01-13 14:13  25M
    [TXT]  cmake-3.7.2-Linux-x86_64.sh  2017-01-13 14:13  29M
    [   ]  cmake-3.7.2-Linux-x86_64.tar.gz  2017-01-13 14:13  29M
    [TXT]  cmake-3.7.2-SHA-256.txt  2017-01-13 14:13  1.0K
    [TXT]  cmake-3.7.2-SHA-256.txt.asc  2017-01-13 14:13  833
    [   ]  cmake-3.7.2-win32-x86.msi  2017-01-13 14:13  15M
    [   ]  cmake-3.7.2-win32-x86.zip  2017-01-13 14:12  21M
    [   ]  cmake-3.7.2-win64-x64.msi  2017-01-13 14:12  17M
    [   ]  cmake-3.7.2-win64-x64.zip  2017-01-13 14:12  24M
    [   ]  cmake-3.7.2.tar.Z  2017-01-13 14:12  11M
    [   ]  cmake-3.7.2.tar.gz  2017-01-13 14:12  7.0M
    [   ]  cmake-3.7.2.zip  2017-01-13 14:12  11M

    - diff --git a/bitbake/lib/bb/tests/fetch-testdata/files/v3.8/index.html b/bitbake/lib/bb/tests/fetch-testdata/files/v3.8/index.html deleted file mode 100644 index 5fc8caa1d5..0000000000 --- a/bitbake/lib/bb/tests/fetch-testdata/files/v3.8/index.html +++ /dev/null @@ -1,105 +0,0 @@ - - - - Index of /files/v3.8 - - -

    Index of /files/v3.8

    [ICO]  Name  Last modified  Size  Description

    [PARENTDIR]  Parent Directory  -
    [   ]  cmake-3.8.0-Darwin-x86_64.dmg  2017-04-10 13:39  26M
    [   ]  cmake-3.8.0-Darwin-x86_64.tar.gz  2017-04-10 13:39  26M
    [TXT]  cmake-3.8.0-Linux-x86_64.sh  2017-04-10 13:39  31M
    [   ]  cmake-3.8.0-Linux-x86_64.tar.gz  2017-04-10 13:39  31M
    [TXT]  cmake-3.8.0-SHA-256.txt  2017-04-10 13:39  1.0K
    [TXT]  cmake-3.8.0-SHA-256.txt.asc  2017-04-10 13:39  833
    [   ]  cmake-3.8.0-rc1-Darwin-x86_64.dmg  2017-02-07 12:54  26M
    [   ]  cmake-3.8.0-rc1-Darwin-x86_64.tar.gz  2017-02-07 12:54  26M
    [TXT]  cmake-3.8.0-rc1-Linux-x86_64.sh  2017-02-07 12:54  31M
    [   ]  cmake-3.8.0-rc1-Linux-x86_64.tar.gz  2017-02-07 12:54  31M
    [TXT]  cmake-3.8.0-rc1-SHA-256.txt  2017-02-07 12:54  1.0K
    [TXT]  cmake-3.8.0-rc1-SHA-256.txt.asc  2017-02-07 12:54  833
    [   ]  cmake-3.8.0-rc1-win32-x86.msi  2017-02-07 12:54  15M
    [   ]  cmake-3.8.0-rc1-win32-x86.zip  2017-02-07 12:54  21M
    [   ]  cmake-3.8.0-rc1-win64-x64.msi  2017-02-07 12:54  18M
    [   ]  cmake-3.8.0-rc1-win64-x64.zip  2017-02-07 12:54  24M
    [   ]  cmake-3.8.0-rc1.tar.Z  2017-02-07 12:54  12M
    [   ]  cmake-3.8.0-rc1.tar.gz  2017-02-07 12:54  7.1M
    [   ]  cmake-3.8.0-rc1.zip  2017-02-07 12:54  12M
    [   ]  cmake-3.8.0-rc2-Darwin-x86_64.dmg  2017-03-03 10:00  26M
    [   ]  cmake-3.8.0-rc2-Darwin-x86_64.tar.gz  2017-03-03 10:00  26M
    [TXT]  cmake-3.8.0-rc2-Linux-x86_64.sh  2017-03-03 10:00  31M
    [   ]  cmake-3.8.0-rc2-Linux-x86_64.tar.gz  2017-03-03 09:59  31M
    [TXT]  cmake-3.8.0-rc2-SHA-256.txt  2017-03-03 09:59  1.0K
    [TXT]  cmake-3.8.0-rc2-SHA-256.txt.asc  2017-03-03 09:59  833
    [   ]  cmake-3.8.0-rc2-win32-x86.msi  2017-03-03 09:59  16M
    [   ]  cmake-3.8.0-rc2-win32-x86.zip  2017-03-03 09:59  22M
    [   ]  cmake-3.8.0-rc2-win64-x64.msi  2017-03-03 09:59  18M
    [   ]  cmake-3.8.0-rc2-win64-x64.zip  2017-03-03 09:59  24M
    [   ]  cmake-3.8.0-rc2.tar.Z  2017-03-03 09:59  12M
    [   ]  cmake-3.8.0-rc2.tar.gz  2017-03-03 09:59  7.2M
    [   ]  cmake-3.8.0-rc2.zip  2017-03-03 09:59  12M
    [   ]  cmake-3.8.0-rc3-Darwin-x86_64.dmg  2017-03-24 13:52  26M
    [   ]  cmake-3.8.0-rc3-Darwin-x86_64.tar.gz  2017-03-24 13:52  26M
    [TXT]  cmake-3.8.0-rc3-Linux-x86_64.sh  2017-03-24 13:52  31M
    [   ]  cmake-3.8.0-rc3-Linux-x86_64.tar.gz  2017-03-24 13:52  31M
    [TXT]  cmake-3.8.0-rc3-SHA-256.txt  2017-03-24 13:52  1.0K
    [TXT]  cmake-3.8.0-rc3-SHA-256.txt.asc  2017-03-24 13:52  833
    [   ]  cmake-3.8.0-rc3-win32-x86.msi  2017-03-24 13:52  16M
    [   ]  cmake-3.8.0-rc3-win32-x86.zip  2017-03-24 13:52  22M
    [   ]  cmake-3.8.0-rc3-win64-x64.msi  2017-03-24 13:52  18M
    [   ]  cmake-3.8.0-rc3-win64-x64.zip  2017-03-24 13:52  24M
    [   ]  cmake-3.8.0-rc3.tar.Z  2017-03-24 13:52  12M
    [   ]  cmake-3.8.0-rc3.tar.gz  2017-03-24 13:52  7.2M
    [   ]  cmake-3.8.0-rc3.zip  2017-03-24 13:52  12M
    [   ]  cmake-3.8.0-rc4-Darwin-x86_64.dmg  2017-03-30 11:38  26M
    [   ]  cmake-3.8.0-rc4-Darwin-x86_64.tar.gz  2017-03-30 11:38  26M
    [TXT]  cmake-3.8.0-rc4-Linux-x86_64.sh  2017-03-30 11:38  31M
    [   ]  cmake-3.8.0-rc4-Linux-x86_64.tar.gz  2017-03-30 11:38  31M
    [TXT]  cmake-3.8.0-rc4-SHA-256.txt  2017-03-30 11:38  1.0K
    [TXT]  cmake-3.8.0-rc4-SHA-256.txt.asc  2017-03-30 11:38  833
    [   ]  cmake-3.8.0-rc4-win32-x86.msi  2017-03-30 11:38  16M
    [   ]  cmake-3.8.0-rc4-win32-x86.zip  2017-03-30 11:38  22M
    [   ]  cmake-3.8.0-rc4-win64-x64.msi  2017-03-30 11:38  18M
    [   ]  cmake-3.8.0-rc4-win64-x64.zip  2017-03-30 11:38  24M
    [   ]  cmake-3.8.0-rc4.tar.Z  2017-03-30 11:38  12M
    [   ]  cmake-3.8.0-rc4.tar.gz  2017-03-30 11:38  7.2M
    [   ]  cmake-3.8.0-rc4.zip  2017-03-30 11:38  12M
    [   ]  cmake-3.8.0-win32-x86.msi  2017-04-10 13:39  16M
    [   ]  cmake-3.8.0-win32-x86.zip  2017-04-10 13:39  22M
    [   ]  cmake-3.8.0-win64-x64.msi  2017-04-10 13:39  18M
    [   ]  cmake-3.8.0-win64-x64.zip  2017-04-10 13:38  24M
    [   ]  cmake-3.8.0.tar.Z  2017-04-10 13:38  12M
    [   ]  cmake-3.8.0.tar.gz  2017-04-10 13:38  7.2M
    [   ]  cmake-3.8.0.zip  2017-04-10 13:38  12M
    [   ]  cmake-3.8.1-Darwin-x86_64.dmg  2017-05-02 11:06  26M
    [   ]  cmake-3.8.1-Darwin-x86_64.tar.gz  2017-05-02 11:06  26M
    [TXT]  cmake-3.8.1-Linux-x86_64.sh  2017-05-02 11:06  31M
    [   ]  cmake-3.8.1-Linux-x86_64.tar.gz  2017-05-02 11:05  31M
    [TXT]  cmake-3.8.1-SHA-256.txt  2017-05-02 11:05  1.0K
    [TXT]  cmake-3.8.1-SHA-256.txt.asc  2017-05-02 11:05  833
    [   ]  cmake-3.8.1-win32-x86.msi  2017-05-02 11:05  16M
    [   ]  cmake-3.8.1-win32-x86.zip  2017-05-02 11:05  22M
    [   ]  cmake-3.8.1-win64-x64.msi  2017-05-02 11:05  18M
    [   ]  cmake-3.8.1-win64-x64.zip  2017-05-02 11:05  24M
    [   ]  cmake-3.8.1.tar.Z  2017-05-02 11:05  12M
    [   ]  cmake-3.8.1.tar.gz  2017-05-02 11:05  7.2M
    [   ]  cmake-3.8.1.zip  2017-05-02 11:05  12M
    [   ]  cmake-3.8.2-Darwin-x86_64.dmg  2017-05-31 12:26  26M
    [   ]  cmake-3.8.2-Darwin-x86_64.tar.gz  2017-05-31 12:26  25M
    [TXT]  cmake-3.8.2-Linux-x86_64.sh  2017-05-31 12:26  31M
    [   ]  cmake-3.8.2-Linux-x86_64.tar.gz  2017-05-31 12:26  31M
    [TXT]  cmake-3.8.2-SHA-256.txt  2017-05-31 12:26  1.0K
    [TXT]  cmake-3.8.2-SHA-256.txt.asc  2017-05-31 12:26  833
    [   ]  cmake-3.8.2-win32-x86.msi  2017-05-31 12:26  16M
    [   ]  cmake-3.8.2-win32-x86.zip  2017-05-31 12:26  22M
    [   ]  cmake-3.8.2-win64-x64.msi  2017-05-31 12:25  18M
    [   ]  cmake-3.8.2-win64-x64.zip  2017-05-31 12:25  24M
    [   ]  cmake-3.8.2.tar.Z  2017-05-31 12:25  12M
    [   ]  cmake-3.8.2.tar.gz  2017-05-31 12:25  7.2M
    [   ]  cmake-3.8.2.zip  2017-05-31 12:25  12M

    - diff --git a/bitbake/lib/bb/tests/fetch-testdata/files/v3.9/index.html b/bitbake/lib/bb/tests/fetch-testdata/files/v3.9/index.html deleted file mode 100644 index 54182afa05..0000000000 --- a/bitbake/lib/bb/tests/fetch-testdata/files/v3.9/index.html +++ /dev/null @@ -1,183 +0,0 @@ - - - - Index of /files/v3.9 - - -

    Index of /files/v3.9

    [ICO]  Name  Last modified  Size  Description

    [PARENTDIR]  Parent Directory  -
    [   ]  cmake-3.9.0-Darwin-x86_64.dmg  2017-07-18 13:32  26M
    [   ]  cmake-3.9.0-Darwin-x86_64.tar.gz  2017-07-18 13:32  25M
    [TXT]  cmake-3.9.0-Linux-x86_64.sh  2017-07-18 13:32  31M
    [   ]  cmake-3.9.0-Linux-x86_64.tar.gz  2017-07-18 13:32  31M
    [TXT]  cmake-3.9.0-SHA-256.txt  2017-07-18 13:32  1.0K
    [TXT]  cmake-3.9.0-SHA-256.txt.asc  2017-07-18 13:32  833
    [   ]  cmake-3.9.0-rc1-Darwin-x86_64.dmg  2017-06-05 14:48  26M
    [   ]  cmake-3.9.0-rc1-Darwin-x86_64.tar.gz  2017-06-05 14:48  25M
    [TXT]  cmake-3.9.0-rc1-Linux-x86_64.sh  2017-06-05 14:48  31M
    [   ]  cmake-3.9.0-rc1-Linux-x86_64.tar.gz  2017-06-05 14:48  31M
    [TXT]  cmake-3.9.0-rc1-SHA-256.txt  2017-06-05 14:48  1.0K
    [TXT]  cmake-3.9.0-rc1-SHA-256.txt.asc  2017-06-05 14:47  833
    [   ]  cmake-3.9.0-rc1-win32-x86.msi  2017-06-05 14:47  16M
    [   ]  cmake-3.9.0-rc1-win32-x86.zip  2017-06-05 14:47  22M
    [   ]  cmake-3.9.0-rc1-win64-x64.msi  2017-06-05 14:47  18M
    [   ]  cmake-3.9.0-rc1-win64-x64.zip  2017-06-05 14:47  25M
    [   ]  cmake-3.9.0-rc1.tar.Z  2017-06-05 14:47  12M
    [   ]  cmake-3.9.0-rc1.tar.gz  2017-06-05 14:47  7.3M
    [   ]  cmake-3.9.0-rc1.zip  2017-06-05 14:47  12M
    [   ]  cmake-3.9.0-rc2-Darwin-x86_64.dmg  2017-06-07 14:46  26M
    [   ]  cmake-3.9.0-rc2-Darwin-x86_64.tar.gz  2017-06-07 14:46  25M
    [TXT]  cmake-3.9.0-rc2-Linux-x86_64.sh  2017-06-07 14:46  31M
    [   ]  cmake-3.9.0-rc2-Linux-x86_64.tar.gz  2017-06-07 14:46  31M
    [TXT]  cmake-3.9.0-rc2-SHA-256.txt  2017-06-07 14:46  1.0K
    [TXT]  cmake-3.9.0-rc2-SHA-256.txt.asc  2017-06-07 14:46  833
    [   ]  cmake-3.9.0-rc2-win32-x86.msi  2017-06-07 14:46  16M
    [   ]  cmake-3.9.0-rc2-win32-x86.zip  2017-06-07 14:46  22M
    [   ]  cmake-3.9.0-rc2-win64-x64.msi  2017-06-07 14:46  18M
    [   ]  cmake-3.9.0-rc2-win64-x64.zip  2017-06-07 14:46  25M
    [   ]  cmake-3.9.0-rc2.tar.Z  2017-06-07 14:46  12M
    [   ]  cmake-3.9.0-rc2.tar.gz  2017-06-07 14:46  7.3M
    [   ]  cmake-3.9.0-rc2.zip  2017-06-07 14:46  12M
    [   ]  cmake-3.9.0-rc3-Darwin-x86_64.dmg  2017-06-13 14:02  26M
    [   ]  cmake-3.9.0-rc3-Darwin-x86_64.tar.gz  2017-06-13 14:02  25M
    [TXT]  cmake-3.9.0-rc3-Linux-x86_64.sh  2017-06-13 14:02  31M
    [   ]  cmake-3.9.0-rc3-Linux-x86_64.tar.gz  2017-06-13 14:02  31M
    [TXT]  cmake-3.9.0-rc3-SHA-256.txt  2017-06-13 14:02  1.0K
    [TXT]  cmake-3.9.0-rc3-SHA-256.txt.asc  2017-06-13 14:02  833
    [   ]  cmake-3.9.0-rc3-win32-x86.msi  2017-06-13 14:02  16M
    [   ]  cmake-3.9.0-rc3-win32-x86.zip  2017-06-13 14:02  22M
    [   ]  cmake-3.9.0-rc3-win64-x64.msi  2017-06-13 14:01  18M
    [   ]  cmake-3.9.0-rc3-win64-x64.zip  2017-06-13 14:01  25M
    [   ]  cmake-3.9.0-rc3.tar.Z  2017-06-13 14:01  12M
    [   ]  cmake-3.9.0-rc3.tar.gz  2017-06-13 14:01  7.3M
    [   ]  cmake-3.9.0-rc3.zip  2017-06-13 14:01  12M
    [   ]  cmake-3.9.0-rc4-Darwin-x86_64.dmg  2017-06-22 13:27  26M
    [   ]  cmake-3.9.0-rc4-Darwin-x86_64.tar.gz  2017-06-22 13:27  25M
    [TXT]  cmake-3.9.0-rc4-Linux-x86_64.sh  2017-06-22 13:27  31M
    [   ]  cmake-3.9.0-rc4-Linux-x86_64.tar.gz  2017-06-22 13:27  31M
    [TXT]  cmake-3.9.0-rc4-SHA-256.txt  2017-06-22 13:27  1.0K
    [TXT]  cmake-3.9.0-rc4-SHA-256.txt.asc  2017-06-22 13:26  833
    [   ]  cmake-3.9.0-rc4-win32-x86.msi  2017-06-22 13:26  16M
    [   ]  cmake-3.9.0-rc4-win32-x86.zip  2017-06-22 13:26  22M
    [   ]  cmake-3.9.0-rc4-win64-x64.msi  2017-06-22 13:26  18M
    [   ]  cmake-3.9.0-rc4-win64-x64.zip  2017-06-22 13:26  25M
    [   ]  cmake-3.9.0-rc4.tar.Z  2017-06-22 13:26  12M
    [   ]  cmake-3.9.0-rc4.tar.gz  2017-06-22 13:26  7.3M
    [   ]  cmake-3.9.0-rc4.zip  2017-06-22 13:26  12M
    [   ]  cmake-3.9.0-rc5-Darwin-x86_64.dmg  2017-06-27 13:56  26M
    [   ]  cmake-3.9.0-rc5-Darwin-x86_64.tar.gz  2017-06-27 13:56  25M
    [TXT]  cmake-3.9.0-rc5-Linux-x86_64.sh  2017-06-27 13:56  31M
    [   ]  cmake-3.9.0-rc5-Linux-x86_64.tar.gz  2017-06-27 13:56  31M
    [TXT]  cmake-3.9.0-rc5-SHA-256.txt  2017-06-27 13:56  1.0K
    [TXT]  cmake-3.9.0-rc5-SHA-256.txt.asc  2017-06-27 13:56  833
    [   ]  cmake-3.9.0-rc5-win32-x86.msi  2017-06-27 13:56  16M
    [   ]  cmake-3.9.0-rc5-win32-x86.zip  2017-06-27 13:56  22M
    [   ]  cmake-3.9.0-rc5-win64-x64.msi  2017-06-27 13:56  18M
    [   ]  cmake-3.9.0-rc5-win64-x64.zip  2017-06-27 13:56  25M
    [   ]  cmake-3.9.0-rc5.tar.Z  2017-06-27 13:55  12M
    [   ]  cmake-3.9.0-rc5.tar.gz  2017-06-27 13:55  7.3M
    [   ]  cmake-3.9.0-rc5.zip  2017-06-27 13:55  12M
    [   ]  cmake-3.9.0-rc6-Darwin-x86_64.dmg  2017-07-12 11:46  26M
    [   ]  cmake-3.9.0-rc6-Darwin-x86_64.tar.gz  2017-07-12 11:46  25M
    [TXT]  cmake-3.9.0-rc6-Linux-x86_64.sh  2017-07-12 11:46  31M
    [   ]  cmake-3.9.0-rc6-Linux-x86_64.tar.gz  2017-07-12 11:46  31M
    [TXT]  cmake-3.9.0-rc6-SHA-256.txt  2017-07-12 11:46  1.0K
    [TXT]  cmake-3.9.0-rc6-SHA-256.txt.asc  2017-07-12 11:46  833
    [   ]  cmake-3.9.0-rc6-win32-x86.msi  2017-07-12 11:46  16M
    [   ]  cmake-3.9.0-rc6-win32-x86.zip  2017-07-12 11:45  22M
    [   ]  cmake-3.9.0-rc6-win64-x64.msi  2017-07-12 11:45  18M
    [   ]  cmake-3.9.0-rc6-win64-x64.zip  2017-07-12 11:45  25M
    [   ]  cmake-3.9.0-rc6.tar.Z  2017-07-12 11:45  12M
    [   ]  cmake-3.9.0-rc6.tar.gz  2017-07-12 11:45  7.3M
    [   ]  cmake-3.9.0-rc6.zip  2017-07-12 11:45  12M
    [   ]  cmake-3.9.0-win32-x86.msi  2017-07-18 13:32  16M
    [   ]  cmake-3.9.0-win32-x86.zip  2017-07-18 13:32  22M
    [   ]  cmake-3.9.0-win64-x64.msi  2017-07-18 13:32  18M
    [   ]  cmake-3.9.0-win64-x64.zip  2017-07-18 13:31  25M
    [   ]  cmake-3.9.0.tar.Z  2017-07-18 13:31  12M
    [   ]  cmake-3.9.0.tar.gz  2017-07-18 13:31  7.3M
    [   ]  cmake-3.9.0.zip  2017-07-18 13:31  12M
    [   ]  cmake-3.9.1-Darwin-x86_64.dmg  2017-08-10 11:49  26M
    [   ]  cmake-3.9.1-Darwin-x86_64.tar.gz  2017-08-10 11:49  25M
    [TXT]  cmake-3.9.1-Linux-x86_64.sh  2017-08-10 11:49  31M
    [   ]  cmake-3.9.1-Linux-x86_64.tar.gz  2017-08-10 11:49  31M
    [TXT]  cmake-3.9.1-SHA-256.txt  2017-08-10 11:49  1.0K
    [TXT]  cmake-3.9.1-SHA-256.txt.asc  2017-08-10 11:49  833
    [   ]  cmake-3.9.1-win32-x86.msi  2017-08-10 11:49  16M
    [   ]  cmake-3.9.1-win32-x86.zip  2017-08-10 11:49  22M
    [   ]  cmake-3.9.1-win64-x64.msi  2017-08-10 11:49  18M
    [   ]  cmake-3.9.1-win64-x64.zip  2017-08-10 11:49  25M
    [   ]  cmake-3.9.1.tar.Z  2017-08-10 11:49  12M
    [   ]  cmake-3.9.1.tar.gz  2017-08-10 11:49  7.3M
    [   ]  cmake-3.9.1.zip  2017-08-10 11:49  12M
    [   ]  cmake-3.9.2-Darwin-x86_64.dmg  2017-09-07 15:55  26M
    [   ]  cmake-3.9.2-Darwin-x86_64.tar.gz  2017-09-07 15:54  25M
    [TXT]  cmake-3.9.2-Linux-x86_64.sh  2017-09-07 15:54  31M
    [   ]  cmake-3.9.2-Linux-x86_64.tar.gz  2017-09-07 15:54  31M
    [TXT]  cmake-3.9.2-SHA-256.txt  2017-09-07 15:54  1.0K
    [TXT]  cmake-3.9.2-SHA-256.txt.asc  2017-09-07 15:54  833
    [   ]  cmake-3.9.2-win32-x86.msi  2017-09-07 15:54  16M
    [   ]  cmake-3.9.2-win32-x86.zip  2017-09-07 15:54  22M
    [   ]  cmake-3.9.2-win64-x64.msi  2017-09-07 15:54  18M
    [   ]  cmake-3.9.2-win64-x64.zip  2017-09-07 15:54  25M
    [   ]  cmake-3.9.2.tar.Z  2017-09-07 15:54  12M
    [   ]  cmake-3.9.2.tar.gz  2017-09-07 15:54  7.3M
    [   ]  cmake-3.9.2.zip  2017-09-07 15:54  12M
    [   ]  cmake-3.9.3-Darwin-x86_64.dmg  2017-09-20 11:59  26M
    [   ]  cmake-3.9.3-Darwin-x86_64.tar.gz  2017-09-20 11:59  25M
    [TXT]  cmake-3.9.3-Linux-x86_64.sh  2017-09-20 11:59  31M
    [   ]  cmake-3.9.3-Linux-x86_64.tar.gz  2017-09-20 11:59  31M
    [TXT]  cmake-3.9.3-SHA-256.txt  2017-09-20 11:59  1.0K
    [TXT]  cmake-3.9.3-SHA-256.txt.asc  2017-09-20 11:59  833
    [   ]  cmake-3.9.3-win32-x86.msi  2017-09-20 11:59  16M
    [   ]  cmake-3.9.3-win32-x86.zip  2017-09-20 11:59  22M
    [   ]  cmake-3.9.3-win64-x64.msi  2017-09-20 11:59  18M
    [   ]  cmake-3.9.3-win64-x64.zip  2017-09-20 11:59  25M
    [   ]  cmake-3.9.3.tar.Z  2017-09-20 11:59  12M
    [   ]  cmake-3.9.3.tar.gz  2017-09-20 11:59  7.3M
    [   ]  cmake-3.9.3.zip  2017-09-20 11:59  12M
    [   ]  cmake-3.9.4-Darwin-x86_64.dmg  2017-10-04 09:43  26M
    [   ]  cmake-3.9.4-Darwin-x86_64.tar.gz  2017-10-04 09:43  25M
    [TXT]  cmake-3.9.4-Linux-x86_64.sh  2017-10-04 09:43  31M
    [   ]  cmake-3.9.4-Linux-x86_64.tar.gz  2017-10-04 09:43  31M
    [TXT]  cmake-3.9.4-SHA-256.txt  2017-10-04 09:43  1.0K
    [TXT]  cmake-3.9.4-SHA-256.txt.asc  2017-10-04 09:42  833
    [   ]  cmake-3.9.4-win32-x86.msi  2017-10-04 09:42  16M
    [   ]  cmake-3.9.4-win32-x86.zip  2017-10-04 09:42  22M
    [   ]  cmake-3.9.4-win64-x64.msi  2017-10-04 09:42  18M
    [   ]  cmake-3.9.4-win64-x64.zip  2017-10-04 09:42  25M
    [   ]  cmake-3.9.4.tar.Z  2017-10-04 09:42  12M
    [   ]  cmake-3.9.4.tar.gz  2017-10-04 09:42  7.3M
    [   ]  cmake-3.9.4.zip  2017-10-04 09:42  12M
    [   ]  cmake-3.9.5-Darwin-x86_64.dmg  2017-11-03 10:26  26M
    [   ]  cmake-3.9.5-Darwin-x86_64.tar.gz  2017-11-03 10:26  25M
    [TXT]  cmake-3.9.5-Linux-x86_64.sh  2017-11-03 10:26  31M
    [   ]  cmake-3.9.5-Linux-x86_64.tar.gz  2017-11-03 10:26  31M
    [TXT]  cmake-3.9.5-SHA-256.txt  2017-11-03 10:26  1.0K
    [TXT]  cmake-3.9.5-SHA-256.txt.asc  2017-11-03 10:26  833
    [   ]  cmake-3.9.5-win32-x86.msi  2017-11-03 10:26  16M
    [   ]  cmake-3.9.5-win32-x86.zip  2017-11-03 10:26  22M
    [   ]  cmake-3.9.5-win64-x64.msi  2017-11-03 10:26  18M
    [   ]  cmake-3.9.5-win64-x64.zip  2017-11-03 10:26  25M
    [   ]  cmake-3.9.5.tar.Z  2017-11-03 10:26  12M
    [   ]  cmake-3.9.5.tar.gz  2017-11-03 10:26  7.3M
    [   ]  cmake-3.9.5.zip  2017-11-03 10:26  12M
    [   ]  cmake-3.9.6-Darwin-x86_64.dmg  2017-11-10 09:22  26M
    [   ]  cmake-3.9.6-Darwin-x86_64.tar.gz  2017-11-10 09:21  25M
    [TXT]  cmake-3.9.6-Linux-x86_64.sh  2017-11-10 09:21  31M
    [   ]  cmake-3.9.6-Linux-x86_64.tar.gz  2017-11-10 09:21  31M
    [TXT]  cmake-3.9.6-SHA-256.txt  2017-11-10 09:21  1.0K
    [TXT]  cmake-3.9.6-SHA-256.txt.asc  2017-11-10 09:21  833
    [   ]  cmake-3.9.6-win32-x86.msi  2017-11-10 09:21  16M
    [   ]  cmake-3.9.6-win32-x86.zip  2017-11-10 09:21  22M
    [   ]  cmake-3.9.6-win64-x64.msi  2017-11-10 09:21  18M
    [   ]  cmake-3.9.6-win64-x64.zip  2017-11-10 09:21  25M
    [   ]  cmake-3.9.6.tar.Z  2017-11-10 09:21  12M
    [   ]  cmake-3.9.6.tar.gz  2017-11-10 09:21  7.3M
    [   ]  cmake-3.9.6.zip  2017-11-10 09:21  12M
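
    Every release row above is accompanied by a cmake-X.Y.Z-SHA-256.txt manifest plus a detached .asc signature. Assuming those manifests use the usual sha256sum-style "<hex digest>  <filename>" lines (an assumption, not something stated here), a downloaded artifact can be checked against one with a short sketch:

        import hashlib

        # Illustrative sketch, assuming sha256sum-style "<digest>  <filename>"
        # lines in the cmake-X.Y.Z-SHA-256.txt manifests listed above.
        def verify(manifest_text, filename, payload):
            digests = {}
            for line in manifest_text.splitlines():
                parts = line.split()
                if len(parts) == 2:
                    digest, name = parts
                    digests[name] = digest.lower()
            return digests.get(filename) == hashlib.sha256(payload).hexdigest()

        # Toy payload with a well-known digest, standing in for a real tarball:
        manifest = "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824  cmake-3.9.6.tar.gz"
        print(verify(manifest, "cmake-3.9.6.tar.gz", b"hello"))  # True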

    - diff --git a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.23/index.html b/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.23/index.html deleted file mode 100644 index b3d9244b09..0000000000 --- a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.23/index.html +++ /dev/null @@ -1,45 +0,0 @@ - -Index of /pub/linux/utils/util-linux/v2.23/ - -

    Index of /pub/linux/utils/util-linux/v2.23/


    ../
    -libblkid-docs/                                     31-Jul-2013 12:35       -
    -libmount-docs/                                     31-Jul-2013 12:39       -
    -sha256sums.asc                                     12-May-2017 10:53    2942
    -util-linux-2.23-rc1.tar.bz2                        22-Mar-2013 12:48      5M
    -util-linux-2.23-rc1.tar.gz                         22-Mar-2013 12:48      7M
    -util-linux-2.23-rc1.tar.sign                       22-Mar-2013 12:48     836
    -util-linux-2.23-rc1.tar.xz                         22-Mar-2013 12:48      3M
    -util-linux-2.23-rc2.tar.bz2                        10-Apr-2013 22:14      5M
    -util-linux-2.23-rc2.tar.gz                         10-Apr-2013 22:14      7M
    -util-linux-2.23-rc2.tar.sign                       10-Apr-2013 22:14     836
    -util-linux-2.23-rc2.tar.xz                         10-Apr-2013 22:14      3M
    -util-linux-2.23.1.tar.bz2                          28-May-2013 09:57      5M
    -util-linux-2.23.1.tar.gz                           28-May-2013 09:57      7M
    -util-linux-2.23.1.tar.sign                         28-May-2013 09:57     836
    -util-linux-2.23.1.tar.xz                           28-May-2013 09:57      3M
    -util-linux-2.23.2.tar.bz2                          31-Jul-2013 12:40      5M
    -util-linux-2.23.2.tar.gz                           31-Jul-2013 12:40      7M
    -util-linux-2.23.2.tar.sign                         31-Jul-2013 12:40     836
    -util-linux-2.23.2.tar.xz                           31-Jul-2013 12:40      3M
    -util-linux-2.23.tar.bz2                            25-Apr-2013 10:48      5M
    -util-linux-2.23.tar.gz                             25-Apr-2013 10:48      7M
    -util-linux-2.23.tar.sign                           25-Apr-2013 10:48     836
    -util-linux-2.23.tar.xz                             25-Apr-2013 10:48      3M
    -v2.23-ChangeLog                                    25-Apr-2013 10:48     19K
    -v2.23-ChangeLog.sign                               25-Apr-2013 10:48     836
    -v2.23-ReleaseNotes                                 25-Apr-2013 10:48     53K
    -v2.23-ReleaseNotes.sign                            25-Apr-2013 10:48     836
    -v2.23-rc1-ChangeLog                                22-Mar-2013 12:48    361K
    -v2.23-rc1-ChangeLog.sign                           22-Mar-2013 12:48     836
    -v2.23-rc2-ChangeLog                                10-Apr-2013 22:14     80K
    -v2.23-rc2-ChangeLog.sign                           10-Apr-2013 22:14     836
    -v2.23.1-ChangeLog                                  28-May-2013 09:57     13K
    -v2.23.1-ChangeLog.sign                             28-May-2013 09:57     836
    -v2.23.1-ReleaseNotes                               28-May-2013 09:58    1448
    -v2.23.1-ReleaseNotes.sign                          28-May-2013 09:58     836
    -v2.23.2-ChangeLog                                  31-Jul-2013 12:40     23K
    -v2.23.2-ChangeLog.sign                             31-Jul-2013 12:40     836
    -v2.23.2-ReleaseNotes                               31-Jul-2013 12:40    2582
    -v2.23.2-ReleaseNotes.sign                          31-Jul-2013 12:40     836
    -
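
    Unlike the Apache pages above, kernel.org serves plain pre-formatted listings, and each series mixes -rcN tarballs with final releases, so any latest-version check over this data has to skip the prereleases. A hedged sketch (again not bitbake's actual implementation) that does so implicitly, by requiring ".tar.xz" directly after the dotted version:

        import re

        # Sketch only: pick the newest non-rc util-linux tarball from a
        # kernel.org-style listing. "-rcN" names never match because the
        # pattern requires ".tar.xz" immediately after the dotted version.
        TARBALL_RE = re.compile(r'util-linux-((?:\d+\.)+\d+)\.tar\.xz')

        def latest_stable(listing):
            versions = {m.group(1) for m in TARBALL_RE.finditer(listing)}
            # Compare numerically per component; "2.23" < "2.23.2" as tuples.
            return max(versions, key=lambda v: tuple(map(int, v.split("."))), default=None)

        listing = "util-linux-2.23-rc2.tar.xz util-linux-2.23.tar.xz util-linux-2.23.2.tar.xz"
        print(latest_stable(listing))  # 2.23.2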

    - diff --git a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.24/index.html b/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.24/index.html deleted file mode 100644 index 4afb4625a0..0000000000 --- a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.24/index.html +++ /dev/null @@ -1,43 +0,0 @@ - -Index of /pub/linux/utils/util-linux/v2.24/ - -

    Index of /pub/linux/utils/util-linux/v2.24/


    ../
    -libblkid-docs/                                     24-Apr-2014 10:15       -
    -libmount-docs/                                     24-Apr-2014 10:17       -
    -sha256sums.asc                                     12-May-2017 10:53    2758
    -util-linux-2.24-rc1.tar.bz2                        27-Sep-2013 12:54      5M
    -util-linux-2.24-rc1.tar.gz                         27-Sep-2013 12:54      7M
    -util-linux-2.24-rc1.tar.sign                       27-Sep-2013 12:54     836
    -util-linux-2.24-rc1.tar.xz                         27-Sep-2013 12:54      3M
    -util-linux-2.24-rc2.tar.bz2                        11-Oct-2013 11:37      5M
    -util-linux-2.24-rc2.tar.gz                         11-Oct-2013 11:37      7M
    -util-linux-2.24-rc2.tar.sign                       11-Oct-2013 11:37     836
    -util-linux-2.24-rc2.tar.xz                         11-Oct-2013 11:37      3M
    -util-linux-2.24.1.tar.gz                           20-Jan-2014 13:33      7M
    -util-linux-2.24.1.tar.sign                         20-Jan-2014 13:33     819
    -util-linux-2.24.1.tar.xz                           20-Jan-2014 13:33      3M
    -util-linux-2.24.2.tar.gz                           24-Apr-2014 10:17      7M
    -util-linux-2.24.2.tar.sign                         24-Apr-2014 10:17     819
    -util-linux-2.24.2.tar.xz                           24-Apr-2014 10:17      3M
    -util-linux-2.24.tar.bz2                            21-Oct-2013 13:49      5M
    -util-linux-2.24.tar.gz                             21-Oct-2013 13:49      7M
    -util-linux-2.24.tar.sign                           21-Oct-2013 13:49     836
    -util-linux-2.24.tar.xz                             21-Oct-2013 13:49      3M
    -v2.24-ChangeLog                                    21-Oct-2013 13:49     22K
    -v2.24-ChangeLog.sign                               21-Oct-2013 13:49     836
    -v2.24-ReleaseNotes                                 21-Oct-2013 13:49     44K
    -v2.24-ReleaseNotes.sign                            21-Oct-2013 13:49     836
    -v2.24-rc1-ChangeLog                                27-Sep-2013 12:54    292K
    -v2.24-rc1-ChangeLog.sign                           27-Sep-2013 12:54     836
    -v2.24-rc2-ChangeLog                                11-Oct-2013 11:37     42K
    -v2.24-rc2-ChangeLog.sign                           11-Oct-2013 11:37     836
    -v2.24.1-ChangeLog                                  20-Jan-2014 13:33     38K
    -v2.24.1-ChangeLog.sign                             20-Jan-2014 13:33     819
    -v2.24.1-ReleaseNotes                               20-Jan-2014 13:33    4449
    -v2.24.1-ReleaseNotes.sign                          20-Jan-2014 13:33     819
    -v2.24.2-ChangeLog                                  24-Apr-2014 10:17     47K
    -v2.24.2-ChangeLog.sign                             24-Apr-2014 10:17     819
    -v2.24.2-ReleaseNotes                               24-Apr-2014 10:18    5748
    -v2.24.2-ReleaseNotes.sign                          24-Apr-2014 10:18     819
    -

    - diff --git a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.25/index.html b/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.25/index.html deleted file mode 100644 index 9516c3b1ee..0000000000 --- a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.25/index.html +++ /dev/null @@ -1,46 +0,0 @@ - -Index of /pub/linux/utils/util-linux/v2.25/ - -

    Index of /pub/linux/utils/util-linux/v2.25/


    ../
    -libblkid-docs/                                     24-Oct-2014 13:05       -
    -libmount-docs/                                     24-Oct-2014 13:06       -
    -libsmartcols-docs/                                 24-Oct-2014 13:08       -
    -sha256sums.asc                                     12-May-2017 10:53    2758
    -util-linux-2.25-rc1.tar.gz                         18-Jun-2014 13:33      7M
    -util-linux-2.25-rc1.tar.sign                       18-Jun-2014 13:33     819
    -util-linux-2.25-rc1.tar.xz                         18-Jun-2014 13:33      3M
    -util-linux-2.25-rc2.tar.gz                         02-Jul-2014 10:02      7M
    -util-linux-2.25-rc2.tar.sign                       02-Jul-2014 10:02     819
    -util-linux-2.25-rc2.tar.xz                         02-Jul-2014 10:02      3M
    -util-linux-2.25.1-rc1.tar.gz                       27-Aug-2014 13:18      8M
    -util-linux-2.25.1-rc1.tar.sign                     27-Aug-2014 13:18     819
    -util-linux-2.25.1-rc1.tar.xz                       27-Aug-2014 13:18      4M
    -util-linux-2.25.1.tar.gz                           03-Sep-2014 10:41      8M
    -util-linux-2.25.1.tar.sign                         03-Sep-2014 10:41     819
    -util-linux-2.25.1.tar.xz                           03-Sep-2014 10:41      4M
    -util-linux-2.25.2.tar.gz                           24-Oct-2014 13:08      8M
    -util-linux-2.25.2.tar.sign                         24-Oct-2014 13:08     819
    -util-linux-2.25.2.tar.xz                           24-Oct-2014 13:08      4M
    -util-linux-2.25.tar.gz                             22-Jul-2014 09:50      8M
    -util-linux-2.25.tar.sign                           22-Jul-2014 09:50     819
    -util-linux-2.25.tar.xz                             22-Jul-2014 09:50      4M
    -v2.25-ChangeLog                                    22-Jul-2014 09:50     41K
    -v2.25-ChangeLog.sign                               22-Jul-2014 09:50     819
    -v2.25-ReleaseNotes                                 22-Jul-2014 09:50     61K
    -v2.25-ReleaseNotes.sign                            22-Jul-2014 09:50     819
    -v2.25-rc1-ChangeLog                                18-Jun-2014 13:33    489K
    -v2.25-rc1-ChangeLog.sign                           18-Jun-2014 13:33     819
    -v2.25-rc2-ChangeLog                                02-Jul-2014 10:02     27K
    -v2.25-rc2-ChangeLog.sign                           02-Jul-2014 10:02     819
    -v2.25.1-ChangeLog                                  03-Sep-2014 10:41    5816
    -v2.25.1-ChangeLog.sign                             03-Sep-2014 10:41     819
    -v2.25.1-ReleaseNotes                               03-Sep-2014 10:52    3220
    -v2.25.1-ReleaseNotes.sign                          03-Sep-2014 10:52     819
    -v2.25.1-rc1-ChangeLog                              27-Aug-2014 13:18     22K
    -v2.25.1-rc1-ChangeLog.sign                         27-Aug-2014 13:18     819
    -v2.25.2-ChangeLog                                  24-Oct-2014 13:08     26K
    -v2.25.2-ChangeLog.sign                             24-Oct-2014 13:08     819
    -v2.25.2-ReleaseNotes                               24-Oct-2014 13:08    3016
    -v2.25.2-ReleaseNotes.sign                          24-Oct-2014 13:08     819
    -

    - diff --git a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.26/index.html b/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.26/index.html deleted file mode 100644 index b991489577..0000000000 --- a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.26/index.html +++ /dev/null @@ -1,42 +0,0 @@ - -Index of /pub/linux/utils/util-linux/v2.26/ - -

    Index of /pub/linux/utils/util-linux/v2.26/


    ../
    -libblkid-docs/                                     30-Apr-2015 10:38       -
    -libfdisk-docs/                                     30-Apr-2015 10:40       -
    -libmount-docs/                                     30-Apr-2015 10:42       -
    -libsmartcols-docs/                                 30-Apr-2015 10:43       -
    -sha256sums.asc                                     12-May-2017 10:54    2480
    -util-linux-2.26-rc1.tar.gz                         14-Jan-2015 13:14      8M
    -util-linux-2.26-rc1.tar.sign                       14-Jan-2015 13:14     819
    -util-linux-2.26-rc1.tar.xz                         14-Jan-2015 13:14      4M
    -util-linux-2.26-rc2.tar.gz                         04-Feb-2015 11:49      8M
    -util-linux-2.26-rc2.tar.sign                       04-Feb-2015 11:49     819
    -util-linux-2.26-rc2.tar.xz                         04-Feb-2015 11:49      4M
    -util-linux-2.26.1.tar.gz                           13-Mar-2015 14:23      8M
    -util-linux-2.26.1.tar.sign                         13-Mar-2015 14:23     819
    -util-linux-2.26.1.tar.xz                           13-Mar-2015 14:23      4M
    -util-linux-2.26.2.tar.gz                           30-Apr-2015 10:44      8M
    -util-linux-2.26.2.tar.sign                         30-Apr-2015 10:44     819
    -util-linux-2.26.2.tar.xz                           30-Apr-2015 10:44      4M
    -util-linux-2.26.tar.gz                             19-Feb-2015 12:47      8M
    -util-linux-2.26.tar.sign                           19-Feb-2015 12:47     819
    -util-linux-2.26.tar.xz                             19-Feb-2015 12:47      4M
    -v2.26-ChangeLog                                    19-Feb-2015 12:47     30K
    -v2.26-ChangeLog.sign                               19-Feb-2015 12:47     819
    -v2.26-ReleaseNotes                                 19-Feb-2015 12:47     51K
    -v2.26-ReleaseNotes.sign                            19-Feb-2015 12:47     819
    -v2.26-rc1-ChangeLog                                14-Jan-2015 13:14    360K
    -v2.26-rc1-ChangeLog.sign                           14-Jan-2015 13:14     819
    -v2.26-rc2-ChangeLog                                04-Feb-2015 11:50     51K
    -v2.26-rc2-ChangeLog.sign                           04-Feb-2015 11:50     819
    -v2.26.1-ChangeLog                                  13-Mar-2015 14:23     32K
    -v2.26.1-ChangeLog.sign                             13-Mar-2015 14:23     819
    -v2.26.1-ReleaseNotes                               13-Mar-2015 14:23    2944
    -v2.26.1-ReleaseNotes.sign                          13-Mar-2015 14:23     819
    -v2.26.2-ChangeLog                                  30-Apr-2015 10:44     58K
    -v2.26.2-ChangeLog.sign                             30-Apr-2015 10:44     819
    -v2.26.2-ReleaseNotes                               30-Apr-2015 10:44    5834
    -v2.26.2-ReleaseNotes.sign                          30-Apr-2015 10:44     819
    -

    - diff --git a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.27/index.html b/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.27/index.html deleted file mode 100644 index 14eb368367..0000000000 --- a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.27/index.html +++ /dev/null @@ -1,35 +0,0 @@ - -Index of /pub/linux/utils/util-linux/v2.27/ - -

    Index of /pub/linux/utils/util-linux/v2.27/


    ../
    -libblkid-docs/                                     02-Nov-2015 11:01       -
    -libfdisk-docs/                                     02-Nov-2015 11:03       -
    -libmount-docs/                                     02-Nov-2015 11:04       -
    -libsmartcols-docs/                                 02-Nov-2015 11:06       -
    -sha256sums.asc                                     12-May-2017 10:54    2127
    -util-linux-2.27-rc1.tar.gz                         31-Jul-2015 11:01      8M
    -util-linux-2.27-rc1.tar.sign                       31-Jul-2015 11:01     819
    -util-linux-2.27-rc1.tar.xz                         31-Jul-2015 11:01      4M
    -util-linux-2.27-rc2.tar.gz                         24-Aug-2015 11:04      8M
    -util-linux-2.27-rc2.tar.sign                       24-Aug-2015 11:04     819
    -util-linux-2.27-rc2.tar.xz                         24-Aug-2015 11:04      4M
    -util-linux-2.27.1.tar.gz                           02-Nov-2015 11:06      8M
    -util-linux-2.27.1.tar.sign                         02-Nov-2015 11:06     819
    -util-linux-2.27.1.tar.xz                           02-Nov-2015 11:06      4M
    -util-linux-2.27.tar.gz                             07-Sep-2015 08:17      8M
    -util-linux-2.27.tar.sign                           07-Sep-2015 08:17     819
    -util-linux-2.27.tar.xz                             07-Sep-2015 08:17      4M
    -v2.27-ChangeLog                                    07-Sep-2015 08:17     21K
    -v2.27-ChangeLog.sign                               07-Sep-2015 08:17     819
    -v2.27-ReleaseNotes                                 07-Sep-2015 08:17     35K
    -v2.27-ReleaseNotes.sign                            07-Sep-2015 08:17     819
    -v2.27-rc1-ChangeLog                                31-Jul-2015 11:01    263K
    -v2.27-rc1-ChangeLog.sign                           31-Jul-2015 11:01     819
    -v2.27-rc2-ChangeLog                                24-Aug-2015 11:04     38K
    -v2.27-rc2-ChangeLog.sign                           24-Aug-2015 11:04     819
    -v2.27.1-ChangeLog                                  02-Nov-2015 11:06     18K
    -v2.27.1-ChangeLog.sign                             02-Nov-2015 11:06     819
    -v2.27.1-ReleaseNotes                               02-Nov-2015 11:06    2107
    -v2.27.1-ReleaseNotes.sign                          02-Nov-2015 11:06     819
    -

    - diff --git a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.28/index.html b/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.28/index.html deleted file mode 100644 index 4bba6b4702..0000000000 --- a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.28/index.html +++ /dev/null @@ -1,42 +0,0 @@ - -Index of /pub/linux/utils/util-linux/v2.28/ - -

    Index of /pub/linux/utils/util-linux/v2.28/


    ../
    -libblkid-docs/                                     07-Sep-2016 12:00       -
    -libfdisk-docs/                                     07-Sep-2016 12:02       -
    -libmount-docs/                                     07-Sep-2016 12:04       -
    -libsmartcols-docs/                                 07-Sep-2016 12:06       -
    -sha256sums.asc                                     12-May-2017 10:55    2480
    -util-linux-2.28-rc1.tar.gz                         11-Mar-2016 11:45      8M
    -util-linux-2.28-rc1.tar.sign                       11-Mar-2016 11:45     819
    -util-linux-2.28-rc1.tar.xz                         11-Mar-2016 11:45      4M
    -util-linux-2.28-rc2.tar.gz                         29-Mar-2016 09:04      8M
    -util-linux-2.28-rc2.tar.sign                       29-Mar-2016 09:04     819
    -util-linux-2.28-rc2.tar.xz                         29-Mar-2016 09:04      4M
    -util-linux-2.28.1.tar.gz                           11-Aug-2016 10:09      9M
    -util-linux-2.28.1.tar.sign                         11-Aug-2016 10:09     819
    -util-linux-2.28.1.tar.xz                           11-Aug-2016 10:09      4M
    -util-linux-2.28.2.tar.gz                           07-Sep-2016 12:06      9M
    -util-linux-2.28.2.tar.sign                         07-Sep-2016 12:06     819
    -util-linux-2.28.2.tar.xz                           07-Sep-2016 12:06      4M
    -util-linux-2.28.tar.gz                             12-Apr-2016 11:26      8M
    -util-linux-2.28.tar.sign                           12-Apr-2016 11:26     819
    -util-linux-2.28.tar.xz                             12-Apr-2016 11:26      4M
    -v2.28-ChangeLog                                    12-Apr-2016 11:26     13K
    -v2.28-ChangeLog.sign                               12-Apr-2016 11:26     819
    -v2.28-ReleaseNotes                                 12-Apr-2016 11:26     33K
    -v2.28-ReleaseNotes.sign                            12-Apr-2016 11:26     819
    -v2.28-rc1-ChangeLog                                11-Mar-2016 11:45    269K
    -v2.28-rc1-ChangeLog.sign                           11-Mar-2016 11:45     819
    -v2.28-rc2-ChangeLog                                29-Mar-2016 09:04     52K
    -v2.28-rc2-ChangeLog.sign                           29-Mar-2016 09:04     819
    -v2.28.1-ChangeLog                                  11-Aug-2016 10:09     37K
    -v2.28.1-ChangeLog.sign                             11-Aug-2016 10:09     819
    -v2.28.1-ReleaseNotes                               11-Aug-2016 10:09    3748
    -v2.28.1-ReleaseNotes.sign                          11-Aug-2016 10:09     819
    -v2.28.2-ChangeLog                                  07-Sep-2016 12:06    8900
    -v2.28.2-ChangeLog.sign                             07-Sep-2016 12:06     819
    -v2.28.2-ReleaseNotes                               07-Sep-2016 12:06    1161
    -v2.28.2-ReleaseNotes.sign                          07-Sep-2016 12:06     819
    -

    - diff --git a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.29/index.html b/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.29/index.html deleted file mode 100644 index 916a255fa0..0000000000 --- a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.29/index.html +++ /dev/null @@ -1,42 +0,0 @@ - -Index of /pub/linux/utils/util-linux/v2.29/ - -

    Index of /pub/linux/utils/util-linux/v2.29/


    ../
    -libblkid-docs/                                     22-Feb-2017 15:20       -
    -libfdisk-docs/                                     22-Feb-2017 15:22       -
    -libmount-docs/                                     22-Feb-2017 15:24       -
    -libsmartcols-docs/                                 22-Feb-2017 15:26       -
    -sha256sums.asc                                     12-May-2017 10:55    2480
    -util-linux-2.29-rc1.tar.gz                         30-Sep-2016 09:55      9M
    -util-linux-2.29-rc1.tar.sign                       30-Sep-2016 09:55     819
    -util-linux-2.29-rc1.tar.xz                         30-Sep-2016 09:55      4M
    -util-linux-2.29-rc2.tar.gz                         19-Oct-2016 13:13      9M
    -util-linux-2.29-rc2.tar.sign                       19-Oct-2016 13:13     819
    -util-linux-2.29-rc2.tar.xz                         19-Oct-2016 13:13      4M
    -util-linux-2.29.1.tar.gz                           20-Jan-2017 14:02      9M
    -util-linux-2.29.1.tar.sign                         20-Jan-2017 14:02     819
    -util-linux-2.29.1.tar.xz                           20-Jan-2017 14:02      4M
    -util-linux-2.29.2.tar.gz                           22-Feb-2017 15:26      9M
    -util-linux-2.29.2.tar.sign                         22-Feb-2017 15:26     819
    -util-linux-2.29.2.tar.xz                           22-Feb-2017 15:26      4M
    -util-linux-2.29.tar.gz                             08-Nov-2016 11:23      9M
    -util-linux-2.29.tar.sign                           08-Nov-2016 11:23     819
    -util-linux-2.29.tar.xz                             08-Nov-2016 11:23      4M
    -v2.29-ChangeLog                                    08-Nov-2016 11:23     28K
    -v2.29-ChangeLog.sign                               08-Nov-2016 11:23     819
    -v2.29-ReleaseNotes                                 08-Nov-2016 11:24     26K
    -v2.29-ReleaseNotes.sign                            08-Nov-2016 11:24     819
    -v2.29-rc1-ChangeLog                                30-Sep-2016 09:55    219K
    -v2.29-rc1-ChangeLog.sign                           30-Sep-2016 09:55     819
    -v2.29-rc2-ChangeLog                                19-Oct-2016 13:13     19K
    -v2.29-rc2-ChangeLog.sign                           19-Oct-2016 13:13     819
    -v2.29.1-ChangeLog                                  20-Jan-2017 14:02     47K
    -v2.29.1-ChangeLog.sign                             20-Jan-2017 14:02     819
    -v2.29.1-ReleaseNotes                               20-Jan-2017 14:02    5067
    -v2.29.1-ReleaseNotes.sign                          20-Jan-2017 14:02     819
    -v2.29.2-ChangeLog                                  22-Feb-2017 15:26     14K
    -v2.29.2-ChangeLog.sign                             22-Feb-2017 15:26     819
    -v2.29.2-ReleaseNotes                               22-Feb-2017 15:26    2012
    -v2.29.2-ReleaseNotes.sign                          22-Feb-2017 15:26     819
    -

    - diff --git a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.30/index.html b/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.30/index.html deleted file mode 100644 index 0441bc0d20..0000000000 --- a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.30/index.html +++ /dev/null @@ -1,42 +0,0 @@ - -Index of /pub/linux/utils/util-linux/v2.30/ - -

    Index of /pub/linux/utils/util-linux/v2.30/


    ../
    -libblkid-docs/                                     21-Sep-2017 09:49       -
    -libfdisk-docs/                                     21-Sep-2017 09:50       -
    -libmount-docs/                                     21-Sep-2017 09:50       -
    -libsmartcols-docs/                                 21-Sep-2017 09:51       -
    -sha256sums.asc                                     21-Sep-2017 09:52    2480
    -util-linux-2.30-rc1.tar.gz                         12-May-2017 12:02      9M
    -util-linux-2.30-rc1.tar.sign                       12-May-2017 12:02     819
    -util-linux-2.30-rc1.tar.xz                         12-May-2017 12:02      4M
    -util-linux-2.30-rc2.tar.gz                         23-May-2017 10:42      9M
    -util-linux-2.30-rc2.tar.sign                       23-May-2017 10:42     819
    -util-linux-2.30-rc2.tar.xz                         23-May-2017 10:42      4M
    -util-linux-2.30.1.tar.gz                           20-Jul-2017 09:33      9M
    -util-linux-2.30.1.tar.sign                         20-Jul-2017 09:33     819
    -util-linux-2.30.1.tar.xz                           20-Jul-2017 09:33      4M
    -util-linux-2.30.2.tar.gz                           21-Sep-2017 09:51      9M
    -util-linux-2.30.2.tar.sign                         21-Sep-2017 09:51     833
    -util-linux-2.30.2.tar.xz                           21-Sep-2017 09:51      4M
    -util-linux-2.30.tar.gz                             02-Jun-2017 10:44      9M
    -util-linux-2.30.tar.sign                           02-Jun-2017 10:44     819
    -util-linux-2.30.tar.xz                             02-Jun-2017 10:44      4M
    -v2.30-ChangeLog                                    02-Jun-2017 10:44     28K
    -v2.30-ChangeLog.sign                               02-Jun-2017 10:44     819
    -v2.30-ReleaseNotes                                 02-Jun-2017 10:44     34K
    -v2.30-ReleaseNotes.sign                            02-Jun-2017 10:44     819
    -v2.30-rc1-ChangeLog                                23-May-2017 10:42    318K
    -v2.30-rc1-ChangeLog.sign                           23-May-2017 10:42     819
    -v2.30-rc2-ChangeLog                                23-May-2017 10:42     19K
    -v2.30-rc2-ChangeLog.sign                           23-May-2017 10:42     819
    -v2.30.1-ChangeLog                                  20-Jul-2017 09:33     20K
    -v2.30.1-ChangeLog.sign                             20-Jul-2017 09:33     819
    -v2.30.1-ReleaseNotes                               20-Jul-2017 09:33    1901
    -v2.30.1-ReleaseNotes.sign                          20-Jul-2017 09:33     819
    -v2.30.2-ChangeLog                                  21-Sep-2017 09:51     13K
    -v2.30.2-ChangeLog.sign                             21-Sep-2017 09:51     833
    -v2.30.2-ReleaseNotes                               21-Sep-2017 09:51    1604
    -v2.30.2-ReleaseNotes.sign                          21-Sep-2017 09:51     833
    -

    - diff --git a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.31/index.html b/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.31/index.html deleted file mode 100644 index 097e4e0e3c..0000000000 --- a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.31/index.html +++ /dev/null @@ -1,35 +0,0 @@ - -Index of /pub/linux/utils/util-linux/v2.31/ - -

    Index of /pub/linux/utils/util-linux/v2.31/


    ../
    -libblkid-docs/                                     19-Dec-2017 15:16       -
    -libfdisk-docs/                                     19-Dec-2017 15:17       -
    -libmount-docs/                                     19-Dec-2017 15:17       -
    -libsmartcols-docs/                                 19-Dec-2017 15:17       -
    -sha256sums.asc                                     19-Dec-2017 15:20    2127
    -util-linux-2.31-rc1.tar.gz                         22-Sep-2017 10:39      9M
    -util-linux-2.31-rc1.tar.sign                       22-Sep-2017 10:39     833
    -util-linux-2.31-rc1.tar.xz                         22-Sep-2017 10:39      4M
    -util-linux-2.31-rc2.tar.gz                         03-Oct-2017 16:03      9M
    -util-linux-2.31-rc2.tar.sign                       03-Oct-2017 16:03     833
    -util-linux-2.31-rc2.tar.xz                         03-Oct-2017 16:03      4M
    -util-linux-2.31.1.tar.gz                           19-Dec-2017 15:18      9M
    -util-linux-2.31.1.tar.sign                         19-Dec-2017 15:18     833
    -util-linux-2.31.1.tar.xz                           19-Dec-2017 15:18      4M
    -util-linux-2.31.tar.gz                             19-Oct-2017 11:27      9M
    -util-linux-2.31.tar.sign                           19-Oct-2017 11:27     833
    -util-linux-2.31.tar.xz                             19-Oct-2017 11:27      4M
    -v2.31-ChangeLog                                    19-Oct-2017 11:27     15K
    -v2.31-ChangeLog.sign                               19-Oct-2017 11:27     833
    -v2.31-ReleaseNotes                                 19-Oct-2017 11:27     31K
    -v2.31-ReleaseNotes.sign                            19-Oct-2017 11:27     833
    -v2.31-rc1-ChangeLog                                22-Sep-2017 10:39    290K
    -v2.31-rc1-ChangeLog.sign                           22-Sep-2017 10:39     833
    -v2.31-rc2-ChangeLog                                03-Oct-2017 16:03     12K
    -v2.31-rc2-ChangeLog.sign                           03-Oct-2017 16:03     833
    -v2.31.1-ChangeLog                                  19-Dec-2017 15:18     27K
    -v2.31.1-ChangeLog.sign                             19-Dec-2017 15:18     833
    -v2.31.1-ReleaseNotes                               19-Dec-2017 15:18    3175
    -v2.31.1-ReleaseNotes.sign                          19-Dec-2017 15:18     833
    -

    - diff --git a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.32/index.html b/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.32/index.html deleted file mode 100644 index d373e98109..0000000000 --- a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.32/index.html +++ /dev/null @@ -1,35 +0,0 @@ - -Index of /pub/linux/utils/util-linux/v2.32/ - -

    Index of /pub/linux/utils/util-linux/v2.32/


    ../
    -libblkid-docs/                                     16-Jul-2018 11:27       -
    -libfdisk-docs/                                     16-Jul-2018 11:28       -
    -libmount-docs/                                     16-Jul-2018 11:28       -
    -libsmartcols-docs/                                 16-Jul-2018 11:28       -
    -sha256sums.asc                                     16-Jul-2018 11:30    2127
    -util-linux-2.32-rc1.tar.gz                         13-Feb-2018 12:25      9M
    -util-linux-2.32-rc1.tar.sign                       13-Feb-2018 12:25     833
    -util-linux-2.32-rc1.tar.xz                         13-Feb-2018 12:25      4M
    -util-linux-2.32-rc2.tar.gz                         01-Mar-2018 13:38      9M
    -util-linux-2.32-rc2.tar.sign                       01-Mar-2018 13:38     833
    -util-linux-2.32-rc2.tar.xz                         01-Mar-2018 13:38      4M
    -util-linux-2.32.1.tar.gz                           16-Jul-2018 11:29      9M
    -util-linux-2.32.1.tar.sign                         16-Jul-2018 11:29     833
    -util-linux-2.32.1.tar.xz                           16-Jul-2018 11:29      4M
    -util-linux-2.32.tar.gz                             21-Mar-2018 14:49      9M
    -util-linux-2.32.tar.sign                           21-Mar-2018 14:49     833
    -util-linux-2.32.tar.xz                             21-Mar-2018 14:49      4M
    -v2.32-ChangeLog                                    21-Mar-2018 14:49     36K
    -v2.32-ChangeLog.sign                               21-Mar-2018 14:49     833
    -v2.32-ReleaseNotes                                 21-Mar-2018 14:49     21K
    -v2.32-ReleaseNotes.sign                            21-Mar-2018 14:49     833
    -v2.32-rc1-ChangeLog                                13-Feb-2018 12:25    174K
    -v2.32-rc1-ChangeLog.sign                           13-Feb-2018 12:25     833
    -v2.32-rc2-ChangeLog                                01-Mar-2018 13:38     21K
    -v2.32-rc2-ChangeLog.sign                           01-Mar-2018 13:38     833
    -v2.32.1-ChangeLog                                  16-Jul-2018 11:29     31K
    -v2.32.1-ChangeLog.sign                             16-Jul-2018 11:29     833
    -v2.32.1-ReleaseNotes                               16-Jul-2018 11:29    3425
    -v2.32.1-ReleaseNotes.sign                          16-Jul-2018 11:29     833
    -

-
diff --git a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.33/index.html b/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.33/index.html
deleted file mode 100644
index 5495305422..0000000000
--- a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.33/index.html
+++ /dev/null
@@ -1,42 +0,0 @@
-
-Index of /pub/linux/utils/util-linux/v2.33/
-
-

    Index of /pub/linux/utils/util-linux/v2.33/


    ../
    -libblkid-docs/                                     09-Apr-2019 13:55       -
    -libfdisk-docs/                                     09-Apr-2019 13:56       -
    -libmount-docs/                                     09-Apr-2019 13:56       -
    -libsmartcols-docs/                                 09-Apr-2019 13:56       -
    -sha256sums.asc                                     09-Apr-2019 14:01    2480
    -util-linux-2.33-rc1.tar.gz                         25-Sep-2018 10:34      9M
    -util-linux-2.33-rc1.tar.sign                       25-Sep-2018 10:34     833
    -util-linux-2.33-rc1.tar.xz                         25-Sep-2018 10:34      4M
    -util-linux-2.33-rc2.tar.gz                         19-Oct-2018 11:44      9M
    -util-linux-2.33-rc2.tar.sign                       19-Oct-2018 11:44     833
    -util-linux-2.33-rc2.tar.xz                         19-Oct-2018 11:44      4M
    -util-linux-2.33.1.tar.gz                           09-Jan-2019 10:28      9M
    -util-linux-2.33.1.tar.sign                         09-Jan-2019 10:28     833
    -util-linux-2.33.1.tar.xz                           09-Jan-2019 10:28      4M
    -util-linux-2.33.2.tar.gz                           09-Apr-2019 13:57     10M
    -util-linux-2.33.2.tar.sign                         09-Apr-2019 13:57     833
    -util-linux-2.33.2.tar.xz                           09-Apr-2019 13:57      4M
    -util-linux-2.33.tar.gz                             06-Nov-2018 11:25      9M
    -util-linux-2.33.tar.sign                           06-Nov-2018 11:25     833
    -util-linux-2.33.tar.xz                             06-Nov-2018 11:25      4M
    -v2.33-ChangeLog                                    06-Nov-2018 11:25    7977
    -v2.33-ChangeLog.sign                               06-Nov-2018 11:25     833
    -v2.33-ReleaseNotes                                 06-Nov-2018 11:25     27K
    -v2.33-ReleaseNotes.sign                            06-Nov-2018 11:25     833
    -v2.33-rc1-ChangeLog                                25-Sep-2018 10:34    210K
    -v2.33-rc1-ChangeLog.sign                           25-Sep-2018 10:34     833
    -v2.33-rc2-ChangeLog                                19-Oct-2018 11:44     18K
    -v2.33-rc2-ChangeLog.sign                           19-Oct-2018 11:44     833
    -v2.33.1-ChangeLog                                  09-Jan-2019 10:28     17K
    -v2.33.1-ChangeLog.sign                             09-Jan-2019 10:28     833
    -v2.33.1-ReleaseNotes                               09-Jan-2019 10:28    1899
    -v2.33.1-ReleaseNotes.sign                          09-Jan-2019 10:28     833
    -v2.33.2-ChangeLog                                  09-Apr-2019 13:57     21K
    -v2.33.2-ChangeLog.sign                             09-Apr-2019 13:57     833
    -v2.33.2-ReleaseNotes                               09-Apr-2019 13:57    2566
    -v2.33.2-ReleaseNotes.sign                          09-Apr-2019 13:57     833
    -

-
diff --git a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.34/index.html b/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.34/index.html
deleted file mode 100644
index bd9d9c87bb..0000000000
--- a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.34/index.html
+++ /dev/null
@@ -1,28 +0,0 @@
-
-Index of /pub/linux/utils/util-linux/v2.34/
-
-

    Index of /pub/linux/utils/util-linux/v2.34/


    ../
    -libblkid-docs/                                     14-Jun-2019 10:45       -
    -libfdisk-docs/                                     14-Jun-2019 10:45       -
    -libmount-docs/                                     14-Jun-2019 10:45       -
    -libsmartcols-docs/                                 14-Jun-2019 10:46       -
    -sha256sums.asc                                     14-Jun-2019 10:51    1774
    -util-linux-2.34-rc1.tar.gz                         30-Apr-2019 10:24     10M
    -util-linux-2.34-rc1.tar.sign                       30-Apr-2019 10:24     833
    -util-linux-2.34-rc1.tar.xz                         30-Apr-2019 10:24      5M
    -util-linux-2.34-rc2.tar.gz                         30-May-2019 10:24     10M
    -util-linux-2.34-rc2.tar.sign                       30-May-2019 10:24     833
    -util-linux-2.34-rc2.tar.xz                         30-May-2019 10:24      5M
    -util-linux-2.34.tar.gz                             14-Jun-2019 10:46     10M
    -util-linux-2.34.tar.sign                           14-Jun-2019 10:46     833
    -util-linux-2.34.tar.xz                             14-Jun-2019 10:46      5M
    -v2.34-ChangeLog                                    14-Jun-2019 10:46     14K
    -v2.34-ChangeLog.sign                               14-Jun-2019 10:46     833
    -v2.34-ReleaseNotes                                 14-Jun-2019 10:46     27K
    -v2.34-ReleaseNotes.sign                            14-Jun-2019 10:46     833
    -v2.34-rc1-ChangeLog                                30-Apr-2019 10:24    167K
    -v2.34-rc1-ChangeLog.sign                           30-Apr-2019 10:24     833
    -v2.34-rc2-ChangeLog                                30-May-2019 10:24     57K
    -v2.34-rc2-ChangeLog.sign                           30-May-2019 10:24     833
    -

-
diff --git a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.35/index.html b/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.35/index.html
deleted file mode 100644
index aa714d3918..0000000000
--- a/bitbake/lib/bb/tests/fetch-testdata/pub/linux/utils/util-linux/v2.35/index.html
+++ /dev/null
@@ -1,18 +0,0 @@
-
-Index of /pub/linux/utils/util-linux/v2.35/
-
-

    Index of /pub/linux/utils/util-linux/v2.35/


    ../
    -libblkid-docs/                                     11-Dec-2019 10:04       -
    -libfdisk-docs/                                     11-Dec-2019 10:05       -
    -libmount-docs/                                     11-Dec-2019 10:05       -
    -libsmartcols-docs/                                 11-Dec-2019 10:05       -
    -sha256sums.asc                                     11-Dec-2019 10:11    1242
    -util-linux-2.35-rc1.tar.gz                         11-Dec-2019 10:06     10M
    -util-linux-2.35-rc1.tar.sign                       11-Dec-2019 10:06     833
    -util-linux-2.35-rc1.tar.xz                         11-Dec-2019 10:06      5M
    -v2.35-ReleaseNotes                                 11-Dec-2019 10:06     21K
    -v2.35-ReleaseNotes.sign                            11-Dec-2019 10:06     833
    -v2.35-rc1-ChangeLog                                11-Dec-2019 10:06    228K
    -v2.35-rc1-ChangeLog.sign                           11-Dec-2019 10:06     833
    -

-
diff --git a/bitbake/lib/bb/tests/fetch-testdata/releases/eglibc/index.html b/bitbake/lib/bb/tests/fetch-testdata/releases/eglibc/index.html
deleted file mode 100644
index b26794021d..0000000000
--- a/bitbake/lib/bb/tests/fetch-testdata/releases/eglibc/index.html
+++ /dev/null
@@ -1,21 +0,0 @@
-
-Index of /releases/eglibc/
-
-

    Index of /releases/eglibc/


    ../
    -eglibc-2.16-svnr21224.tar.bz2                      17-Oct-2012 18:01            17310656
    -eglibc-2.16-svnr21224.tar.bz2.md5                  17-Oct-2012 21:53                  64
    -eglibc-2.16-svnr21224.tar.bz2.sha1                 17-Oct-2012 21:53                  72
    -eglibc-2.17-svnr22064.tar.bz2                      04-Jan-2013 05:44            17565519
    -eglibc-2.17-svnr22064.tar.bz2.asc                  04-Jan-2013 05:45                 302
    -eglibc-2.17-svnr22064.tar.bz2.md5                  04-Jan-2013 05:44                  64
    -eglibc-2.17-svnr22064.tar.bz2.sha1                 04-Jan-2013 05:44                  72
    -eglibc-2.18-svnr23787.tar.bz2                      21-Aug-2013 05:36            17862773
    -eglibc-2.18-svnr23787.tar.bz2.asc                  21-Aug-2013 05:36                 302
    -eglibc-2.18-svnr23787.tar.bz2.md5                  21-Aug-2013 05:36                  64
    -eglibc-2.18-svnr23787.tar.bz2.sha1                 21-Aug-2013 05:36                  72
    -eglibc-2.19-svnr25243.tar.bz2                      08-Feb-2014 10:06            18873620
    -eglibc-2.19-svnr25243.tar.bz2.asc                  08-Feb-2014 10:06                 285
    -eglibc-2.19-svnr25243.tar.bz2.md5                  08-Feb-2014 10:06                  64
    -eglibc-2.19-svnr25243.tar.bz2.sha1                 08-Feb-2014 10:06                  72
    -

-
diff --git a/bitbake/lib/bb/tests/fetch-testdata/releases/gnu-config/index.html b/bitbake/lib/bb/tests/fetch-testdata/releases/gnu-config/index.html
deleted file mode 100644
index 051aa4812f..0000000000
--- a/bitbake/lib/bb/tests/fetch-testdata/releases/gnu-config/index.html
+++ /dev/null
@@ -1,9 +0,0 @@
-
-Index of /releases/gnu-config/
-
-

    Index of /releases/gnu-config/


    ../
    -SHA256SUM                                          03-Oct-2012 17:23                 190
    -gnu-config-20120814.tar.bz2                        18-Sep-2012 09:28               43026
    -gnu-config-yocto-20111111.tgz                      08-May-2012 21:11               48762
    -

-
diff --git a/bitbake/lib/bb/tests/fetch-testdata/releases/individual/xserver/index.html b/bitbake/lib/bb/tests/fetch-testdata/releases/individual/xserver/index.html
deleted file mode 100644
index 72e0d65e02..0000000000
--- a/bitbake/lib/bb/tests/fetch-testdata/releases/individual/xserver/index.html
+++ /dev/null
@@ -1,609 +0,0 @@
-
-
-
- Index of /releases/individual/xserver
-
-

    Index of /releases/individual/xserver

[ICO]  Name                                Last modified      Size   Description

[PARENTDIR]  Parent Directory                                    -
    [   ]xorg-server-1.0.1.tar.bz22006-01-18 23:51 5.7M 
    [   ]xorg-server-1.0.1.tar.gz2006-01-18 23:51 7.7M 
    [   ]xorg-server-1.0.2.tar.bz22006-03-20 14:01 5.7M 
    [   ]xorg-server-1.0.2.tar.gz2006-03-20 14:02 7.6M 
    [   ]xorg-server-1.0.99.2.tar.bz22006-04-02 00:47 6.1M 
    [   ]xorg-server-1.0.99.2.tar.gz2006-04-02 00:48 8.3M 
    [   ]xorg-server-1.0.99.901.tar.bz22006-04-07 22:51 6.0M 
    [   ]xorg-server-1.0.99.901.tar.gz2006-04-07 22:51 8.2M 
    [   ]xorg-server-1.0.99.902.tar.bz22006-04-28 23:17 6.0M 
    [   ]xorg-server-1.0.99.902.tar.gz2006-04-28 23:16 8.2M 
    [   ]xorg-server-1.0.99.903.tar.bz22006-05-12 20:54 6.0M 
    [   ]xorg-server-1.0.99.903.tar.gz2006-05-12 20:52 8.3M 
    [   ]xorg-server-1.1.0.tar.bz22006-05-22 23:31 6.0M 
    [   ]xorg-server-1.1.0.tar.gz2006-05-22 23:29 8.3M 
    [   ]xorg-server-1.1.1.tar.bz22006-07-08 00:57 6.0M 
    [   ]xorg-server-1.1.1.tar.gz2006-07-08 00:59 8.0M 
    [   ]xorg-server-1.1.99.901.tar.bz22006-10-13 23:06 6.0M 
    [   ]xorg-server-1.1.99.901.tar.gz2006-10-13 23:08 8.1M 
    [   ]xorg-server-1.1.99.902.tar.bz22006-11-13 22:04 5.8M 
    [   ]xorg-server-1.1.99.902.tar.gz2006-11-13 22:06 7.8M 
    [   ]xorg-server-1.1.99.903.tar.bz22006-12-02 00:14 5.7M 
    [   ]xorg-server-1.1.99.903.tar.gz2006-12-02 00:17 7.7M 
    [   ]xorg-server-1.2.0.tar.bz22007-01-23 06:15 5.7M 
    [   ]xorg-server-1.2.0.tar.gz2007-01-23 06:17 7.7M 
    [   ]xorg-server-1.2.99.0.tar.bz22006-11-02 03:15 5.9M 
    [   ]xorg-server-1.2.99.0.tar.gz2006-11-02 03:17 7.9M 
    [   ]xorg-server-1.2.99.901.tar.bz22007-03-05 05:11 5.7M 
    [   ]xorg-server-1.2.99.901.tar.gz2007-03-05 05:14 7.6M 
    [   ]xorg-server-1.2.99.902.tar.bz22007-03-14 19:38 5.7M 
    [   ]xorg-server-1.2.99.902.tar.gz2007-03-14 19:43 7.6M 
    [   ]xorg-server-1.2.99.903.tar.bz22007-03-27 05:01 5.7M 
    [   ]xorg-server-1.2.99.903.tar.gz2007-03-27 05:05 7.6M 
    [   ]xorg-server-1.2.99.904.tar.bz22007-04-06 06:31 4.2M 
    [   ]xorg-server-1.2.99.904.tar.gz2007-04-06 06:28 7.6M 
    [   ]xorg-server-1.2.99.905.tar.bz22007-04-06 07:01 5.7M 
    [   ]xorg-server-1.2.99.905.tar.gz2007-04-06 06:57 7.6M 
    [   ]xorg-server-1.3.0.0.tar.bz22007-04-20 02:45 5.7M 
    [   ]xorg-server-1.3.0.0.tar.gz2007-04-20 02:42 7.6M 
    [   ]xorg-server-1.3.99.0.tar.bz22007-08-01 05:38 6.0M 
    [   ]xorg-server-1.3.99.0.tar.gz2007-08-01 05:36 8.0M 
    [   ]xorg-server-1.3.99.2.tar.bz22007-09-01 03:12 5.9M 
    [   ]xorg-server-1.3.99.2.tar.gz2007-09-01 03:10 7.8M 
    [   ]xorg-server-1.4.0.90.tar.bz22007-12-12 20:44 6.0M 
    [   ]xorg-server-1.4.0.90.tar.gz2007-12-12 20:43 8.0M 
    [   ]xorg-server-1.4.1.tar.bz22008-06-10 15:57 6.0M 
    [   ]xorg-server-1.4.1.tar.gz2008-06-10 15:56 8.1M 
    [   ]xorg-server-1.4.2.tar.bz22008-06-11 15:08 5.9M 
    [   ]xorg-server-1.4.2.tar.gz2008-06-11 15:08 7.9M 
    [   ]xorg-server-1.4.99.901.tar.bz22008-03-06 05:25 5.3M 
    [   ]xorg-server-1.4.99.901.tar.gz2008-03-06 05:23 7.1M 
    [   ]xorg-server-1.4.99.902.tar.bz22008-05-22 19:16 5.3M 
    [   ]xorg-server-1.4.99.902.tar.gz2008-05-22 19:16 7.1M 
    [   ]xorg-server-1.4.99.904.tar.bz22008-06-30 15:42 5.4M 
    [   ]xorg-server-1.4.99.904.tar.gz2008-06-30 15:42 7.3M 
    [   ]xorg-server-1.4.99.905.tar.bz22008-06-30 20:31 5.4M 
    [   ]xorg-server-1.4.99.905.tar.gz2008-06-30 20:31 7.3M 
    [   ]xorg-server-1.4.99.906.tar.bz22008-07-23 18:55 5.4M 
    [   ]xorg-server-1.4.99.906.tar.gz2008-07-23 18:55 7.3M 
    [   ]xorg-server-1.4.tar.bz22007-09-06 09:23 5.9M 
    [   ]xorg-server-1.4.tar.gz2007-09-06 09:22 7.8M 
    [   ]xorg-server-1.5.0.tar.bz22008-09-03 23:16 5.4M 
    [   ]xorg-server-1.5.0.tar.gz2008-09-03 23:16 7.3M 
    [   ]xorg-server-1.5.1.tar.bz22008-09-23 19:15 5.4M 
    [   ]xorg-server-1.5.1.tar.gz2008-09-23 19:15 7.3M 
    [   ]xorg-server-1.5.2.tar.bz22008-10-10 19:27 5.4M 
    [   ]xorg-server-1.5.2.tar.gz2008-10-10 19:27 7.3M 
    [   ]xorg-server-1.5.3.tar.bz22008-11-05 20:49 5.4M 
    [   ]xorg-server-1.5.3.tar.gz2008-11-05 20:49 7.3M 
    [   ]xorg-server-1.5.99.1.tar.bz22008-11-26 07:29 4.4M 
    [   ]xorg-server-1.5.99.1.tar.gz2008-11-26 07:26 5.9M 
    [   ]xorg-server-1.5.99.2.tar.bz22008-12-03 08:16 4.4M 
    [   ]xorg-server-1.5.99.2.tar.gz2008-12-03 08:09 5.9M 
    [   ]xorg-server-1.5.99.3.tar.bz22008-12-10 06:28 4.4M 
    [   ]xorg-server-1.5.99.3.tar.gz2008-12-10 06:25 5.9M 
    [   ]xorg-server-1.5.99.901.tar.bz22009-01-12 21:12 4.4M 
    [   ]xorg-server-1.5.99.901.tar.gz2009-01-12 21:12 6.0M 
    [   ]xorg-server-1.5.99.902.tar.bz22009-01-31 05:27 4.4M 
    [   ]xorg-server-1.5.99.902.tar.gz2009-01-31 05:24 6.0M 
    [   ]xorg-server-1.5.99.903.tar.bz22009-02-18 06:35 4.4M 
    [   ]xorg-server-1.5.99.903.tar.gz2009-02-18 06:32 6.0M 
    [   ]xorg-server-1.6.0.tar.bz22009-02-25 20:25 4.4M 
    [   ]xorg-server-1.6.0.tar.gz2009-02-25 20:19 6.0M 
    [   ]xorg-server-1.6.1.901.tar.bz22009-05-09 05:46 4.4M 
    [   ]xorg-server-1.6.1.901.tar.gz2009-05-09 05:42 5.8M 
    [   ]xorg-server-1.6.1.902.tar.bz22009-06-29 23:05 4.4M 
    [   ]xorg-server-1.6.1.902.tar.gz2009-06-29 23:02 5.9M 
    [   ]xorg-server-1.6.1.tar.bz22009-04-14 20:09 4.4M 
    [   ]xorg-server-1.6.1.tar.gz2009-04-14 20:09 5.8M 
    [   ]xorg-server-1.6.2.901.tar.bz22009-07-26 21:42 4.4M 
    [   ]xorg-server-1.6.2.901.tar.gz2009-07-26 21:41 5.9M 
    [   ]xorg-server-1.6.2.tar.bz22009-07-07 23:40 4.4M 
    [   ]xorg-server-1.6.2.tar.gz2009-07-07 23:39 5.9M 
    [   ]xorg-server-1.6.3.901.tar.bz22009-08-26 05:55 4.5M 
    [   ]xorg-server-1.6.3.901.tar.gz2009-08-26 05:54 5.9M 
    [   ]xorg-server-1.6.3.tar.bz22009-08-01 06:45 4.4M 
    [   ]xorg-server-1.6.3.tar.gz2009-08-01 06:42 5.9M 
    [   ]xorg-server-1.6.4.901.tar.bz22009-10-03 07:40 4.5M 
    [   ]xorg-server-1.6.4.901.tar.gz2009-10-03 07:44 5.9M 
    [   ]xorg-server-1.6.4.tar.bz22009-09-28 02:47 4.5M 
    [   ]xorg-server-1.6.4.tar.gz2009-09-28 02:45 5.9M 
    [   ]xorg-server-1.6.5.tar.bz22009-10-12 05:27 4.5M 
    [   ]xorg-server-1.6.5.tar.gz2009-10-12 05:26 5.9M 
    [   ]xorg-server-1.6.99.900.tar.bz22009-09-04 07:02 4.7M 
    [   ]xorg-server-1.6.99.900.tar.gz2009-09-04 07:02 6.2M 
    [   ]xorg-server-1.6.99.901.tar.bz22009-09-14 10:26 4.7M 
    [   ]xorg-server-1.6.99.901.tar.gz2009-09-14 10:23 6.2M 
    [   ]xorg-server-1.6.99.902.tar.bz22009-09-22 04:53 4.7M 
    [   ]xorg-server-1.6.99.902.tar.gz2009-09-22 04:50 6.2M 
    [   ]xorg-server-1.6.99.903.tar.bz22009-09-28 11:25 4.7M 
    [   ]xorg-server-1.6.99.903.tar.gz2009-09-28 11:22 6.2M 
    [   ]xorg-server-1.7.0.901.tar.bz22009-10-12 04:40 4.7M 
    [   ]xorg-server-1.7.0.901.tar.gz2009-10-12 04:39 6.3M 
    [   ]xorg-server-1.7.0.902.tar.bz22009-10-19 02:10 4.7M 
    [   ]xorg-server-1.7.0.902.tar.gz2009-10-19 02:10 6.3M 
    [   ]xorg-server-1.7.0.tar.bz22009-10-02 06:17 4.7M 
    [   ]xorg-server-1.7.0.tar.gz2009-10-02 06:16 6.3M 
    [   ]xorg-server-1.7.1.901.tar.bz22009-11-06 05:11 4.7M 
    [   ]xorg-server-1.7.1.901.tar.gz2009-11-06 05:11 6.3M 
    [   ]xorg-server-1.7.1.902.tar.bz22009-11-20 05:52 4.7M 
    [   ]xorg-server-1.7.1.902.tar.gz2009-11-20 05:51 6.3M 
    [   ]xorg-server-1.7.1.tar.bz22009-10-23 05:40 4.7M 
    [   ]xorg-server-1.7.1.tar.gz2009-10-23 05:39 6.3M 
    [   ]xorg-server-1.7.2.tar.bz22009-11-27 05:46 4.7M 
    [   ]xorg-server-1.7.2.tar.gz2009-11-27 05:45 6.3M 
    [   ]xorg-server-1.7.3.901.tar.bz22009-12-11 06:40 4.7M 
    [   ]xorg-server-1.7.3.901.tar.gz2009-12-11 06:40 6.3M 
    [   ]xorg-server-1.7.3.902.tar.bz22009-12-26 01:08 4.7M 
    [   ]xorg-server-1.7.3.902.tar.gz2009-12-26 01:05 6.3M 
    [   ]xorg-server-1.7.3.tar.bz22009-12-03 03:38 4.7M 
    [   ]xorg-server-1.7.3.tar.gz2009-12-03 03:37 6.3M 
    [   ]xorg-server-1.7.4.901.tar.bz22010-01-23 00:16 4.7M 
    [   ]xorg-server-1.7.4.901.tar.gz2010-01-23 00:16 6.3M 
    [   ]xorg-server-1.7.4.902.tar.bz22010-02-05 08:37 4.7M 
    [   ]xorg-server-1.7.4.902.tar.gz2010-02-05 08:32 6.3M 
    [   ]xorg-server-1.7.4.tar.bz22010-01-08 01:09 4.7M 
    [   ]xorg-server-1.7.4.tar.gz2010-01-08 01:09 6.3M 
    [   ]xorg-server-1.7.5.901.tar.bz22010-03-05 00:26 4.7M 
    [   ]xorg-server-1.7.5.901.tar.gz2010-03-05 00:23 6.3M 
    [   ]xorg-server-1.7.5.902.tar.bz22010-03-12 07:07 4.7M 
    [   ]xorg-server-1.7.5.902.tar.gz2010-03-12 07:02 6.3M 
    [   ]xorg-server-1.7.5.tar.bz22010-02-16 03:54 4.7M 
    [   ]xorg-server-1.7.5.tar.gz2010-02-16 03:50 6.3M 
    [   ]xorg-server-1.7.6.901.tar.bz22010-04-12 02:12 4.7M 
    [   ]xorg-server-1.7.6.901.tar.gz2010-04-12 02:12 6.3M 
    [   ]xorg-server-1.7.6.902.tar.bz22010-04-21 00:25 4.7M 
    [   ]xorg-server-1.7.6.902.tar.gz2010-04-21 00:25 6.3M 
    [   ]xorg-server-1.7.6.tar.bz22010-03-17 01:56 4.7M 
    [   ]xorg-server-1.7.6.tar.gz2010-03-17 01:56 6.3M 
    [   ]xorg-server-1.7.7.tar.bz22010-05-04 07:51 4.7M 
    [   ]xorg-server-1.7.7.tar.gz2010-05-04 07:48 6.3M 
    [   ]xorg-server-1.7.99.1.tar.bz22009-10-21 16:15 4.8M 
    [   ]xorg-server-1.7.99.1.tar.gz2009-10-21 16:15 6.5M 
    [   ]xorg-server-1.7.99.2.tar.bz22009-12-20 03:50 4.8M 
    [   ]xorg-server-1.7.99.2.tar.gz2009-12-20 03:48 6.5M 
    [   ]xorg-server-1.7.99.901.tar.bz22010-02-12 22:00 4.9M 
    [   ]xorg-server-1.7.99.901.tar.gz2010-02-12 21:59 6.5M 
    [   ]xorg-server-1.7.99.902.tar.bz22010-03-22 05:42 4.9M 
    [   ]xorg-server-1.7.99.902.tar.gz2010-03-22 05:41 6.5M 
    [   ]xorg-server-1.8.0.901.tar.bz22010-04-27 05:08 4.9M 
    [   ]xorg-server-1.8.0.901.tar.gz2010-04-27 05:05 6.6M 
    [   ]xorg-server-1.8.0.902.tar.bz22010-05-04 00:39 4.9M 
    [   ]xorg-server-1.8.0.902.tar.gz2010-05-04 00:36 6.6M 
    [   ]xorg-server-1.8.0.tar.bz22010-04-02 07:30 4.9M 
    [   ]xorg-server-1.8.0.tar.gz2010-04-02 07:28 6.5M 
    [   ]xorg-server-1.8.1.901.tar.bz22010-06-02 00:07 5.0M 
    [   ]xorg-server-1.8.1.901.tar.gz2010-06-02 00:07 6.6M 
    [   ]xorg-server-1.8.1.902.tar.bz22010-06-21 02:07 5.0M 
    [   ]xorg-server-1.8.1.902.tar.gz2010-06-21 02:07 6.6M 
    [   ]xorg-server-1.8.1.tar.bz22010-05-11 21:52 5.0M 
    [   ]xorg-server-1.8.1.tar.gz2010-05-11 21:52 6.6M 
    [   ]xorg-server-1.8.2.tar.bz22010-07-01 05:29 5.0M 
    [   ]xorg-server-1.8.2.tar.gz2010-07-01 05:29 6.6M 
    [   ]xorg-server-1.8.99.901.tar.bz22010-06-15 21:19 5.1M 
    [   ]xorg-server-1.8.99.901.tar.gz2010-06-15 21:18 6.8M 
    [   ]xorg-server-1.8.99.902.tar.bz22010-06-22 19:05 5.1M 
    [   ]xorg-server-1.8.99.902.tar.gz2010-06-22 19:04 6.8M 
    [   ]xorg-server-1.8.99.903.tar.bz22010-06-22 19:33 5.1M 
    [   ]xorg-server-1.8.99.903.tar.gz2010-06-22 19:32 6.8M 
    [   ]xorg-server-1.8.99.904.tar.bz22010-07-01 13:46 5.1M 
    [   ]xorg-server-1.8.99.904.tar.gz2010-07-01 13:39 6.8M 
    [   ]xorg-server-1.8.99.905.tar.bz22010-07-14 19:58 5.1M 
    [   ]xorg-server-1.8.99.905.tar.gz2010-07-14 19:58 6.8M 
    [   ]xorg-server-1.8.99.906.tar.bz22010-08-13 06:22 5.1M 
    [   ]xorg-server-1.8.99.906.tar.gz2010-08-13 06:20 6.8M 
    [   ]xorg-server-1.9.0.901.tar.bz22010-10-01 21:22 5.0M 
    [   ]xorg-server-1.9.0.901.tar.gz2010-10-01 21:21 6.7M 
    [   ]xorg-server-1.9.0.902.tar.bz22010-10-15 18:34 5.0M 
    [   ]xorg-server-1.9.0.902.tar.gz2010-10-15 18:33 6.7M 
    [   ]xorg-server-1.9.0.tar.bz22010-08-21 00:46 5.1M 
    [   ]xorg-server-1.9.0.tar.gz2010-08-21 00:45 6.8M 
    [   ]xorg-server-1.9.1.tar.bz22010-10-24 03:22 5.0M 
    [   ]xorg-server-1.9.1.tar.gz2010-10-24 03:22 6.7M 
    [   ]xorg-server-1.9.2.901.tar.bz22010-11-14 00:12 5.0M 
    [   ]xorg-server-1.9.2.901.tar.gz2010-11-14 00:12 6.8M 
    [   ]xorg-server-1.9.2.902.tar.bz22010-12-04 19:25 5.0M 
    [   ]xorg-server-1.9.2.902.tar.gz2010-12-04 19:25 6.8M 
    [   ]xorg-server-1.9.2.tar.bz22010-10-31 23:15 5.0M 
    [   ]xorg-server-1.9.2.tar.gz2010-10-31 23:15 6.8M 
    [   ]xorg-server-1.9.3.901.tar.bz22011-01-08 21:37 5.0M 
    [   ]xorg-server-1.9.3.901.tar.gz2011-01-08 21:37 6.8M 
    [   ]xorg-server-1.9.3.902.tar.bz22011-01-31 01:16 5.0M 
    [   ]xorg-server-1.9.3.902.tar.gz2011-01-31 01:16 6.7M 
    [   ]xorg-server-1.9.3.tar.bz22010-12-13 20:05 5.0M 
    [   ]xorg-server-1.9.3.tar.gz2010-12-13 20:05 6.8M 
    [   ]xorg-server-1.9.4.901.tar.bz22011-03-04 23:21 4.9M 
    [   ]xorg-server-1.9.4.901.tar.gz2011-03-04 23:21 6.7M 
    [   ]xorg-server-1.9.4.tar.bz22011-02-04 20:03 5.0M 
    [   ]xorg-server-1.9.4.tar.gz2011-02-04 20:02 6.7M 
    [   ]xorg-server-1.9.5.tar.bz22011-03-17 21:49 4.9M 
    [   ]xorg-server-1.9.5.tar.gz2011-03-17 21:49 6.7M 
    [   ]xorg-server-1.9.99.901.tar.bz22010-12-07 04:57 5.1M 
    [   ]xorg-server-1.9.99.901.tar.gz2010-12-07 04:56 6.8M 
    [   ]xorg-server-1.9.99.902.tar.bz22011-02-18 22:50 5.1M 
    [   ]xorg-server-1.9.99.902.tar.gz2011-02-18 22:49 6.8M 
    [   ]xorg-server-1.9.99.903.tar.bz22011-02-25 06:46 5.0M 
    [   ]xorg-server-1.9.99.903.tar.gz2011-02-25 06:44 6.8M 
    [   ]xorg-server-1.10.0.901.tar.bz22011-03-29 07:36 5.1M 
    [   ]xorg-server-1.10.0.901.tar.gz2011-03-29 07:36 6.9M 
    [   ]xorg-server-1.10.0.902.tar.bz22011-04-08 23:49 5.1M 
    [   ]xorg-server-1.10.0.902.tar.gz2011-04-08 23:48 6.9M 
    [   ]xorg-server-1.10.0.tar.bz22011-02-26 05:49 5.1M 
    [   ]xorg-server-1.10.0.tar.gz2011-02-26 05:48 6.9M 
    [   ]xorg-server-1.10.1.901.tar.bz22011-05-06 22:59 5.1M 
    [   ]xorg-server-1.10.1.901.tar.gz2011-05-06 22:59 6.9M 
    [   ]xorg-server-1.10.1.902.tar.bz22011-05-21 06:13 5.1M 
    [   ]xorg-server-1.10.1.902.tar.gz2011-05-21 06:13 6.9M 
    [   ]xorg-server-1.10.1.tar.bz22011-04-16 01:13 5.1M 
    [   ]xorg-server-1.10.1.tar.gz2011-04-16 01:13 6.9M 
    [   ]xorg-server-1.10.2.901.tar.bz22011-06-17 17:30 5.1M 
    [   ]xorg-server-1.10.2.901.tar.gz2011-06-17 17:30 6.9M 
    [   ]xorg-server-1.10.2.902.tar.bz22011-07-02 03:55 5.1M 
    [   ]xorg-server-1.10.2.902.tar.gz2011-07-02 03:55 6.9M 
    [   ]xorg-server-1.10.2.tar.bz22011-05-29 00:20 5.1M 
    [   ]xorg-server-1.10.2.tar.gz2011-05-29 00:20 6.9M 
    [   ]xorg-server-1.10.3.901.tar.bz22011-07-29 18:39 5.1M 
    [   ]xorg-server-1.10.3.901.tar.gz2011-07-29 18:39 7.0M 
    [   ]xorg-server-1.10.3.902.tar.bz22011-08-12 22:30 5.1M 
    [   ]xorg-server-1.10.3.902.tar.gz2011-08-12 22:30 7.0M 
    [   ]xorg-server-1.10.3.tar.bz22011-07-08 20:04 5.1M 
    [   ]xorg-server-1.10.3.tar.gz2011-07-08 20:04 6.9M 
    [   ]xorg-server-1.10.4.tar.bz22011-08-19 07:13 5.1M 
    [   ]xorg-server-1.10.4.tar.gz2011-08-19 07:12 7.0M 
    [   ]xorg-server-1.10.6.tar.bz22012-02-11 01:11 5.2M 
    [   ]xorg-server-1.10.6.tar.gz2012-02-11 01:11 7.0M 
    [   ]xorg-server-1.10.99.901.tar.bz22011-06-01 18:34 4.7M 
    [   ]xorg-server-1.10.99.901.tar.gz2011-06-01 18:33 6.3M 
    [   ]xorg-server-1.10.99.902.tar.bz22011-08-04 04:10 4.8M 
    [   ]xorg-server-1.10.99.902.tar.gz2011-08-04 04:09 6.4M 
    [   ]xorg-server-1.11.0.tar.bz22011-08-27 01:02 4.8M 
    [   ]xorg-server-1.11.0.tar.gz2011-08-27 01:01 6.4M 
    [   ]xorg-server-1.11.1.901.tar.bz22011-10-14 23:41 4.7M 
    [   ]xorg-server-1.11.1.901.tar.gz2011-10-14 23:40 6.3M 
    [   ]xorg-server-1.11.1.902.tar.bz22011-10-29 01:44 4.7M 
    [   ]xorg-server-1.11.1.902.tar.gz2011-10-29 01:44 6.3M 
    [   ]xorg-server-1.11.1.tar.bz22011-09-24 07:19 4.7M 
    [   ]xorg-server-1.11.1.tar.gz2011-09-24 07:19 6.3M 
    [   ]xorg-server-1.11.2.901.tar.bz22011-11-28 08:17 4.7M 
    [   ]xorg-server-1.11.2.901.tar.gz2011-11-28 08:16 6.3M 
    [   ]xorg-server-1.11.2.902.tar.bz22011-12-09 21:10 4.7M 
    [   ]xorg-server-1.11.2.902.tar.gz2011-12-09 21:10 6.3M 
    [   ]xorg-server-1.11.2.tar.bz22011-11-04 17:38 4.7M 
    [   ]xorg-server-1.11.2.tar.gz2011-11-04 17:38 6.3M 
    [   ]xorg-server-1.11.3.901.tar.bz22012-01-07 07:34 4.7M 
    [   ]xorg-server-1.11.3.901.tar.gz2012-01-07 07:34 6.3M 
    [   ]xorg-server-1.11.3.902.tar.bz22012-01-21 08:29 4.7M 
    [   ]xorg-server-1.11.3.902.tar.gz2012-01-21 08:29 6.3M 
    [   ]xorg-server-1.11.3.tar.bz22011-12-17 02:03 4.7M 
    [   ]xorg-server-1.11.3.tar.gz2011-12-17 02:03 6.3M 
    [   ]xorg-server-1.11.4.tar.bz22012-01-28 05:20 4.7M 
    [   ]xorg-server-1.11.4.tar.gz2012-01-28 05:20 6.3M 
    [   ]xorg-server-1.11.99.1.tar.bz22011-11-20 23:05 4.8M 
    [   ]xorg-server-1.11.99.1.tar.gz2011-11-20 23:04 6.5M 
    [   ]xorg-server-1.11.99.2.tar.bz22011-12-18 01:30 4.8M 
    [   ]xorg-server-1.11.99.2.tar.gz2011-12-18 01:29 6.5M 
    [   ]xorg-server-1.11.99.901.tar.bz22011-12-27 22:19 4.9M 
    [   ]xorg-server-1.11.99.901.tar.gz2011-12-27 22:18 6.6M 
    [   ]xorg-server-1.11.99.902.tar.bz22012-01-28 06:48 4.9M 
    [   ]xorg-server-1.11.99.902.tar.gz2012-01-28 06:47 6.6M 
    [   ]xorg-server-1.11.99.903.tar.bz22012-02-11 03:18 5.3M 
    [   ]xorg-server-1.11.99.903.tar.gz2012-02-11 03:16 7.2M 
    [   ]xorg-server-1.12.0.901.tar.bz22012-03-31 03:15 5.1M 
    [   ]xorg-server-1.12.0.901.tar.gz2012-03-31 03:15 7.0M 
    [   ]xorg-server-1.12.0.902.tar.bz22012-04-10 02:48 5.1M 
    [   ]xorg-server-1.12.0.902.tar.gz2012-04-10 02:48 7.0M 
    [   ]xorg-server-1.12.0.tar.bz22012-03-05 05:12 5.3M 
    [   ]xorg-server-1.12.0.tar.gz2012-03-05 05:11 7.2M 
    [   ]xorg-server-1.12.1.901.tar.bz22012-05-07 07:10 5.2M 
    [   ]xorg-server-1.12.1.901.tar.gz2012-05-07 07:10 7.2M 
    [   ]xorg-server-1.12.1.902.tar.bz22012-05-20 05:17 5.2M 
    [   ]xorg-server-1.12.1.902.tar.gz2012-05-20 05:17 7.2M 
    [   ]xorg-server-1.12.1.tar.bz22012-04-13 22:52 5.1M 
    [   ]xorg-server-1.12.1.tar.gz2012-04-13 22:52 7.0M 
    [   ]xorg-server-1.12.2.901.tar.bz22012-06-15 03:15 5.2M 
    [   ]xorg-server-1.12.2.901.tar.gz2012-06-15 03:14 7.2M 
    [   ]xorg-server-1.12.2.902.tar.bz22012-07-02 00:34 5.2M 
    [   ]xorg-server-1.12.2.902.tar.gz2012-07-02 00:34 7.2M 
    [   ]xorg-server-1.12.2.tar.bz22012-05-29 20:11 5.2M 
    [   ]xorg-server-1.12.2.tar.gz2012-05-29 20:11 7.2M 
    [   ]xorg-server-1.12.3.901.tar.bz22012-08-03 17:26 5.2M 
    [   ]xorg-server-1.12.3.901.tar.gz2012-08-03 17:26 7.2M 
    [   ]xorg-server-1.12.3.902.tar.bz22012-08-19 16:11 5.2M 
    [   ]xorg-server-1.12.3.902.tar.gz2012-08-19 16:11 7.2M 
    [   ]xorg-server-1.12.3.tar.bz22012-07-09 01:21 5.2M 
    [   ]xorg-server-1.12.3.tar.gz2012-07-09 01:21 7.2M 
    [   ]xorg-server-1.12.4.tar.bz22012-08-27 05:15 5.2M 
    [   ]xorg-server-1.12.4.tar.gz2012-08-27 05:15 7.2M 
    [   ]xorg-server-1.12.99.901.tar.bz22012-07-10 08:35 5.2M 
    [   ]xorg-server-1.12.99.901.tar.gz2012-07-10 08:34 7.2M 
    [   ]xorg-server-1.12.99.902.tar.bz22012-07-17 22:50 5.2M 
    [   ]xorg-server-1.12.99.902.tar.gz2012-07-17 22:49 7.2M 
    [   ]xorg-server-1.12.99.903.tar.bz22012-07-26 05:50 5.2M 
    [   ]xorg-server-1.12.99.903.tar.gz2012-07-26 05:49 7.2M 
    [   ]xorg-server-1.12.99.904.tar.bz22012-08-08 00:57 5.2M 
    [   ]xorg-server-1.12.99.904.tar.gz2012-08-08 00:56 7.2M 
    [   ]xorg-server-1.12.99.905.tar.bz22012-08-21 21:53 5.2M 
    [   ]xorg-server-1.12.99.905.tar.gz2012-08-21 21:52 7.2M 
    [   ]xorg-server-1.13.0.901.tar.bz22012-11-23 05:10 5.2M 
    [   ]xorg-server-1.13.0.901.tar.gz2012-11-23 05:09 7.2M 
    [   ]xorg-server-1.13.0.902.tar.bz22012-12-07 06:09 5.2M 
    [   ]xorg-server-1.13.0.902.tar.gz2012-12-07 06:08 7.2M 
    [   ]xorg-server-1.13.0.tar.bz22012-09-05 21:48 5.2M 
    [   ]xorg-server-1.13.0.tar.gz2012-09-05 21:47 7.2M 
    [   ]xorg-server-1.13.1.901.tar.bz22013-01-04 06:51 5.2M 
    [   ]xorg-server-1.13.1.901.tar.gz2013-01-04 06:50 7.2M 
    [   ]xorg-server-1.13.1.tar.bz22012-12-14 21:47 5.2M 
    [   ]xorg-server-1.13.1.tar.bz2.old2012-12-14 04:43 5.2M 
    [   ]xorg-server-1.13.1.tar.gz2012-12-14 21:49 7.2M 
    [   ]xorg-server-1.13.1.tar.gz.old2012-12-14 04:42 7.2M 
    [   ]xorg-server-1.13.2.901.tar.bz22013-02-16 07:14 5.2M 
    [   ]xorg-server-1.13.2.901.tar.gz2013-02-16 07:14 7.2M 
    [   ]xorg-server-1.13.2.902.tar.bz22013-03-01 07:31 5.2M 
    [   ]xorg-server-1.13.2.902.tar.gz2013-03-01 07:31 7.2M 
    [   ]xorg-server-1.13.2.tar.bz22013-01-25 06:01 5.2M 
    [   ]xorg-server-1.13.2.tar.gz2013-01-25 06:00 7.2M 
    [   ]xorg-server-1.13.3.tar.bz22013-03-08 06:19 5.2M 
    [   ]xorg-server-1.13.3.tar.gz2013-03-08 06:19 7.2M 
    [   ]xorg-server-1.13.4.tar.bz22013-04-17 06:00 5.3M 
    [   ]xorg-server-1.13.4.tar.gz2013-04-17 05:59 7.2M 
    [   ]xorg-server-1.13.99.901.tar.bz22012-12-19 20:50 5.2M 
    [   ]xorg-server-1.13.99.901.tar.gz2012-12-19 20:50 7.2M 
    [   ]xorg-server-1.13.99.902.tar.bz22013-02-14 05:44 5.3M 
    [   ]xorg-server-1.13.99.902.tar.gz2013-02-14 05:43 7.3M 
    [   ]xorg-server-1.14.0.tar.bz22013-03-06 06:35 5.3M 
    [   ]xorg-server-1.14.0.tar.gz2013-03-06 06:34 7.3M 
    [   ]xorg-server-1.14.1.901.tar.bz22013-05-31 06:09 5.2M 
    [   ]xorg-server-1.14.1.901.tar.gz2013-05-31 06:09 7.2M 
    [   ]xorg-server-1.14.1.902.tar.bz22013-06-13 22:28 5.2M 
    [   ]xorg-server-1.14.1.902.tar.gz2013-06-13 22:28 7.2M 
    [   ]xorg-server-1.14.1.tar.bz22013-04-17 07:37 5.3M 
    [   ]xorg-server-1.14.1.tar.gz2013-04-17 07:36 7.3M 
    [   ]xorg-server-1.14.2-rc1.tar.bz22013-05-31 04:38 5.2M 
    [   ]xorg-server-1.14.2-rc1.tar.gz2013-05-31 04:38 7.2M 
    [   ]xorg-server-1.14.2.901.tar.bz22013-07-26 05:47 5.2M 
    [   ]xorg-server-1.14.2.901.tar.bz2.old2013-07-26 04:27 5.2M 
    [   ]xorg-server-1.14.2.901.tar.gz2013-07-26 05:47 7.2M 
    [   ]xorg-server-1.14.2.901.tar.gz.old2013-07-26 04:27 7.2M 
    [   ]xorg-server-1.14.2.902.tar.bz22013-08-22 23:57 5.2M 
    [   ]xorg-server-1.14.2.902.tar.gz2013-08-22 23:57 7.3M 
    [   ]xorg-server-1.14.2.tar.bz22013-06-25 15:52 5.2M 
    [   ]xorg-server-1.14.2.tar.gz2013-06-25 15:52 7.2M 
    [   ]xorg-server-1.14.3-rc1.tar.bz22013-07-26 04:21 5.2M 
    [   ]xorg-server-1.14.3-rc1.tar.gz2013-07-26 04:21 7.2M 
    [   ]xorg-server-1.14.3.901.tar.bz22013-10-26 19:53 5.3M 
    [   ]xorg-server-1.14.3.901.tar.gz2013-10-26 19:53 7.3M 
    [   ]xorg-server-1.14.3.tar.bz22013-09-13 03:19 5.2M 
    [   ]xorg-server-1.14.3.tar.gz2013-09-13 03:19 7.3M 
    [   ]xorg-server-1.14.4.901.tar.bz22013-11-22 05:13 5.2M 
    [   ]xorg-server-1.14.4.901.tar.gz2013-11-22 05:13 7.3M 
    [   ]xorg-server-1.14.4.tar.bz22013-11-01 05:31 5.3M 
    [   ]xorg-server-1.14.4.tar.gz2013-11-01 05:31 7.3M 
    [   ]xorg-server-1.14.5.901.tar.bz22014-03-22 05:21 5.3M 
    [   ]xorg-server-1.14.5.901.tar.gz2014-03-22 05:21 7.3M 
    [   ]xorg-server-1.14.5.tar.bz22013-12-13 03:53 5.2M 
    [   ]xorg-server-1.14.5.tar.gz2013-12-13 03:53 7.3M 
    [   ]xorg-server-1.14.6.tar.bz22014-04-14 02:49 5.3M 
    [   ]xorg-server-1.14.6.tar.gz2014-04-14 02:49 7.3M 
    [   ]xorg-server-1.14.7.tar.bz22014-06-06 04:20 5.3M 
    [   ]xorg-server-1.14.7.tar.gz2014-06-06 04:19 7.3M 
    [   ]xorg-server-1.14.99.1.tar.bz22013-04-24 17:16 5.3M 
    [   ]xorg-server-1.14.99.1.tar.gz2013-04-24 17:15 7.3M 
    [   ]xorg-server-1.14.99.2.tar.bz22013-10-05 00:01 5.3M 
    [   ]xorg-server-1.14.99.2.tar.gz2013-10-05 00:00 7.4M 
    [   ]xorg-server-1.14.99.3.tar.bz22013-10-19 00:34 5.3M 
    [   ]xorg-server-1.14.99.3.tar.gz2013-10-19 00:33 7.4M 
    [   ]xorg-server-1.14.99.901.tar.bz22013-11-01 08:51 5.3M 
    [   ]xorg-server-1.14.99.901.tar.gz2013-11-01 08:50 7.3M 
    [   ]xorg-server-1.14.99.902.tar.bz22013-11-14 01:32 5.3M 
    [   ]xorg-server-1.14.99.902.tar.gz2013-11-14 01:32 7.3M 
    [   ]xorg-server-1.14.99.903.tar.bz22013-11-24 06:31 5.3M 
    [   ]xorg-server-1.14.99.903.tar.gz2013-11-24 06:30 7.3M 
    [   ]xorg-server-1.14.99.904.tar.bz22013-12-11 15:57 5.3M 
    [   ]xorg-server-1.14.99.904.tar.gz2013-12-11 15:56 7.3M 
    [   ]xorg-server-1.14.99.905.tar.bz22013-12-19 22:35 5.3M 
    [   ]xorg-server-1.14.99.905.tar.gz2013-12-19 22:35 7.3M 
    [   ]xorg-server-1.15.0.901.tar.bz22014-03-22 06:04 5.2M 
    [   ]xorg-server-1.15.0.901.tar.gz2014-03-22 06:04 7.3M 
    [   ]xorg-server-1.15.0.tar.bz22013-12-27 18:01 5.3M 
    [   ]xorg-server-1.15.0.tar.gz2013-12-27 18:00 7.3M 
    [   ]xorg-server-1.15.1.tar.bz22014-04-14 03:16 5.2M 
    [   ]xorg-server-1.15.1.tar.gz2014-04-14 03:16 7.3M 
    [   ]xorg-server-1.15.2.tar.bz22014-06-27 01:30 5.3M 
    [   ]xorg-server-1.15.2.tar.bz2.sig2014-06-27 01:30 72  
    [   ]xorg-server-1.15.2.tar.gz2014-06-27 01:29 7.4M 
    [   ]xorg-server-1.15.2.tar.gz.sig2014-06-27 01:30 72  
    [   ]xorg-server-1.15.99.901.tar.bz22014-02-24 21:52 5.4M 
    [   ]xorg-server-1.15.99.901.tar.gz2014-02-24 21:52 7.6M 
    [   ]xorg-server-1.15.99.902.tar.bz22014-04-08 21:32 5.5M 
    [   ]xorg-server-1.15.99.902.tar.gz2014-04-08 21:31 7.7M 
    [   ]xorg-server-1.15.99.903.tar.bz22014-06-05 05:41 5.5M 
    [   ]xorg-server-1.15.99.903.tar.gz2014-06-05 05:40 7.7M 
    [   ]xorg-server-1.15.99.904.tar.bz22014-07-07 23:35 5.6M 
    [   ]xorg-server-1.15.99.904.tar.bz2.sig2014-07-07 23:35 536  
    [   ]xorg-server-1.15.99.904.tar.gz2014-07-07 23:35 7.7M 
    [   ]xorg-server-1.15.99.904.tar.gz.sig2014-07-07 23:35 536  
    [   ]xorg-server-1.16.0.901.tar.bz22014-09-15 21:38 5.5M 
    [   ]xorg-server-1.16.0.901.tar.bz2.sig2014-09-15 21:38 543  
    [   ]xorg-server-1.16.0.901.tar.gz2014-09-15 21:37 7.7M 
    [   ]xorg-server-1.16.0.901.tar.gz.sig2014-09-15 21:38 543  
    [   ]xorg-server-1.16.0.tar.bz22014-07-17 07:09 5.6M 
    [   ]xorg-server-1.16.0.tar.bz2.sig2014-07-17 07:09 536  
    [   ]xorg-server-1.16.0.tar.gz2014-07-17 07:08 7.7M 
    [   ]xorg-server-1.16.0.tar.gz.sig2014-07-17 07:09 536  
    [   ]xorg-server-1.16.1.901.tar.bz22014-11-02 10:52 5.5M 
    [   ]xorg-server-1.16.1.901.tar.bz2.sig2014-11-02 10:52 543  
    [   ]xorg-server-1.16.1.901.tar.gz2014-11-02 10:51 7.7M 
    [   ]xorg-server-1.16.1.901.tar.gz.sig2014-11-02 10:52 543  
    [   ]xorg-server-1.16.1.tar.bz22014-09-21 09:17 5.5M 
    [   ]xorg-server-1.16.1.tar.bz2.sig2014-09-21 09:17 543  
    [   ]xorg-server-1.16.1.tar.gz2014-09-21 09:16 7.7M 
    [   ]xorg-server-1.16.1.tar.gz.sig2014-09-21 09:17 543  
    [   ]xorg-server-1.16.2.901.tar.bz22014-12-09 20:12 5.5M 
    [   ]xorg-server-1.16.2.901.tar.bz2.sig2014-12-09 20:12 543  
    [   ]xorg-server-1.16.2.901.tar.gz2014-12-09 20:11 7.7M 
    [   ]xorg-server-1.16.2.901.tar.gz.sig2014-12-09 20:12 543  
    [   ]xorg-server-1.16.2.tar.bz22014-11-10 15:53 5.5M 
    [   ]xorg-server-1.16.2.tar.bz2.sig2014-11-10 15:53 543  
    [   ]xorg-server-1.16.2.tar.gz2014-11-10 15:53 7.7M 
    [   ]xorg-server-1.16.2.tar.gz.sig2014-11-10 15:53 543  
    [   ]xorg-server-1.16.3.tar.bz22014-12-20 12:19 5.5M 
    [   ]xorg-server-1.16.3.tar.bz2.sig2014-12-20 12:19 543  
    [   ]xorg-server-1.16.3.tar.gz2014-12-20 12:18 7.7M 
    [   ]xorg-server-1.16.3.tar.gz.sig2014-12-20 12:19 543  
    [   ]xorg-server-1.16.4.tar.bz22015-02-11 00:15 5.5M 
    [   ]xorg-server-1.16.4.tar.bz2.sig2015-02-11 00:15 543  
    [   ]xorg-server-1.16.4.tar.gz2015-02-11 00:14 7.7M 
    [   ]xorg-server-1.16.4.tar.gz.sig2015-02-11 00:15 543  
    [   ]xorg-server-1.16.99.901.tar.bz22014-10-29 04:37 5.6M 
    [   ]xorg-server-1.16.99.901.tar.bz2.sig2014-10-29 04:37 536  
    [   ]xorg-server-1.16.99.901.tar.gz2014-10-29 04:37 7.8M 
    [   ]xorg-server-1.16.99.901.tar.gz.sig2014-10-29 04:37 536  
    [   ]xorg-server-1.16.99.902.tar.bz22015-01-23 19:03 5.6M 
    [   ]xorg-server-1.16.99.902.tar.bz2.sig2015-01-23 19:03 536  
    [   ]xorg-server-1.16.99.902.tar.gz2015-01-23 19:03 7.8M 
    [   ]xorg-server-1.16.99.902.tar.gz.sig2015-01-23 19:03 536  
    [   ]xorg-server-1.17.0.tar.bz22015-02-04 17:37 5.6M 
    [   ]xorg-server-1.17.0.tar.bz2.sig2015-02-04 17:37 536  
    [   ]xorg-server-1.17.0.tar.gz2015-02-04 17:37 7.8M 
    [   ]xorg-server-1.17.0.tar.gz.sig2015-02-04 17:37 536  
    [   ]xorg-server-1.17.1.tar.bz22015-02-10 22:53 5.6M 
    [   ]xorg-server-1.17.1.tar.bz2.sig2015-02-10 22:53 536  
    [   ]xorg-server-1.17.1.tar.gz2015-02-10 22:52 7.8M 
    [   ]xorg-server-1.17.1.tar.gz.sig2015-02-10 22:53 536  
    [   ]xorg-server-1.17.2.tar.bz22015-06-16 16:31 5.5M 
    [   ]xorg-server-1.17.2.tar.bz2.sig2015-06-16 16:31 72  
    [   ]xorg-server-1.17.2.tar.gz2015-06-16 16:30 7.7M 
    [   ]xorg-server-1.17.2.tar.gz.sig2015-06-16 16:31 72  
    [   ]xorg-server-1.17.3.tar.bz22015-10-26 17:09 5.5M 
    [   ]xorg-server-1.17.3.tar.bz2.sig2015-10-26 17:09 72  
    [   ]xorg-server-1.17.3.tar.gz2015-10-26 17:09 7.8M 
    [   ]xorg-server-1.17.3.tar.gz.sig2015-10-26 17:09 72  
    [   ]xorg-server-1.17.4.tar.bz22015-10-28 16:38 5.5M 
    [   ]xorg-server-1.17.4.tar.bz2.sig2015-10-28 16:38 72  
    [   ]xorg-server-1.17.4.tar.gz2015-10-28 16:38 7.8M 
    [   ]xorg-server-1.17.4.tar.gz.sig2015-10-28 16:38 72  
    [   ]xorg-server-1.17.99.901.tar.bz22015-09-02 02:34 5.6M 
    [   ]xorg-server-1.17.99.901.tar.bz2.sig2015-09-02 02:34 536  
    [   ]xorg-server-1.17.99.901.tar.gz2015-09-02 02:34 7.9M 
    [   ]xorg-server-1.17.99.901.tar.gz.sig2015-09-02 02:34 536  
    [   ]xorg-server-1.17.99.902.tar.bz22015-10-26 18:13 5.5M 
    [   ]xorg-server-1.17.99.902.tar.bz2.sig2015-10-26 18:13 72  
    [   ]xorg-server-1.17.99.902.tar.gz2015-10-26 18:13 7.8M 
    [   ]xorg-server-1.17.99.902.tar.gz.sig2015-10-26 18:13 72  
    [   ]xorg-server-1.18.0.tar.bz22015-11-09 21:11 5.5M 
    [   ]xorg-server-1.18.0.tar.bz2.sig2015-11-09 21:11 72  
    [   ]xorg-server-1.18.0.tar.gz2015-11-09 21:11 7.8M 
    [   ]xorg-server-1.18.0.tar.gz.sig2015-11-09 21:11 72  
    [   ]xorg-server-1.18.1.tar.bz22016-02-08 23:41 5.6M 
    [   ]xorg-server-1.18.1.tar.bz2.sig2016-02-08 23:41 72  
    [   ]xorg-server-1.18.1.tar.gz2016-02-08 23:41 7.8M 
    [   ]xorg-server-1.18.1.tar.gz.sig2016-02-08 23:41 72  
    [   ]xorg-server-1.18.2.tar.bz22016-03-11 21:45 5.6M 
    [   ]xorg-server-1.18.2.tar.bz2.sig2016-03-11 21:45 72  
    [   ]xorg-server-1.18.2.tar.gz2016-03-11 21:45 7.8M 
    [   ]xorg-server-1.18.2.tar.gz.sig2016-03-11 21:45 72  
    [   ]xorg-server-1.18.3.tar.bz22016-04-04 19:48 5.6M 
    [   ]xorg-server-1.18.3.tar.bz2.sig2016-04-04 19:48 72  
    [   ]xorg-server-1.18.3.tar.gz2016-04-04 19:48 7.9M 
    [   ]xorg-server-1.18.3.tar.gz.sig2016-04-04 19:48 72  
    [   ]xorg-server-1.18.4.tar.bz22016-07-19 17:42 5.7M 
    [   ]xorg-server-1.18.4.tar.bz2.sig2016-07-19 17:42 72  
    [   ]xorg-server-1.18.4.tar.gz2016-07-19 17:42 8.0M 
    [   ]xorg-server-1.18.4.tar.gz.sig2016-07-19 17:42 72  
    [   ]xorg-server-1.18.99.2.tar.bz22016-09-16 20:55 5.8M 
    [   ]xorg-server-1.18.99.2.tar.bz2.sig2016-09-16 20:55 543  
    [   ]xorg-server-1.18.99.2.tar.gz2016-09-16 20:54 8.0M 
    [   ]xorg-server-1.18.99.2.tar.gz.sig2016-09-16 20:55 543  
    [   ]xorg-server-1.18.99.901.tar.bz22016-09-19 16:10 5.8M 
    [   ]xorg-server-1.18.99.901.tar.bz2.sig2016-09-19 16:10 543  
    [   ]xorg-server-1.18.99.901.tar.gz2016-09-19 16:10 8.0M 
    [   ]xorg-server-1.18.99.901.tar.gz.sig2016-09-19 16:10 543  
    [   ]xorg-server-1.18.99.902.tar.bz22016-10-28 16:47 5.8M 
    [   ]xorg-server-1.18.99.902.tar.bz2.sig2016-10-28 16:47 543  
    [   ]xorg-server-1.18.99.902.tar.gz2016-10-28 16:47 8.1M 
    [   ]xorg-server-1.18.99.902.tar.gz.sig2016-10-28 16:47 543  
    [   ]xorg-server-1.19.0.tar.bz22016-11-15 17:08 5.8M 
    [   ]xorg-server-1.19.0.tar.bz2.sig2016-11-15 17:08 543  
    [   ]xorg-server-1.19.0.tar.gz2016-11-15 17:07 8.1M 
    [   ]xorg-server-1.19.0.tar.gz.sig2016-11-15 17:08 543  
    [   ]xorg-server-1.19.1.tar.bz22017-01-11 21:25 5.8M 
    [   ]xorg-server-1.19.1.tar.bz2.sig2017-01-11 21:25 72  
    [   ]xorg-server-1.19.1.tar.gz2017-01-11 21:25 8.1M 
    [   ]xorg-server-1.19.1.tar.gz.sig2017-01-11 21:25 72  
    [   ]xorg-server-1.19.2.tar.bz22017-03-02 23:05 5.7M 
    [   ]xorg-server-1.19.2.tar.bz2.sig2017-03-02 23:05 72  
    [   ]xorg-server-1.19.2.tar.gz2017-03-02 23:05 7.9M 
    [   ]xorg-server-1.19.2.tar.gz.sig2017-03-02 23:05 72  
    [   ]xorg-server-1.19.3.tar.bz22017-03-15 18:12 5.8M 
    [   ]xorg-server-1.19.3.tar.bz2.sig2017-03-15 18:12 72  
    [   ]xorg-server-1.19.3.tar.gz2017-03-15 18:12 8.1M 
    [   ]xorg-server-1.19.3.tar.gz.sig2017-03-15 18:12 72  
    [   ]xorg-server-1.19.4.tar.bz22017-10-04 22:00 5.7M 
    [   ]xorg-server-1.19.4.tar.bz2.sig2017-10-04 22:00 438  
    [   ]xorg-server-1.19.4.tar.gz2017-10-04 22:00 8.0M 
    [   ]xorg-server-1.19.4.tar.gz.sig2017-10-04 22:00 438  
    [   ]xorg-server-1.19.5.tar.bz22017-10-12 17:31 5.7M 
    [   ]xorg-server-1.19.5.tar.bz2.sig2017-10-12 17:31 438  
    [   ]xorg-server-1.19.5.tar.gz2017-10-12 17:31 8.0M 
    [   ]xorg-server-1.19.5.tar.gz.sig2017-10-12 17:31 438  
    [   ]xorg-server-1.19.6.tar.bz22017-12-20 20:39 5.7M 
    [   ]xorg-server-1.19.6.tar.bz2.sig2017-12-20 20:39 438  
    [   ]xorg-server-1.19.6.tar.gz2017-12-20 20:39 8.0M 
    [   ]xorg-server-1.19.6.tar.gz.sig2017-12-20 20:39 438  
    [   ]xorg-server-1.19.7.tar.bz22019-03-02 23:03 5.8M 
    [   ]xorg-server-1.19.7.tar.bz2.sig2019-03-02 23:03 287  
    [   ]xorg-server-1.19.7.tar.gz2019-03-02 23:02 8.1M 
    [   ]xorg-server-1.19.7.tar.gz.sig2019-03-02 23:03 287  
    [   ]xorg-server-1.19.99.901.tar.bz22018-02-28 18:28 5.8M 
    [   ]xorg-server-1.19.99.901.tar.bz2.sig2018-02-28 18:28 95  
    [   ]xorg-server-1.19.99.901.tar.gz2018-02-28 18:28 8.1M 
    [   ]xorg-server-1.19.99.901.tar.gz.sig2018-02-28 18:28 95  
    [   ]xorg-server-1.19.99.902.tar.bz22018-03-28 20:39 5.8M 
    [   ]xorg-server-1.19.99.902.tar.bz2.sig2018-03-28 20:39 438  
    [   ]xorg-server-1.19.99.902.tar.gz2018-03-28 20:39 8.1M 
    [   ]xorg-server-1.19.99.902.tar.gz.sig2018-03-28 20:39 438  
    [   ]xorg-server-1.19.99.903.tar.bz22018-04-02 19:56 5.8M 
    [   ]xorg-server-1.19.99.903.tar.bz2.sig2018-04-02 19:56 438  
    [   ]xorg-server-1.19.99.903.tar.gz2018-04-02 19:56 8.1M 
    [   ]xorg-server-1.19.99.903.tar.gz.sig2018-04-02 19:56 438  
    [   ]xorg-server-1.19.99.904.tar.bz22018-04-10 19:50 5.8M 
    [   ]xorg-server-1.19.99.904.tar.bz2.sig2018-04-10 19:50 438  
    [   ]xorg-server-1.19.99.904.tar.gz2018-04-10 19:50 8.1M 
    [   ]xorg-server-1.19.99.904.tar.gz.sig2018-04-10 19:50 438  
    [   ]xorg-server-1.19.99.905.tar.bz22018-04-24 21:12 5.8M 
    [   ]xorg-server-1.19.99.905.tar.bz2.sig2018-04-24 21:12 438  
    [   ]xorg-server-1.19.99.905.tar.gz2018-04-24 21:12 8.1M 
    [   ]xorg-server-1.19.99.905.tar.gz.sig2018-04-24 21:12 438  
    [   ]xorg-server-1.20.0.tar.bz22018-05-10 16:38 5.8M 
    [   ]xorg-server-1.20.0.tar.bz2.sig2018-05-10 16:38 438  
    [   ]xorg-server-1.20.0.tar.gz2018-05-10 16:38 8.1M 
    [   ]xorg-server-1.20.0.tar.gz.sig2018-05-10 16:38 438  
    [   ]xorg-server-1.20.1.tar.bz22018-08-07 16:37 5.8M 
    [   ]xorg-server-1.20.1.tar.bz2.sig2018-08-07 16:37 438  
    [   ]xorg-server-1.20.1.tar.gz2018-08-07 16:37 8.1M 
    [   ]xorg-server-1.20.1.tar.gz.sig2018-08-07 16:37 438  
    [   ]xorg-server-1.20.2.tar.bz22018-10-15 16:03 5.9M 
    [   ]xorg-server-1.20.2.tar.bz2.sig2018-10-15 16:03 438  
    [   ]xorg-server-1.20.2.tar.gz2018-10-15 16:03 8.2M 
    [   ]xorg-server-1.20.2.tar.gz.sig2018-10-15 16:03 438  
    [   ]xorg-server-1.20.3.tar.bz22018-10-25 14:17 5.9M 
    [   ]xorg-server-1.20.3.tar.bz2.sig2018-10-25 14:17 438  
    [   ]xorg-server-1.20.3.tar.gz2018-10-25 14:17 8.2M 
    [   ]xorg-server-1.20.3.tar.gz.sig2018-10-25 14:17 438  
    [   ]xorg-server-1.20.4.tar.bz22019-02-26 19:33 5.8M 
    [   ]xorg-server-1.20.4.tar.bz2.sig2019-02-26 19:33 438  
    [   ]xorg-server-1.20.4.tar.gz2019-02-26 19:33 8.2M 
    [   ]xorg-server-1.20.4.tar.gz.sig2019-02-26 19:33 438  
    [   ]xorg-server-1.20.5.tar.bz22019-05-30 18:32 5.8M 
    [   ]xorg-server-1.20.5.tar.bz2.sig2019-05-30 18:32 438  
    [   ]xorg-server-1.20.5.tar.gz2019-05-30 18:32 8.2M 
    [   ]xorg-server-1.20.5.tar.gz.sig2019-05-30 18:32 438  
    [   ]xorg-server-1.20.6.tar.bz22019-11-22 23:50 6.0M 
    [   ]xorg-server-1.20.6.tar.bz2.sig2019-11-22 23:50 215  
    [   ]xorg-server-1.20.6.tar.gz2019-11-22 23:50 8.3M 
    [   ]xorg-server-1.20.6.tar.gz.sig2019-11-22 23:50 215  

    -
    Apache/2.4.38 (Debian) Server at www.x.org Port 443
-
diff --git a/bitbake/lib/bb/tests/fetch-testdata/software/libxml2/2.10/index.html b/bitbake/lib/bb/tests/fetch-testdata/software/libxml2/2.10/index.html
deleted file mode 100644
index 4e41af6d6a..0000000000
--- a/bitbake/lib/bb/tests/fetch-testdata/software/libxml2/2.10/index.html
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
-Index of /sources/libxml2/2.10/
-

    Index of /sources/libxml2/2.10/

File Name  ↓                     File Size  ↓   Date  ↓
                                            -   -
                                      2.5 MiB   2022-Oct-14 12:55
                                      7.1 KiB   2022-Aug-17 11:55
                                        174 B   2022-Aug-17 11:55
                                      2.6 MiB   2022-Aug-17 11:55
                                        455 B   2022-Aug-25 11:33
                                        174 B   2022-Aug-25 11:33
                                      2.6 MiB   2022-Aug-25 11:33
                                        309 B   2022-Aug-29 14:56
                                        174 B   2022-Aug-29 14:56
                                      2.5 MiB   2022-Aug-29 14:56
                                        294 B   2022-Oct-14 12:55
                                        174 B   2022-Oct-14 12:55
                                      2.5 MiB   2022-Oct-14 12:55
diff --git a/bitbake/lib/bb/tests/fetch-testdata/software/libxml2/2.9/index.html b/bitbake/lib/bb/tests/fetch-testdata/software/libxml2/2.9/index.html
deleted file mode 100644
index abdfdd0fa2..0000000000
--- a/bitbake/lib/bb/tests/fetch-testdata/software/libxml2/2.9/index.html
+++ /dev/null
@@ -1,40 +0,0 @@
-
-
-Index of /sources/libxml2/2.9/
-

    Index of /sources/libxml2/2.9/

File Name  ↓                     File Size  ↓   Date  ↓
                                            -   -
                                      3.0 MiB   2022-May-02 12:03
                                         87 B   2022-Feb-14 18:27
                                      3.0 MiB   2022-Feb-14 18:27
                                         87 B   2022-Feb-14 18:28
                                      3.0 MiB   2022-Feb-14 18:28
                                         88 B   2022-Feb-14 18:42
                                      3.2 MiB   2022-Feb-14 18:42
                                         88 B   2022-Feb-14 18:43
                                      3.2 MiB   2022-Feb-14 18:43
                                         88 B   2022-Feb-14 18:45
                                      3.2 MiB   2022-Feb-14 18:45
                                     26.6 KiB   2022-Feb-20 12:42
                                        174 B   2022-Feb-20 12:42
                                      3.1 MiB   2022-Feb-20 12:42
                                      1.0 KiB   2022-May-02 12:03
                                        174 B   2022-May-02 12:03
                                      3.0 MiB   2022-May-02 12:03
                                         87 B   2022-Feb-14 18:30
                                      3.2 MiB   2022-Feb-14 18:30
                                         87 B   2022-Feb-14 18:31
                                      3.2 MiB   2022-Feb-14 18:31
                                         87 B   2022-Feb-14 18:33
                                      2.9 MiB   2022-Feb-14 18:33
                                         87 B   2022-Feb-14 18:35
                                      3.0 MiB   2022-Feb-14 18:35
                                         87 B   2022-Feb-14 18:36
                                      3.0 MiB   2022-Feb-14 18:36
                                         87 B   2022-Feb-14 18:37
                                      3.0 MiB   2022-Feb-14 18:37
                                         87 B   2022-Feb-14 18:39
                                      3.0 MiB   2022-Feb-14 18:39
                                         87 B   2022-Feb-14 18:40
                                      3.0 MiB   2022-Feb-14 18:40
diff --git a/bitbake/lib/bb/tests/fetch-testdata/software/libxml2/index.html b/bitbake/lib/bb/tests/fetch-testdata/software/libxml2/index.html
deleted file mode 100644
index c183e06a55..0000000000
--- a/bitbake/lib/bb/tests/fetch-testdata/software/libxml2/index.html
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
-Index of /sources/libxml2/
-

    Index of /sources/libxml2/

File Name  ↓                     File Size  ↓   Date  ↓
                                            -   -
                                            -   2009-Jul-14 13:04
                                            -   2009-Jul-14 13:04
                                            -   2022-Oct-14 12:55
                                            -   2009-Jul-14 13:04
                                            -   2009-Jul-14 13:05
                                            -   2009-Jul-14 13:05
                                            -   2009-Jul-14 13:05
                                            -   2009-Jul-14 13:05
                                            -   2022-Feb-14 18:24
                                            -   2022-Feb-14 18:26
                                            -   2022-May-02 12:04
                                     22.8 KiB   2022-Oct-14 12:55
diff --git a/bitbake/lib/bb/tests/fetch-testdata/software/miniupnp/download.php b/bitbake/lib/bb/tests/fetch-testdata/software/miniupnp/download.php
deleted file mode 100644
index e27ee134f2..0000000000
--- a/bitbake/lib/bb/tests/fetch-testdata/software/miniupnp/download.php
+++ /dev/null
@@ -1,3528 +0,0 @@
-
-
-
-MiniUPnP download zone
-
-
-

    MiniUPnP Project

    - -

-Home | Downloads | Compatibility list | libnatpmp | MiniSSDPd | xchat upnp patch | Search | Forum

    -

    -English | Français -

    - -
    - - -
    - -

    MiniUPnP download zone

    -

-Find on this page the sources of MiniUPnP and some related files.
-You will also find precompiled binaries of the UPnP client sample
-program for Windows, compiled using MinGW. There are also Windows
-binaries (including the Python module) automatically built using
-AppVeyor.

    -

-If you just need one of these programs installed on your machine,
-you probably don't need to download and compile the source files.
-It is very likely that a package/port already exists for your
-system/distribution; refer to your system documentation to find out
-how to search for and install a package/port.
-Mac OS X has port systems too: see MacPorts, Homebrew or Fink.

    -

-The miniupnpc (client) sources have been successfully compiled
-under Windows XP/Vista/7/10/etc. (using MinGW, Mingw-w64 or Cygwin),
-Linux, OpenBSD, FreeBSD, NetBSD, DragonFlyBSD, Solaris, Mac OS X and AmigaOS.
-The Makefile of the client is made for GNU make: check which version
-your system has with the command "make --version". On some systems,
-such as OpenBSD, you have to use "gmake". Under Windows with MinGW,
-GNU make is called "mingw32-make" and a slightly modified version of
-the Makefile should be used: Makefile.mingw. Run "mingw32make.bat" to compile.
-If you have any compatibility problem, please post on the
-forum or contact me by email.

    - -

    Get miniupnpc under AmigaOS 4 on -OS4Depot. -

    -

-Dario Meloni has made a Ruby Gem embedding miniupnpc:
-https://rubygems.org/gems/mupnp.

    -

-The Python module is available on pypi.org: pip install miniupnpc.
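(A minimal sketch of the Python binding in use, assuming the UPnP
class and the discover/selectigd helpers the miniupnpc module commonly
ships; check help(miniupnpc) for the exact API of your version.)

    import miniupnpc

    u = miniupnpc.UPnP()
    u.discoverdelay = 200                        # ms to wait for IGD answers
    print(u.discover(), 'UPnP device(s) found')  # SSDP discovery on the LAN
    u.selectigd()                                # pick a valid Internet Gateway Device
    print('external IP:', u.externalipaddress())
    # redirect external TCP port 12345 to the same port on this host
    u.addportmapping(12345, 'TCP', u.lanaddr, 12345, 'example mapping', '')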

    -

-The daemon (started in November 2006) compiles with BSD make under
-BSD and Solaris.
-To compile the daemon under Linux, use "make -f Makefile.linux".
-To compile for OpenWRT, please read the README.openwrt file, or use
-the packages miniupnpc and miniupnpd.
-pfSense users are advised to use the miniupnpd port available for
-their system. Recent versions of pfSense include MiniUPnPd in the
-base system.
-For Linksys WRT54G and WRT54GL owners, the Tarifa firmware is another
-alternative for getting miniUPnPd running on the router.

    -

-Please read the README and LICENCE files included with the
-distribution for further information.

    -

-The MiniUPnP daemon (miniupnpd) works under OpenBSD, NetBSD, FreeBSD,
-DragonFlyBSD, Mac OS X and (Open)Solaris with pf, with IP Filter or
-with ipfw.
-The Linux version uses either libiptc, which allows the netfilter
-rules inside the kernel to be accessed the same way iptables does, or
-libnftnl, which is the equivalent for nftables.

    - -

-Releases are now GPG signed with the key A31ACAAF. The previous
-signing key was A5C0863C. Get it from your favorite key server.

    - -

    REST API

    -

    You can use the REST API to get the latest releases available:

    - - -
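(The concrete REST links were lost from this capture; as a hedged
illustration only, a client could fetch the release feed roughly like
this. The URL below is a placeholder, not taken from the page.)

    import json
    import urllib.request

    # Placeholder endpoint for illustration; substitute the URL the
    # page actually links to.
    URL = 'https://miniupnp.example/files/rest.php/tags?count=1'

    with urllib.request.urlopen(URL) as resp:
        latest = json.load(resp)   # expects a JSON description of releases
    print(latest)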

You can help!

    -

    If you make a package/port for your favorite OS distribution, -inform me so I can upload the package here or add a link to your -repository. -

    - -

    Latest files

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    name | size | date | comment
    miniupnpc-2.3.2.tar.gz | 140137 | 05/03/2025 10:31 | MiniUPnP client release source code | changelog | Signature
    miniupnpd-2.3.7.tar.gz | 265329 | 22/06/2024 22:31 | MiniUPnP daemon release source code | changelog | Signature
    libnatpmp-20230423.tar.gz | 26506 | 23/04/2023 11:02 | latest libnatpmp source code | changelog | Signature
    minissdpd-1.6.0.tar.gz | 39077 | 22/10/2022 18:41 | MiniSSDPd release source code | changelog | Signature
    upnpc-exe-win32-20220515.zip | 69503 | 15/05/2022 14:31 | Windows executable | changelog
    minissdpd-1.5.20211105.tar.gz | 38870 | 04/11/2021 23:34 | latest MiniSSDPd source code | changelog | Signature
    miniupnpc-2.1.20201016.tar.gz | 97682 | 15/10/2020 22:31 | latest MiniUPnP client source code | changelog | Signature
    miniupnpd-2.1.20200510.tar.gz | 245426 | 10/05/2020 18:23 | latest MiniUPnP daemon source code | changelog | Signature
    xchat-upnp20110811.patch | 10329 | 11/08/2011 15:18 | Patch to add UPnP capabilities to xchat | changelog
    minidlna_1.0.21.minissdp1.patch | 7598 | 25/07/2011 14:57 | Patch for MiniDLNA to use miniSSDPD | changelog
    miniupnpc-new20060630.tar.gz | 14840 | 04/11/2006 18:16 | João Paulo Barraca version of the upnp client | changelog

    All files

    name | size | date | comment
    miniupnpc-2.3.2.tar.gz14013705/03/2025 10:31:36 +0000MiniUPnP client release source codeSignature
    miniupnpc-2.3.1.tar.gz13949923/02/2025 16:44:16 +0000MiniUPnP client release source codeSignature
    miniupnpc-2.3.0.tar.gz10507110/01/2025 23:16:45 +0000MiniUPnP client release source codeSignature
    miniupnpd-2.3.7.tar.gz26532922/06/2024 22:31:38 +0000MiniUPnP daemon release source codeSignature
    miniupnpc-2.2.8.tar.gz10460308/06/2024 22:13:39 +0000MiniUPnP client release source codeSignature
    miniupnpd-2.3.6.tar.gz26301819/03/2024 23:39:51 +0000MiniUPnP daemon release source codeSignature
    miniupnpc-2.2.7.tar.gz10425819/03/2024 23:25:18 +0000MiniUPnP client release source codeSignature
    miniupnpd-2.3.5.tar.gz26195202/03/2024 11:04:07 +0000MiniUPnP daemon release source codeSignature
    miniupnpd-2.3.4.tar.gz26081004/01/2024 00:53:17 +0000MiniUPnP daemon release source codeSignature
    miniupnpc-2.2.6.tar.gz10394904/01/2024 00:27:14 +0000MiniUPnP client release source codeSignature
    miniupnpc-2.2.5.tar.gz10365411/06/2023 23:14:56 +0000MiniUPnP client release source codeSignature
    libnatpmp-20230423.tar.gz2650623/04/2023 11:02:09 +0000libnatpmp source codeSignature
    miniupnpd-2.3.3.tar.gz26007917/02/2023 03:07:46 +0000MiniUPnP daemon release source codeSignature
    miniupnpd-2.3.2.tar.gz25968619/01/2023 23:18:08 +0000MiniUPnP daemon release source codeSignature
    minissdpd-1.6.0.tar.gz3907722/10/2022 18:41:54 +0000MiniSSDPd release source codeSignature
    miniupnpc-2.2.4.tar.gz10293221/10/2022 21:01:01 +0000MiniUPnP client release source codeSignature
    miniupnpd-2.3.1.tar.gz25805016/10/2022 05:58:44 +0000MiniUPnP daemon release source codeSignature
    upnpc-exe-win32-20220515.zip6950315/05/2022 14:31:25 +0000Windows executable
    hexchat-2.16.patch814719/03/2022 16:52:05 +0000
    miniupnpd-2.3.0.tar.gz25606923/01/2022 00:23:32 +0000MiniUPnP daemon release source codeSignature
    minissdpd-1.5.20211105.tar.gz3887004/11/2021 23:34:49 +0000MiniSSDPd source codeSignature
    miniupnpc-2.2.3.tar.gz10136028/09/2021 21:43:32 +0000MiniUPnP client release source codeSignature
    miniupnpd-2.2.3.tar.gz25475221/08/2021 08:35:13 +0000MiniUPnP daemon release source codeSignature
    miniupnpd-2.2.2.tar.gz25064913/05/2021 11:30:11 +0000MiniUPnP daemon release source codeSignature
    miniupnpc-2.2.2.tar.gz10000802/03/2021 23:44:52 +0000MiniUPnP client release source codeSignature
    miniupnpd-2.2.1.tar.gz25002320/12/2020 18:08:08 +0000MiniUPnP daemon release source codeSignature
    miniupnpc-2.2.1.tar.gz9959520/12/2020 18:08:02 +0000MiniUPnP client release source codeSignature
    miniupnpc-2.2.0.tar.gz9834809/11/2020 19:51:24 +0000MiniUPnP client release source codeSignature
    miniupnpd-2.2.0.tar.gz24985831/10/2020 09:20:59 +0000MiniUPnP daemon release source codeSignature
    miniupnpd-2.2.0-RC3.tar.gz24987930/10/2020 21:49:49 +0000MiniUPnP daemon release source codeSignature
    miniupnpc-2.1.20201016.tar.gz9768215/10/2020 22:31:09 +0000MiniUPnP client source codeSignature
    miniupnpd-2.2.0-RC2.tar.gz24875628/09/2020 21:57:22 +0000MiniUPnP daemon release source codeSignature
    miniupnpc-2.1.20200928.tar.gz9650828/09/2020 21:56:09 +0000MiniUPnP client source codeSignature
    minissdpd-1.5.20200928.tar.gz3786028/09/2020 21:55:40 +0000MiniSSDPd source codeSignature
    miniupnpd-2.2.0-RC1.tar.gz24777206/06/2020 18:34:50 +0000MiniUPnP daemon release source codeSignature
    miniupnpd-2.2.0-RC0.tar.gz24550716/05/2020 18:03:17 +0000MiniUPnP daemon release source codeSignature
    miniupnpd-2.1.20200510.tar.gz24542610/05/2020 18:23:13 +0000MiniUPnP daemon source codeSignature
    miniupnpd-2.1.20200329.tar.gz24372529/03/2020 09:11:02 +0000MiniUPnP daemon source codeSignature
    miniupnpc-2.1.20191224.tar.gz9474023/12/2019 23:37:32 +0000MiniUPnP client source codeSignature
    miniupnpd-2.1.20191006.tar.gz24325506/10/2019 21:02:31 +0000MiniUPnP daemon source codeSignature
    miniupnpd-2.1.20191005.tar.gz24410005/10/2019 21:33:08 +0000MiniUPnP daemon source codeSignature
    miniupnpd-2.1.20191003.tar.gz24328702/10/2019 22:23:51 +0000MiniUPnP daemon source codeSignature
    miniupnpd-2.1.20190924.tar.gz24100824/09/2019 11:58:15 +0000MiniUPnP daemon source codeSignature
    miniupnpd-2.1.20190902.tar.gz24074201/09/2019 23:03:03 +0000MiniUPnP daemon source codeSignature
    miniupnpd-2.1.20190824.tar.gz24049024/08/2019 09:21:52 +0000MiniUPnP daemon source codeSignature
    minissdpd-1.5.20190824.tar.gz3730024/08/2019 09:17:32 +0000MiniSSDPd source codeSignature
    miniupnpc-2.1.20190824.tar.gz9456424/08/2019 09:12:50 +0000MiniUPnP client source codeSignature
    miniupnpd-2.1.20190630.tar.gz24046630/06/2019 20:27:38 +0000MiniUPnP daemon source codeSignature
    miniupnpd-2.1.20190625.tar.gz24012025/06/2019 21:33:49 +0000MiniUPnP daemon source codeSignature
    miniupnpc-2.1.20190625.tar.gz9446125/06/2019 21:33:26 +0000MiniUPnP client source codeSignature
    miniupnpd-2.1.20190502.tar.gz23605202/05/2019 17:22:23 +0000MiniUPnP daemon source codeSignature
    miniupnpc-2.1.20190408.tar.gz9421608/04/2019 12:50:21 +0000MiniUPnP client source codeSignature
    miniupnpd-2.1.20190408.tar.gz23598908/04/2019 12:50:01 +0000MiniUPnP daemon source codeSignature
    miniupnpc-2.1.20190403.tar.gz9420403/04/2019 15:41:36 +0000MiniUPnP client source codeSignature
    miniupnpd-2.1.20190403.tar.gz23590903/04/2019 15:41:17 +0000MiniUPnP daemon source codeSignature
    minissdpd-1.5.20190210.tar.gz3722710/02/2019 15:21:49 +0000MiniSSDPd source codeSignature
    miniupnpc-2.1.20190210.tar.gz9412510/02/2019 12:46:09 +0000MiniUPnP client source codeSignature
    miniupnpd-2.1.20190210.tar.gz23509310/02/2019 11:20:11 +0000MiniUPnP daemon source codeSignature
    miniupnpd-2.1.20180706.tar.gz23367506/07/2018 12:44:24 +0000MiniUPnP daemon source codeSignature
    miniupnpd-2.1.tar.gz22545808/05/2018 21:50:32 +0000MiniUPnP daemon release source codeSignature
    miniupnpc-2.1.tar.gz9191407/05/2018 11:10:59 +0000MiniUPnP client release source codeSignature
    miniupnpd-2.0.20180503.tar.gz22545403/05/2018 08:33:10 +0000MiniUPnP daemon source code
    miniupnpc-2.0.20180503.tar.gz8820703/05/2018 08:31:22 +0000MiniUPnP client source code
    miniupnpd-2.0.20180422.tar.gz22494222/04/2018 19:48:54 +0000MiniUPnP daemon source code
    miniupnpd-2.0.20180412.tar.gz22483112/04/2018 08:16:25 +0000MiniUPnP daemon source code
    miniupnpd-2.0.20180410.tar.gz22473610/04/2018 07:58:28 +0000MiniUPnP daemon source code
    miniupnpc-2.0.20180410.tar.gz8736310/04/2018 07:52:55 +0000MiniUPnP client source code
    miniupnpc-2.0.20180406.tar.gz8737406/04/2018 10:55:21 +0000MiniUPnP client source code
    minissdpd-1.5.20180223.tar.gz3617923/02/2018 14:24:07 +0000MiniSSDPd source code
    miniupnpc-2.0.20180222.tar.gz8701822/02/2018 15:09:24 +0000MiniUPnP client source code
    miniupnpd-2.0.20180222.tar.gz22369722/02/2018 15:09:14 +0000MiniUPnP daemon source code
    miniupnpd-2.0.20180203.tar.gz22308403/02/2018 22:34:46 +0000MiniUPnP daemon source code
    miniupnpc-2.0.20180203.tar.gz8677203/02/2018 22:34:32 +0000MiniUPnP client source code
    minissdpd-1.5.20180203.tar.gz3584803/02/2018 22:33:08 +0000MiniSSDPd source code
    miniupnpc-2.0.20171212.tar.gz8660712/12/2017 12:03:38 +0000MiniUPnP client source code
    miniupnpd-2.0.20171212.tar.gz22261712/12/2017 12:03:32 +0000MiniUPnP daemon source code
    miniupnpc-2.0.20171102.tar.gz8636302/11/2017 17:58:34 +0000MiniUPnP client source code
    miniupnpc-2.0.20170509.tar.gz8605509/05/2017 10:14:56 +0000MiniUPnP client source code
    miniupnpc-2.0.20170421.tar.gz8598421/04/2017 12:02:26 +0000MiniUPnP client source code
    miniupnpd-2.0.20170421.tar.gz21919121/04/2017 12:02:06 +0000MiniUPnP daemon source code
    miniupnpd-2.0.20161216.tar.gz21811916/12/2016 09:34:08 +0000MiniUPnP daemon source code
    miniupnpc-2.0.20161216.tar.gz8578016/12/2016 09:34:03 +0000MiniUPnP client source code
    minissdpd-1.5.20161216.tar.gz3507816/12/2016 09:33:59 +0000MiniSSDPd source code
    miniupnpd-2.0.tar.gz21780219/04/2016 21:12:01 +0000MiniUPnP daemon release source codeSignature
    miniupnpc-2.0.tar.gz8528719/04/2016 21:07:52 +0000MiniUPnP client release source code
    minissdpd-1.5.20160301.tar.gz3482701/03/2016 18:08:23 +0000MiniSSDPd source code
    miniupnpd-1.9.20160222.tar.gz21754122/02/2016 10:21:40 +0000MiniUPnP daemon source code
    miniupnpd-1.9.20160216.tar.gz21700716/02/2016 12:41:44 +0000MiniUPnP daemon source code
    miniupnpd-1.9.20160212.tar.gz21586612/02/2016 15:22:04 +0000MiniUPnP daemon source code
    miniupnpd-1.9.20160209.tar.gz21341609/02/2016 09:47:03 +0000MiniUPnP daemon source code
    miniupnpc-1.9.20160209.tar.gz8526809/02/2016 09:44:50 +0000MiniUPnP client source code
    minissdpd-1.5.20160119.tar.gz3471119/01/2016 13:39:51 +0000MiniSSDPd source code
    miniupnpd-1.9.20160113.tar.gz21143713/01/2016 16:03:14 +0000MiniUPnP daemon source code
    minissdpd-1.5.tar.gz3440413/01/2016 15:26:53 +0000MiniSSDPd release source code
    miniupnpd-1.9.20151212.tar.gz21091212/12/2015 10:06:07 +0000MiniUPnP daemon source code
    miniupnpd-1.9.20151118.tar.gz21032218/11/2015 08:59:46 +0000MiniUPnP daemon source code
    miniupnpc-1.9.20151026.tar.gz8420826/10/2015 17:07:34 +0000MiniUPnP client source code
    miniupnpc-1.9.20151008.tar.gz8353808/10/2015 16:22:28 +0000MiniUPnP client source code
    miniupnpd-1.9.20150922.tar.gz20870022/09/2015 10:21:50 +0000MiniUPnP daemon source code
    upnpc-exe-win32-20150918.zip10000418/09/2015 12:50:51 +0000Windows executable
    miniupnpc-1.9.20150917.tar.gz8260917/09/2015 14:09:14 +0000MiniUPnP client source code
    upnpc-exe-win32-20150824.zip9952024/08/2015 15:25:18 +0000Windows executable
    minissdpd-1.4.tar.gz3201706/08/2015 13:38:37 +0000MiniSSDPd release source code
    miniupnpc-1.9.20150730.tar.gz8143129/07/2015 22:10:00 +0000MiniUPnP client source code
    miniupnpd-1.9.20150721.tar.gz20756221/07/2015 13:35:51 +0000MiniUPnP daemon source code
    miniupnpc-1.9.20150721.tar.gz8052121/07/2015 13:27:00 +0000MiniUPnP client source code
    libnatpmp-20150609.tar.gz2439209/06/2015 15:40:28 +0000libnatpmp source code
    miniupnpc-1.9.20150609.tar.gz7931109/06/2015 15:39:48 +0000MiniUPnP client source code
    miniupnpd-1.9.20150609.tar.gz20708809/06/2015 15:39:36 +0000MiniUPnP daemon source code
    minissdpd-1.3.20150527.tar.gz3102527/05/2015 09:17:15 +0000MiniSSDPd source code
    miniupnpc-1.9.20150522.tar.gz7908022/05/2015 11:02:27 +0000MiniUPnP client source code
    minissdpd-1.3.20150522.tar.gz3033422/05/2015 11:02:04 +0000MiniSSDPd source code
    miniupnpd-1.9.20150430.tar.gz20593030/04/2015 09:09:27 +0000MiniUPnP daemon source code
    miniupnpc-1.9.20150430.tar.gz7845930/04/2015 08:39:31 +0000MiniUPnP client source code
    miniupnpc-1.9.20150427.tar.gz7842427/04/2015 16:08:42 +0000MiniUPnP client source code
    miniupnpd-1.9.20150427.tar.gz19115727/04/2015 16:08:27 +0000MiniUPnP daemon source code
    miniupnpd-1.9.20150307.tar.gz19091307/03/2015 16:11:51 +0000MiniUPnP daemon source code
    miniupnpc-1.9.20150206.tar.gz7686406/02/2015 14:38:00 +0000MiniUPnP client source code
    miniupnpd-1.9.20141209.tar.gz19318309/12/2014 09:58:34 +0000MiniUPnP daemon source code
    minissdpd-1.3.tar.gz3032609/12/2014 09:57:30 +0000MiniSSDPd release source code
    minissdpd-1.2.20141204.tar.gz2697804/12/2014 10:55:26 +0000MiniSSDPd source code
    miniupnpd-1.9.20141204.tar.gz19259704/12/2014 10:55:03 +0000MiniUPnP daemon source code
    minissdpd-1.2.20141128.tar.gz2679528/11/2014 16:33:10 +0000MiniSSDPd source code
    miniupnpd-1.9.20141128.tar.gz19255828/11/2014 13:31:36 +0000MiniUPnP daemon source code
    miniupnpc-1.9.20141128.tar.gz7654128/11/2014 13:31:15 +0000MiniUPnP client source code
    miniupnpc-1.9.20141117.tar.gz7386517/11/2014 09:51:36 +0000MiniUPnP client source code
    miniupnpc-1.9.20141113.tar.gz7285713/11/2014 10:36:44 +0000MiniUPnP client source code
    minissdpd-1.2.20141108.tar.gz2200108/11/2014 13:55:41 +0000MiniSSDPd source code
    miniupnpc-1.9.20141108.tar.gz7278108/11/2014 13:53:48 +0000MiniUPnP client source code
    miniupnpd-1.9.20141108.tar.gz19241308/11/2014 13:53:38 +0000MiniUPnP daemon source code
    miniupnpd-1.9.tar.gz19218327/10/2014 16:45:34 +0000MiniUPnP daemon release source code
    miniupnpc-1.9.20141027.tar.gz7676327/10/2014 16:45:25 +0000MiniUPnP client source code
    miniupnpd-1.8.20141022.tar.gz19163022/10/2014 09:17:41 +0000MiniUPnP daemon source code
    miniupnpd-1.8.20141021.tar.gz19127021/10/2014 14:18:58 +0000MiniUPnP daemon source code
    miniupnpc-1.9.20140911.tar.gz7685511/09/2014 14:15:23 +0000MiniUPnP client source code
    minissdpd-1.2.20140906.tar.gz2195606/09/2014 08:34:10 +0000MiniSSDPd source code
    miniupnpd-1.8.20140906.tar.gz19118306/09/2014 08:34:02 +0000MiniUPnP daemon source code
    miniupnpc-1.9.20140906.tar.gz7679106/09/2014 08:33:45 +0000MiniUPnP client source code
    miniupnpc-1.9.20140701.tar.gz7673501/07/2014 13:06:51 +0000MiniUPnP client source code
    miniupnpc-1.9.20140610.tar.gz7667410/06/2014 10:28:27 +0000MiniUPnP client source code
    minissdpd-1.2.20140610.tar.gz2190910/06/2014 10:03:29 +0000MiniSSDPd source code
    miniupnpd-1.8.20140523.tar.gz19093623/05/2014 15:48:03 +0000MiniUPnP daemon source code
    upnpc-exe-win32-20140422.zip9750522/04/2014 10:10:07 +0000Windows executable
    miniupnpd-1.8.20140422.tar.gz18722522/04/2014 08:58:56 +0000MiniUPnP daemon source code
    miniupnpd-1.8.20140401.tar.gz18313101/04/2014 10:07:20 +0000MiniUPnP daemon source code
    miniupnpc-1.9.20140401.tar.gz7470301/04/2014 09:49:46 +0000MiniUPnP client source code
    libnatpmp-20140401.tar.gz2330201/04/2014 09:49:44 +0000libnatpmp source code
    miniupnpd-1.8.20140313.tar.gz17712013/03/2014 10:39:11 +0000MiniUPnP daemon source code
    miniupnpd-1.8.20140310.tar.gz17658509/03/2014 23:16:49 +0000MiniUPnP daemon source code
    miniupnpd-1.8.20140225.tar.gz17518325/02/2014 11:01:29 +0000MiniUPnP daemon source code
    miniupnpd-1.8.20140203.tar.gz17011203/02/2014 09:56:05 +0000MiniUPnP daemon source code
    miniupnpc-1.9.tar.gz7423031/01/2014 13:57:40 +0000MiniUPnP client release source code
    miniupnpd-1.8.20140127.tar.gz17046727/01/2014 11:25:34 +0000MiniUPnP daemon source code
    upnpc-exe-win32-20140117.zip9727017/01/2014 11:37:53 +0000Windows executable
    miniupnpd-1.8.20131216.tar.gz17027716/12/2013 16:15:40 +0000MiniUPnP daemon source code
    miniupnpd-1.8.20131213.tar.gz16975313/12/2013 16:18:10 +0000MiniUPnP daemon source code
    miniupnpc-1.8.20131209.tar.gz7390009/12/2013 20:52:54 +0000MiniUPnP client source code
    libnatpmp-20131126.tar.gz2297226/11/2013 08:51:36 +0000libnatpmp source code
    miniupnpc-1.8.20131007.tar.gz7375007/10/2013 10:10:25 +0000MiniUPnP client source code
    libnatpmp-20130911.tar.gz1874411/09/2013 07:35:51 +0000libnatpmp source code
    libnatpmp-20130910.tar.gz1873410/09/2013 20:15:34 +0000libnatpmp source code
    minissdpd-1.2.20130907.tar.gz2023707/09/2013 06:46:31 +0000MiniSSDPd source code
    minissdpd-1.2.20130819.tar.gz2077219/08/2013 16:50:29 +0000MiniSSDPd source code
    miniupnpc-1.8.20130801.tar.gz7342601/08/2013 21:38:05 +0000MiniUPnP client source code
    miniupnpd-1.8.20130730.tar.gz14990430/07/2013 11:37:48 +0000MiniUPnP daemon source code
    miniupnpd-1.8.20130607.tar.gz14952107/06/2013 08:46:17 +0000MiniUPnP daemon source code
    miniupnpd-1.8.20130521.tar.gz14927621/05/2013 09:01:33 +0000MiniUPnP daemon source code
    miniupnpd-1.8.20130503.tar.gz14842003/05/2013 19:27:16 +0000MiniUPnP daemon source code
    miniupnpc-1.8.20130503.tar.gz7185803/05/2013 19:27:07 +0000MiniUPnP client source code
    miniupnpd-1.8.20130426.tar.gz14789026/04/2013 16:57:20 +0000MiniUPnP daemon source code
    miniupnpc-1.8.20130211.tar.gz7072311/02/2013 10:32:44 +0000MiniUPnP client source code
    miniupnpd-1.8.20130207.tar.gz14732507/02/2013 12:29:32 +0000MiniUPnP daemon source code
    miniupnpc-1.8.tar.gz7062406/02/2013 14:31:06 +0000MiniUPnP client release source code
    miniupnpd-1.8.tar.gz14667906/02/2013 14:30:59 +0000MiniUPnP daemon release source code
    upnpc-exe-win32-20121009.zip9651309/10/2012 17:54:12 +0000Windows executable
    miniupnpd-1.7.20121005.tar.gz14439304/10/2012 22:39:05 +0000MiniUPnP daemon source code
    miniupnpc-1.7.20120830.tar.gz7007430/08/2012 08:41:51 +0000MiniUPnP client source code
    miniupnpd-1.7.20120824.tar.gz14196024/08/2012 18:15:01 +0000MiniUPnP daemon source code
    libnatpmp-20120821.tar.gz1783221/08/2012 17:24:46 +0000libnatpmp source code
    miniupnpc-1.7.20120714.tar.gz6957014/07/2012 14:40:47 +0000MiniUPnP client source code
    miniupnpc-1.7.20120711.tar.gz6958010/07/2012 22:27:05 +0000MiniUPnP client source code
    miniupnpd-1.7.20120711.tar.gz14138010/07/2012 22:26:58 +0000MiniUPnP daemon source code
    miniupnpd-1.7.tar.gz13804727/05/2012 23:13:30 +0000MiniUPnP daemon release source code
    miniupnpc-1.7.tar.gz6832724/05/2012 18:17:48 +0000MiniUPnP client release source code
    minissdpd-1.2.tar.gz1987424/05/2012 18:06:24 +0000MiniSSDPd release source code
    miniupnpd-1.6.20120509.tar.gz13714709/05/2012 10:45:44 +0000MiniUPnP daemon source code
    miniupnpc-1.6.20120509.tar.gz6820509/05/2012 10:45:41 +0000MiniUPnP client source code
    minissdpd-1.1.20120509.tar.gz1812309/05/2012 10:45:39 +0000MiniSSDPd source code
    miniupnpd-1.6.20120502.tar.gz13668801/05/2012 22:51:18 +0000MiniUPnP daemon source code
    miniupnpc-1.6.20120502.tar.gz6817001/05/2012 22:51:11 +0000MiniUPnP client source code
    miniupnpd-1.6.20120426.tar.gz13476426/04/2012 16:24:29 +0000MiniUPnP daemon source code
    miniupnpd-1.6.20120424.tar.gz13252223/04/2012 22:43:17 +0000MiniUPnP daemon source code
    miniupnpc-1.6.20120424.tar.gz6806723/04/2012 22:43:10 +0000MiniUPnP client source code
    miniupnpd-1.6.20120420.tar.gz13197220/04/2012 14:58:57 +0000MiniUPnP daemon source code
    miniupnpc-1.6.20120420.tar.gz6806820/04/2012 14:58:39 +0000MiniUPnP client source code
    miniupnpd-1.6.20120419.tar.gz13108818/04/2012 23:41:36 +0000MiniUPnP daemon source code
    miniupnpd-1.6.20120418.tar.gz13087918/04/2012 21:01:10 +0000MiniUPnP daemon source code
    minissdpd-1.1.20120410.tar.gz1805909/04/2012 22:45:38 +0000MiniSSDPd source code
    miniupnpc-1.6.20120410.tar.gz6793409/04/2012 22:45:10 +0000MiniUPnP client source code
    miniupnpd-1.6.20120406.tar.gz12899206/04/2012 17:52:57 +0000MiniUPnP daemon source code
    miniupnpc-1.6.20120320.tar.gz6737420/03/2012 16:55:48 +0000MiniUPnP client source code
    miniupnpd-1.6.20120320.tar.gz12796820/03/2012 16:46:07 +0000MiniUPnP daemon source code
    miniupnpd-1.6.20120305.tar.gz12698505/03/2012 20:42:01 +0000MiniUPnP daemon source code
    miniupnpd-1.6.20120207.tar.gz12742507/02/2012 10:21:16 +0000MiniUPnP daemon source code
    miniupnpd-1.6.20120203.tar.gz12659903/02/2012 15:14:13 +0000MiniUPnP daemon source code
    miniupnpc-1.6.20120125.tar.gz6735425/01/2012 21:12:28 +0000MiniUPnP client source code
    miniupnpc-1.6.20120121.tar.gz6734721/01/2012 14:07:41 +0000MiniUPnP client source code
    miniupnpd-1.6.20120121.tar.gz12602121/01/2012 14:07:33 +0000MiniUPnP daemon source code
    minissdpd-1.1.20120121.tar.gz1776221/01/2012 14:07:16 +0000MiniSSDPd source code
    upnpc-exe-win32-20120121.zip9457521/01/2012 13:59:11 +0000Windows executable
    upnpc-exe-win32-20111212.zip9450712/12/2011 12:33:48 +0000Windows executable
    miniupnpd-1.6.20111118.tar.gz12568318/11/2011 11:26:12 +0000MiniUPnP daemon source code
    minissdpd-1.1.20111007.tar.gz1761107/10/2011 09:47:51 +0000MiniSSDPd source code
    xchat-upnp20110811.patch1032911/08/2011 15:18:25 +0000Patch to add UPnP capabilities to xchat
    xchat-upnp20110811-2.8.8.patch1152911/08/2011 15:18:23 +0000Patch to add UPnP capabilities to xchat
    libnatpmp-20110808.tar.gz1776208/08/2011 21:21:34 +0000libnatpmp source code
    libnatpmp-20110730.tar.gz1768730/07/2011 13:19:31 +0000libnatpmp source code
    minissdpd-1.1.tar.gz1748130/07/2011 13:17:30 +0000MiniSSDPd release source code
    miniupnpd-1.6.20110730.tar.gz12558330/07/2011 13:17:09 +0000MiniUPnP daemon source code
    minissdpd-1.0.20110729.tar.gz1589829/07/2011 08:47:26 +0000MiniSSDPd source code
    miniupnpc-1.6.tar.gz6645425/07/2011 18:03:09 +0000MiniUPnP client release source code
    miniupnpd-1.6.tar.gz12491725/07/2011 16:37:57 +0000MiniUPnP daemon release source code
    minidlna_1.0.21.minissdp1.patch759825/07/2011 14:57:50 +0000Patch for MiniDLNA to use miniSSDPD
    libnatpmp-20110715.tar.gz1794315/07/2011 08:31:40 +0000libnatpmp source code
    miniupnpd-1.5.20110715.tar.gz12451915/07/2011 07:55:17 +0000MiniUPnP daemon source code
    upnpc-exe-win32-20110714.zip9423613/07/2011 23:16:01 +0000Windows executable
    miniupnpd-1.5.20110623.tar.gz12352922/06/2011 22:29:15 +0000MiniUPnP daemon source code
    miniupnpd-1.5.20110620.tar.gz12322120/06/2011 14:11:11 +0000MiniUPnP daemon source code
    miniupnpd-1.5.20110618.tar.gz12317617/06/2011 23:29:18 +0000MiniUPnP daemon source code
    miniupnpc-1.5.20110618.tar.gz6640117/06/2011 23:29:17 +0000MiniUPnP client source code
    libnatpmp-20110618.tar.gz1790117/06/2011 23:29:16 +0000libnatpmp source code
    minissdpd-1.0.20110618.tar.gz1519317/06/2011 23:29:16 +0000MiniSSDPd source code
    minidlna_cvs20110529_minissdp1.patch29/05/2011 21:19:09 +0000Patch for MiniDLNA to use miniSSDPD
    miniupnpd-1.5.20110528.tar.gz12198528/05/2011 09:39:04 +0000MiniUPnP daemon source code
    minidlna_1.0.19_minissdp1.patch908027/05/2011 09:55:04 +0000Patch for MiniDLNA to use miniSSDPD
    miniupnpd-1.5.20110527.tar.gz12089627/05/2011 08:28:35 +0000MiniUPnP daemon source code
    miniupnpc-1.5.20110527.tar.gz6627927/05/2011 08:28:34 +0000MiniUPnP client source code
    libnatpmp-20110527.tar.gz1762727/05/2011 08:28:33 +0000libnatpmp source code
    minissdpd-1.0.20110523.tar.gz1502423/05/2011 12:55:31 +0000MiniSSDPd source code
    miniupnpd-1.5.20110520.tar.gz11922720/05/2011 18:00:41 +0000MiniUPnP daemon source code
    miniupnpd-1.5.20110519.tar.gz11473518/05/2011 22:29:06 +0000MiniUPnP daemon source code
    miniupnpd-1.5.20110516.tar.gz11334816/05/2011 09:32:51 +0000MiniUPnP daemon source code
    miniupnpd-1.5.20110515.tar.gz11313515/05/2011 21:51:29 +0000MiniUPnP daemon source code
    miniupnpc-1.5.20110515.tar.gz6611215/05/2011 21:51:28 +0000MiniUPnP client source code
    miniupnpd-1.5.20110513.tar.gz11102913/05/2011 14:03:12 +0000MiniUPnP daemon source code
    miniupnpc-1.5.20110506.tar.gz6553606/05/2011 16:35:38 +0000MiniUPnP client source code
    miniupnpc-1.4-v6.20100505.zip9183318/04/2011 20:14:11 +0000
    miniupnpd-1.4-v6.20100823.zip22223518/04/2011 20:14:07 +0000
    miniupnpc-1.5.20110418.tar.gz6182018/04/2011 20:09:22 +0000MiniUPnP client source code
    upnpc-exe-win32-20110418.zip9418318/04/2011 17:53:26 +0000Windows executable
    miniupnpc-1.5.20110314.tar.gz5721014/03/2011 14:27:29 +0000MiniUPnP client source code
    miniupnpd-1.5.20110309.tar.gz10007309/03/2011 15:36:12 +0000MiniUPnP daemon source code
    miniupnpd-1.5.20110302.tar.gz10075602/03/2011 16:17:44 +0000MiniUPnP daemon source code
    miniupnpd-1.5.20110221.tar.gz10009220/02/2011 23:48:17 +0000MiniUPnP daemon source code
    upnpc-exe-win32-20110215.zip5540915/02/2011 23:05:00 +0000Windows executable
    miniupnpc-1.5.20110215.tar.gz5488015/02/2011 11:16:04 +0000MiniUPnP client source code
    miniupnpd-1.5.20110214.tar.gz9962914/02/2011 18:00:43 +0000MiniUPnP daemon source code
    minidlna_1.0.18_minissdp1.patch974702/02/2011 15:12:19 +0000Patch for MiniDLNA to use miniSSDPD
    miniupnpd-1.5.20110127.tar.gz9742127/01/2011 17:51:25 +0000MiniUPnP daemon source code
    miniupnpd-1.5.tar.gz9899304/01/2011 09:45:10 +0000MiniUPnP daemon release source code
    miniupnpc-1.5.tar.gz5330904/01/2011 09:45:06 +0000MiniUPnP client release source code
    libnatpmp-20110103.tar.gz1752903/01/2011 17:33:16 +0000libnatpmp source code
    miniupnpc-1.4.20101221.tar.gz5234221/12/2010 16:15:38 +0000MiniUPnP client source code
    upnpc-exe-win32-20101213.zip5235912/12/2010 23:44:01 +0000Windows executable
    libnatpmp-20101211.tar.gz1732411/12/2010 17:20:36 +0000libnatpmp source code
    miniupnpc-1.4.20101209.tar.gz5190009/12/2010 16:17:30 +0000MiniUPnP client source code
    miniupnpd-1.4.20100921.tar.gz9548321/09/2010 15:50:00 +0000MiniUPnP daemon source code
    upnpc-exe-win32-20100825.zip5063625/08/2010 08:42:59 +0000Windows executable
    miniupnpc-1.4.20100609.tar.gz5039009/06/2010 11:03:11 +0000MiniUPnP client source code
    upnpc-exe-win32-20100513.zip5095013/05/2010 16:54:33 +0000Windows executable
    miniupnpd-1.4.20100511.tar.gz9328111/05/2010 16:22:33 +0000MiniUPnP daemon source code
    upnpc-exe-win32-20100418.zip4075817/04/2010 23:00:37 +0000Windows executable
    miniupnpc-1.4.20100418.tar.gz5024517/04/2010 22:18:31 +0000MiniUPnP client source code
    miniupnpc-1.4.20100412.tar.gz5014512/04/2010 20:42:53 +0000MiniUPnP client source code
    miniupnpc-1.4.20100407.tar.gz4975607/04/2010 10:05:08 +0000MiniUPnP client source code
    miniupnpc-1.4.20100405.tar.gz4954905/04/2010 14:34:38 +0000MiniUPnP client source code
    miniupnpd-1.4.20100308.tar.gz9288908/03/2010 17:18:00 +0000MiniUPnP daemon source code
    libnatpmp-20100202.tar.gz1723102/02/2010 18:41:13 +0000libnatpmp source code
    miniupnpc-1.4.20100202.tar.gz4671002/02/2010 18:41:13 +0000MiniUPnP client source code
    miniupnpc-1.4.20100106.tar.gz4665906/01/2010 10:08:21 +0000MiniUPnP client source code
    miniupnpd-1.4.20091222.tar.gz9099322/12/2009 17:23:48 +0000MiniUPnP daemon source code
    libnatpmp-20091219.tar.gz1683919/12/2009 14:35:22 +0000libnatpmp source code
    miniupnpc-1.4.20091213.tar.gz4651012/12/2009 23:05:40 +0000MiniUPnP client source code
    miniupnpc-1.4.20091211.tar.gz4585211/12/2009 16:43:01 +0000MiniUPnP client source code
    upnpc-exe-win32-20091210.zip3866610/12/2009 18:50:27 +0000Windows executable
    miniupnpc-1.4.20091208.tar.gz4339208/12/2009 10:58:26 +0000MiniUPnP client source code
    miniupnpc-1.4.20091203.tar.gz4204003/12/2009 13:56:28 +0000MiniUPnP client source code
    miniupnpd-1.4.20091106.tar.gz9078706/11/2009 21:18:50 +0000MiniUPnP daemon source code
    miniupnpd-1.4.tar.gz9007130/10/2009 09:20:05 +0000MiniUPnP daemon release source code
    miniupnpc-1.4.tar.gz4179030/10/2009 09:20:04 +0000MiniUPnP client release source code
    miniupnpc-20091016.tar.gz4179216/10/2009 09:04:35 +0000MiniUPnP client source code
    miniupnpd-20091010.tar.gz9004310/10/2009 19:21:30 +0000MiniUPnP daemon source code
    miniupnpc-20091010.tar.gz4167110/10/2009 19:21:28 +0000MiniUPnP client source code
    miniupnpd-20090921.tar.gz8947621/09/2009 13:00:04 +0000MiniUPnP daemon source code
    miniupnpc-20090921.tar.gz4163021/09/2009 13:00:03 +0000MiniUPnP client source code
    miniupnpd-20090904.tar.gz8934404/09/2009 16:24:26 +0000MiniUPnP daemon source code
    miniupnpd-20090820.tar.gz8914920/08/2009 09:35:58 +0000MiniUPnP daemon source code
    miniupnpc-20090807.tar.gz4128807/08/2009 14:46:11 +0000MiniUPnP client source code
    miniupnpc-20090729.tar.gz4058829/07/2009 08:47:43 +0000MiniUPnP client source code
    xchat-upnp20061022.patch1025817/07/2009 15:49:46 +0000Patch to add UPnP capabilities to xchat
    miniupnpc-20090713.tar.gz4020613/07/2009 08:53:49 +0000MiniUPnP client source code
    libnatpmp-20090713.tar.gz1426213/07/2009 08:53:49 +0000libnatpmp source code
    miniupnpd-20090605.tar.gz8377404/06/2009 23:32:20 +0000MiniUPnP daemon source code
    miniupnpc-20090605.tar.gz4007704/06/2009 23:32:16 +0000MiniUPnP client source code
    libnatpmp-20090605.tar.gz1381704/06/2009 23:32:15 +0000libnatpmp source code
    miniupnpd-20090516.tar.gz8368916/05/2009 08:47:31 +0000MiniUPnP daemon source code
    miniupnpc-1.3.tar.gz4005817/04/2009 21:27:55 +0000MiniUPnP client release source code
    miniupnpd-1.3.tar.gz8346417/04/2009 20:11:21 +0000MiniUPnP daemon release source code
    libnatpmp-20090310.tar.gz1184710/03/2009 10:19:45 +0000libnatpmp source code
    miniupnpd-20090214.tar.gz8292114/02/2009 11:27:03 +0000MiniUPnP daemon source code
    miniupnpd-20090213.tar.gz8259413/02/2009 19:48:01 +0000MiniUPnP daemon source code
    libnatpmp-20090129.tar.gz1174829/01/2009 21:50:31 +0000libnatpmp source code
    miniupnpc-20090129.tar.gz3997629/01/2009 21:50:30 +0000MiniUPnP client source code
    miniupnpd-20090129.tar.gz8248729/01/2009 21:50:27 +0000MiniUPnP daemon source code
    miniupnpd-20081009.tar.gz8173209/10/2008 12:53:02 +0000MiniUPnP daemon source code
    minissdpd-1.0.tar.gz1299607/10/2008 14:03:49 +0000MiniSSDPd release source code
    miniupnpc-1.2.tar.gz3878707/10/2008 14:03:47 +0000MiniUPnP client release source code
    miniupnpd-1.2.tar.gz8102507/10/2008 14:03:45 +0000MiniUPnP daemon release source code
    miniupnpd-20081006.tar.gz8051006/10/2008 15:50:34 +0000MiniUPnP daemon source code
    minissdpd-20081006.tar.gz1223006/10/2008 15:50:33 +0000MiniSSDPd source code
    libnatpmp-20081006.tar.gz1171006/10/2008 15:50:31 +0000libnatpmp source code
    mediatomb_minissdp-20081006.patch06/10/2008 15:48:18 +0000
    miniupnpc-20081002.tar.gz3829102/10/2008 09:20:18 +0000MiniUPnP client source code
    miniupnpd-20081001.tar.gz7969601/10/2008 13:11:20 +0000MiniUPnP daemon source code
    upnpc-exe-win32-20080925.zip3660225/09/2008 06:59:33 +0000Windows executable
    miniupnpd-20080710.tar.gz7889810/07/2008 09:38:41 +0000MiniUPnP daemon source code
    libnatpmp-20080707.tar.gz1167906/07/2008 22:05:23 +0000libnatpmp source code
    miniupnpc-1.1.tar.gz3823504/07/2008 16:45:24 +0000MiniUPnP client release source code
    miniupnpc-20080703.tar.gz3820403/07/2008 15:47:37 +0000MiniUPnP client source code
    libnatpmp-20080703.tar.gz1157003/07/2008 15:47:25 +0000libnatpmp source code
    upnpc-exe-win32-20080703.zip3613702/07/2008 23:35:14 +0000Windows executable
    libnatpmp-20080702.tar.gz887302/07/2008 17:32:35 +0000libnatpmp source code
    libnatpmp-20080630.tar.gz886430/06/2008 14:20:16 +0000libnatpmp source code
    libnatpmp-20080529.tar.gz739729/05/2008 09:06:25 +0000libnatpmp source code
    upnpc-exe-win32-20080514.zip1422714/05/2008 20:23:19 +0000Windows executable
    libnatpmp-20080428.tar.gz729528/04/2008 03:09:14 +0000libnatpmp source code
    miniupnpd-20080427.tar.gz7876527/04/2008 18:16:36 +0000MiniUPnP daemon source code
    miniupnpc-20080427.tar.gz3761027/04/2008 18:16:35 +0000MiniUPnP client source code
    miniupnpd-1.1.tar.gz7859425/04/2008 17:38:05 +0000MiniUPnP daemon release source code
    miniupnpc-20080423.tar.gz3681823/04/2008 11:57:36 +0000MiniUPnP client source code
    miniupnpd-20080308.tar.gz7567908/03/2008 11:13:29 +0000MiniUPnP daemon source code
    miniupnpd-20080303.tar.gz7420203/03/2008 01:43:16 +0000MiniUPnP daemon source code
    miniupnpd-20080224.tar.gz7277324/02/2008 11:23:17 +0000MiniUPnP daemon source code
    miniupnpc-1.0.tar.gz3622321/02/2008 13:26:46 +0000MiniUPnP client release source code
    miniupnpd-20080221.tar.gz7082321/02/2008 10:23:46 +0000MiniUPnP daemon source code
    miniupnpc-20080217.tar.gz3524316/02/2008 23:47:59 +0000MiniUPnP client source code
    miniupnpd-20080207.tar.gz7064707/02/2008 21:21:00 +0000MiniUPnP daemon source code
    miniupnpc-20080203.tar.gz3492103/02/2008 22:28:11 +0000MiniUPnP client source code
    miniupnpd-1.0.tar.gz6942727/01/2008 22:41:25 +0000MiniUPnP daemon release source code
    upnpc-exe-win32-20080118.zip1358218/01/2008 11:42:16 +0000Windows executable
    miniupnpd-1.0-RC13.tar.gz6789203/01/2008 16:50:21 +0000MiniUPnP daemon release source code
    miniupnpc-1.0-RC13.tar.gz3482003/01/2008 16:50:20 +0000MiniUPnP client release source code
    miniupnpd-20071220.tar.gz6721120/12/2007 12:08:34 +0000MiniUPnP daemon source code
    miniupnpc-20071219.tar.gz3429019/12/2007 18:31:47 +0000MiniUPnP client source code
    minissdpd-1.0-RC12.tar.gz995619/12/2007 18:30:12 +0000MiniSSDPd release source code
    miniupnpd-1.0-RC12.tar.gz6691114/12/2007 17:39:20 +0000MiniUPnP daemon release source code
    miniupnpc-1.0-RC12.tar.gz3254314/12/2007 17:39:19 +0000MiniUPnP client release source code
    miniupnpc-20071213.tar.gz3254113/12/2007 17:09:51 +0000MiniUPnP client source code
    miniupnpd-20071213.tar.gz6682613/12/2007 16:42:50 +0000MiniUPnP daemon source code
    libnatpmp-20071213.tar.gz599713/12/2007 14:56:30 +0000libnatpmp source code
    libnatpmp-20071202.tar.gz566402/12/2007 00:15:28 +0000libnatpmp source code
    miniupnpd-20071103.tar.gz6574002/11/2007 23:58:38 +0000MiniUPnP daemon source code
    miniupnpd-20071102.tar.gz6573302/11/2007 23:05:44 +0000MiniUPnP daemon source code
    miniupnpc-20071103.tar.gz3223902/11/2007 23:05:34 +0000MiniUPnP client source code
    miniupnpd-1.0-RC11.tar.gz6482825/10/2007 13:27:18 +0000MiniUPnP daemon release source code
    miniupnpc-1.0-RC11.tar.gz3216125/10/2007 13:27:17 +0000MiniUPnP client release source code
    upnpc-exe-win32-20071025.zip1280924/10/2007 23:15:55 +0000Windows executable
    miniupnpd-1.0-RC10.tar.gz6267412/10/2007 08:38:33 +0000MiniUPnP daemon release source code
    miniupnpc-1.0-RC10.tar.gz3196212/10/2007 08:38:31 +0000MiniUPnP client release source code
    minissdpd-1.0-RC10.tar.gz951712/10/2007 08:38:30 +0000MiniSSDPd release source code
    miniupnpc-20071003.tar.gz3119903/10/2007 15:30:13 +0000MiniUPnP client source code
    upnpc-exe-win32-20071001.zip1260401/10/2007 17:09:22 +0000Windows executable
    miniupnpd-1.0-RC9.tar.gz5477827/09/2007 19:38:36 +0000MiniUPnP daemon release source code
    minissdpd-1.0-RC9.tar.gz916327/09/2007 17:00:03 +0000MiniSSDPd release source code
    miniupnpc-1.0-RC9.tar.gz3053827/09/2007 17:00:03 +0000MiniUPnP client release source code
    miniupnpd-20070924.tar.gz5233824/09/2007 20:26:05 +0000MiniUPnP daemon source code
    miniupnpd-20070923.tar.gz5106023/09/2007 21:13:34 +0000MiniUPnP daemon source code
    miniupnpc-20070923.tar.gz3024623/09/2007 21:13:33 +0000MiniUPnP client source code
    minissdpd-20070923.tar.gz897823/09/2007 21:13:32 +0000MiniSSDPd source code
    miniupnpc-20070902.tar.gz3020501/09/2007 23:47:23 +0000MiniUPnP client source code
    minissdpd-20070902.tar.gz653901/09/2007 23:47:20 +0000MiniSSDPd source code
    miniupnpd-1.0-RC8.tar.gz5095229/08/2007 10:56:09 +0000MiniUPnP daemon release source code
    miniupnpc-1.0-RC8.tar.gz2931229/08/2007 10:56:08 +0000MiniUPnP client release source code
    miniupnpd-1.0-RC7.tar.gz5061320/07/2007 00:15:45 +0000MiniUPnP daemon release source code
    miniupnpd-1.0-RC6.tar.gz4998612/06/2007 17:12:07 +0000MiniUPnP daemon release source code
    miniupnpc-1.0-RC6.tar.gz2903212/06/2007 17:12:06 +0000MiniUPnP client release source code
    miniupnpd-20070607.tar.gz4976806/06/2007 23:12:00 +0000MiniUPnP daemon source code
    miniupnpd-20070605.tar.gz4971005/06/2007 21:01:53 +0000MiniUPnP daemon source code
    miniupnpd-20070521.tar.gz4837421/05/2007 13:07:43 +0000MiniUPnP daemon source code
    upnpc-exe-win32-20070519.zip1083619/05/2007 13:14:15 +0000Windows executable
    miniupnpc-20070515.tar.gz2580215/05/2007 18:15:25 +0000MiniUPnP client source code
    miniupnpd-1.0-RC5.tar.gz4806410/05/2007 20:22:48 +0000MiniUPnP daemon release source code
    miniupnpc-1.0-RC5.tar.gz2524210/05/2007 20:22:46 +0000MiniUPnP client release source code
    miniupnpd-20070412.tar.gz4780712/04/2007 20:21:48 +0000MiniUPnP daemon source code
    miniupnpd-1.0-RC4.tar.gz4768717/03/2007 11:43:13 +0000MiniUPnP daemon release source code
    miniupnpc-1.0-RC4.tar.gz2508517/03/2007 11:43:10 +0000MiniUPnP client release source code
    miniupnpd-20070311.tar.gz4759911/03/2007 00:25:26 +0000MiniUPnP daemon source code
    miniupnpd-20070208.tar.gz4508407/02/2007 23:04:06 +0000MiniUPnP daemon source code
    miniupnpd-1.0-RC3.tar.gz4468330/01/2007 23:00:44 +0000MiniUPnP daemon release source code
    miniupnpc-1.0-RC3.tar.gz2505530/01/2007 23:00:42 +0000MiniUPnP client release source code
    miniupnpd-20070130.tar.gz4373529/01/2007 23:26:16 +0000MiniUPnP daemon source code
    miniupnpc-20070130.tar.gz2446629/01/2007 23:26:13 +0000MiniUPnP client source code
    miniupnpd-20070127.tar.gz4264327/01/2007 16:02:35 +0000MiniUPnP daemon source code
    miniupnpc-20070127.tar.gz2424127/01/2007 16:02:33 +0000MiniUPnP client source code
    miniupnpd-1.0-RC2.tar.gz4042417/01/2007 16:13:05 +0000MiniUPnP daemon release source code
    miniupnpd-20070112.tar.gz4070812/01/2007 13:40:50 +0000MiniUPnP daemon source code
    miniupnpd-20070111.tar.gz4065111/01/2007 18:50:21 +0000MiniUPnP daemon source code
    miniupnpd-20070108.tar.gz4002508/01/2007 10:02:14 +0000MiniUPnP daemon source code
    miniupnpd-20070103.tar.gz4006503/01/2007 14:39:11 +0000MiniUPnP daemon source code
    miniupnpc-20061214.tar.gz2410614/12/2006 15:43:54 +0000MiniUPnP client source code
    miniupnpd-20061214.tar.gz3975014/12/2006 13:44:51 +0000MiniUPnP daemon source code
    miniupnpd-1.0-RC1.tar.gz3957207/12/2006 10:55:31 +0000MiniUPnP daemon release source code
    miniupnpc-1.0-RC1.tar.gz2358207/12/2006 10:55:30 +0000MiniUPnP client release source code
    upnpc-exe-win32-20061201.zip1037801/12/2006 00:33:08 +0000Windows executable
    miniupnpd20061130.tar.gz3718430/11/2006 12:25:25 +0000MiniUPnP daemon source code
    miniupnpd20061129.tar.gz3604529/11/2006 00:10:49 +0000MiniUPnP daemon source code
    miniupnpd20061127.tar.gz3415526/11/2006 23:15:28 +0000MiniUPnP daemon source code
    miniupnpc20061123.tar.gz2100423/11/2006 22:41:46 +0000MiniUPnP client source code
    miniupnpd-bin-openwrt20061123.tar.gz23/11/2006 22:41:44 +0000Precompiled binaries for openwrt
    miniupnpd20061123.tar.gz3380923/11/2006 22:28:29 +0000MiniUPnP daemon source code
    miniupnpc20061119.tar.gz2089719/11/2006 22:50:37 +0000MiniUPnP client source code
    miniupnpd20061119.tar.gz3258019/11/2006 22:50:36 +0000MiniUPnP daemon source code
    miniupnpd20061117.tar.gz3264617/11/2006 13:29:33 +0000MiniUPnP daemon source code
    upnpc-exe-win32-20061112.zip1026212/11/2006 22:41:25 +0000Windows executable
    miniupnpd20061112.tar.gz3202312/11/2006 21:30:32 +0000MiniUPnP daemon source code
    miniupnpc20061112.tar.gz2104712/11/2006 21:30:31 +0000MiniUPnP client source code
    miniupnpd20061110.tar.gz2792609/11/2006 23:35:02 +0000MiniUPnP daemon source code
    miniupnpc20061110.tar.gz2100909/11/2006 23:32:19 +0000MiniUPnP client source code
    upnpc-exe-win32-20061101.zip1008908/11/2006 20:35:09 +0000Windows executable
    upnpc-exe-win32-20061020.zip918308/11/2006 20:35:08 +0000Windows executable
    upnpc-exe-win32-20060909.zip999408/11/2006 20:35:07 +0000Windows executable
    upnpc-exe-win32-20060801.zip1000208/11/2006 20:35:06 +0000Windows executable
    upnpc-exe-win32-20060711.zip1373308/11/2006 20:35:05 +0000Windows executable
    upnpc-exe-win32-20060709.zip1371308/11/2006 20:35:04 +0000Windows executable
    upnpc-exe-win32-20060704.zip1329708/11/2006 20:35:03 +0000Windows executable
    miniupnpc20061107.tar.gz2070806/11/2006 23:36:57 +0000MiniUPnP client source code
    miniupnpd20061107.tar.gz2699206/11/2006 23:35:06 +0000MiniUPnP daemon source code
    miniupnpc20061106.tar.gz2057506/11/2006 17:02:15 +0000MiniUPnP client source code
    miniupnpd20061106.tar.gz2659706/11/2006 15:39:10 +0000MiniUPnP daemon source code
    miniupnpc20061101.tar.gz2039504/11/2006 18:16:15 +0000MiniUPnP client source code
    miniupnpc20061031.tar.gz2039604/11/2006 18:16:13 +0000MiniUPnP client source code
    miniupnpc20061023.tar.gz2010904/11/2006 18:16:12 +0000MiniUPnP client source code
    miniupnpc20061020.tar.gz1973904/11/2006 18:16:10 +0000MiniUPnP client source code
    miniupnpc20060909.tar.gz1928504/11/2006 18:16:09 +0000MiniUPnP client source code
    miniupnpc20060731.tar.gz1903204/11/2006 18:16:07 +0000MiniUPnP client source code
    miniupnpc20060711.tar.gz1915104/11/2006 18:16:06 +0000MiniUPnP client source code
    miniupnpc20060709.tar.gz1908004/11/2006 18:16:04 +0000MiniUPnP client source code
    miniupnpc20060703.tar.gz1790604/11/2006 18:16:03 +0000MiniUPnP client source code
    miniupnpc-new20060630.tar.gz1484004/11/2006 18:16:01 +0000João Paulo Barraca version of the upnp client
    miniupnpd20061029.tar.gz2419703/11/2006 13:40:30 +0000MiniUPnP daemon source code
    miniupnpd20061027.tar.gz2390403/11/2006 13:40:29 +0000MiniUPnP daemon source code
    miniupnpd20061028.tar.gz2438303/11/2006 13:40:29 +0000MiniUPnP daemon source code
    miniupnpd20061018.tar.gz2305103/11/2006 13:40:28 +0000MiniUPnP daemon source code
    miniupnpd20061023.tar.gz2347803/11/2006 13:40:28 +0000MiniUPnP daemon source code
    miniupnpd20060930.tar.gz2283203/11/2006 13:40:28 +0000MiniUPnP daemon source code
    miniupnpd20060924.tar.gz2203803/11/2006 13:40:27 +0000MiniUPnP daemon source code
    miniupnpd20060919.tar.gz2156603/11/2006 13:40:27 +0000MiniUPnP daemon source code
    miniupnpd20060729.tar.gz1920203/11/2006 13:40:26 +0000MiniUPnP daemon source code
    miniupnpd20060909.tar.gz1995203/11/2006 13:40:26 +0000MiniUPnP daemon source code

    Home

    -

    Contact: miniupnp _AT_ free _DOT_ fr

    -


    diff --git a/bitbake/lib/bb/tests/fetch-testdata/software/pulseaudio/releases/index.html b/bitbake/lib/bb/tests/fetch-testdata/software/pulseaudio/releases/index.html deleted file mode 100644 index bf2d23cf9e..0000000000 --- a/bitbake/lib/bb/tests/fetch-testdata/software/pulseaudio/releases/index.html +++ /dev/null @@ -1,383 +0,0 @@ - Index of /software/pulseaudio/releases

    Index of /software/pulseaudio/releases

    [ICO] | Name | Last modified | Size | Description

    [PARENTDIR]Parent Directory  -  
    [DIR]bad/2014-01-26 17:50 -  
    [   ]polypaudio-0.1.tar.gz2008-03-28 21:16 387K 
    [   ]polypaudio-0.1.tar.gz.md52011-05-29 11:28 56  
    [   ]polypaudio-0.1.tar.gz.sha12011-05-29 11:28 64  
    [   ]polypaudio-0.2.tar.gz2008-03-28 21:16 460K 
    [   ]polypaudio-0.2.tar.gz.md52011-05-29 11:28 56  
    [   ]polypaudio-0.2.tar.gz.sha12011-05-29 11:28 64  
    [   ]polypaudio-0.3.tar.gz2008-03-28 21:16 470K 
    [   ]polypaudio-0.3.tar.gz.md52011-05-29 11:28 56  
    [   ]polypaudio-0.3.tar.gz.sha12011-05-29 11:28 64  
    [   ]polypaudio-0.4.tar.gz2008-03-28 21:16 486K 
    [   ]polypaudio-0.4.tar.gz.md52011-05-29 11:28 56  
    [   ]polypaudio-0.4.tar.gz.sha12011-05-29 11:28 64  
    [   ]polypaudio-0.5.1.tar.gz2008-03-28 21:16 524K 
    [   ]polypaudio-0.5.1.tar.gz.md52011-05-29 11:28 58  
    [   ]polypaudio-0.5.1.tar.gz.sha12011-05-29 11:28 66  
    [   ]polypaudio-0.5.tar.gz2008-03-28 21:16 518K 
    [   ]polypaudio-0.5.tar.gz.md52011-05-29 11:28 56  
    [   ]polypaudio-0.5.tar.gz.sha12011-05-29 11:28 64  
    [   ]polypaudio-0.6.tar.gz2008-03-28 21:16 448K 
    [   ]polypaudio-0.6.tar.gz.md52011-05-29 11:28 56  
    [   ]polypaudio-0.6.tar.gz.sha12011-05-29 11:28 64  
    [   ]polypaudio-0.7.tar.gz2008-03-28 21:16 924K 
    [   ]polypaudio-0.7.tar.gz.md52011-05-29 11:28 56  
    [   ]polypaudio-0.7.tar.gz.sha12011-05-29 11:28 64  
    [   ]polypaudio-0.8.1.tar.gz2008-03-28 21:16 1.1M 
    [   ]polypaudio-0.8.1.tar.gz.md52011-05-29 11:28 58  
    [   ]polypaudio-0.8.1.tar.gz.sha12011-05-29 11:28 66  
    [   ]polypaudio-0.8.tar.gz2008-03-28 21:16 1.1M 
    [   ]polypaudio-0.8.tar.gz.md52011-05-29 11:28 56  
    [   ]polypaudio-0.8.tar.gz.sha12011-05-29 11:28 64  
    [   ]polypaudio-0.9.0.tar.gz2008-03-28 21:16 1.1M 
    [   ]polypaudio-0.9.0.tar.gz.md52011-05-29 11:28 58  
    [   ]polypaudio-0.9.0.tar.gz.sha12011-05-29 11:28 66  
    [   ]polypaudio-0.9.1.tar.gz2008-03-28 21:16 1.1M 
    [   ]polypaudio-0.9.1.tar.gz.md52011-05-29 11:28 58  
    [   ]polypaudio-0.9.1.tar.gz.sha12011-05-29 11:28 66  
    [   ]pulseaudio-0.9.2.tar.gz2008-03-28 21:16 1.0M 
    [   ]pulseaudio-0.9.2.tar.gz.md52011-05-29 11:28 58  
    [   ]pulseaudio-0.9.2.tar.gz.sha12011-05-29 11:28 66  
    [   ]pulseaudio-0.9.3.tar.gz2008-03-28 21:16 1.1M 
    [   ]pulseaudio-0.9.3.tar.gz.md52011-05-29 11:28 58  
    [   ]pulseaudio-0.9.3.tar.gz.sha12011-05-29 11:28 66  
    [   ]pulseaudio-0.9.4.tar.gz2008-03-28 21:16 1.1M 
    [   ]pulseaudio-0.9.4.tar.gz.md52011-05-29 11:28 58  
    [   ]pulseaudio-0.9.4.tar.gz.sha12011-05-29 11:28 66  
    [   ]pulseaudio-0.9.5.tar.gz2008-03-28 21:16 1.1M 
    [   ]pulseaudio-0.9.5.tar.gz.md52011-05-29 11:28 58  
    [   ]pulseaudio-0.9.5.tar.gz.sha12011-05-29 11:28 66  
    [   ]pulseaudio-0.9.6.tar.gz2008-03-28 21:16 1.1M 
    [   ]pulseaudio-0.9.6.tar.gz.md52011-05-29 11:28 58  
    [   ]pulseaudio-0.9.6.tar.gz.sha12011-05-29 11:28 66  
    [   ]pulseaudio-0.9.7.tar.gz2008-03-28 21:16 1.0M 
    [   ]pulseaudio-0.9.7.tar.gz.md52011-05-29 11:28 58  
    [   ]pulseaudio-0.9.7.tar.gz.sha12011-05-29 11:28 66  
    [   ]pulseaudio-0.9.8.tar.gz2008-03-28 21:16 1.0M 
    [   ]pulseaudio-0.9.8.tar.gz.md52011-05-29 11:28 58  
    [   ]pulseaudio-0.9.8.tar.gz.sha12011-05-29 11:28 66  
    [   ]pulseaudio-0.9.9.tar.gz2008-03-28 21:16 1.0M 
    [   ]pulseaudio-0.9.9.tar.gz.md52011-05-29 11:28 58  
    [   ]pulseaudio-0.9.9.tar.gz.sha12011-05-29 11:28 66  
    [   ]pulseaudio-0.9.10.tar.gz2008-03-30 16:30 1.0M 
    [   ]pulseaudio-0.9.10.tar.gz.md52011-05-29 11:28 59  
    [   ]pulseaudio-0.9.10.tar.gz.sha12011-05-29 11:28 67  
    [   ]pulseaudio-0.9.11.tar.gz2008-07-24 12:41 1.1M 
    [   ] pulseaudio-0.9.11.tar.gz.md5       2011-05-29 11:28  59
    [   ] pulseaudio-0.9.11.tar.gz.sha1      2011-05-29 11:28  67
    [   ] pulseaudio-0.9.12.tar.gz           2008-09-09 00:17  1.2M
    [   ] pulseaudio-0.9.12.tar.gz.md5       2011-05-29 11:28  59
    [   ] pulseaudio-0.9.12.tar.gz.sha1      2011-05-29 11:28  67
    [   ] pulseaudio-0.9.13.tar.gz           2008-10-06 01:43  1.2M
    [   ] pulseaudio-0.9.13.tar.gz.md5       2011-05-29 11:28  59
    [   ] pulseaudio-0.9.13.tar.gz.sha1      2011-05-29 11:28  67
    [   ] pulseaudio-0.9.14.tar.gz           2009-01-12 23:09  1.2M
    [   ] pulseaudio-0.9.14.tar.gz.md5       2011-05-29 11:28  59
    [   ] pulseaudio-0.9.14.tar.gz.sha1      2011-05-29 11:28  67
    [   ] pulseaudio-0.9.15.tar.gz           2009-04-13 23:24  1.6M
    [   ] pulseaudio-0.9.15.tar.gz.md5       2011-05-29 11:28  59
    [   ] pulseaudio-0.9.15.tar.gz.sha1      2011-05-29 11:28  67
    [   ] pulseaudio-0.9.16-test1.tar.gz     2009-06-23 17:16  1.8M
    [   ] pulseaudio-0.9.16-test1.tar.gz.md5   2011-05-29 11:28  65
    [   ] pulseaudio-0.9.16-test1.tar.gz.sha1  2011-05-29 11:28  73
    [   ] pulseaudio-0.9.16.tar.gz           2009-09-10 00:49  1.9M
    [   ] pulseaudio-0.9.16.tar.gz.md5       2011-05-29 11:28  59
    [   ] pulseaudio-0.9.16.tar.gz.sha1      2011-05-29 11:28  67
    [   ] pulseaudio-0.9.17.tar.gz           2009-09-11 01:32  1.9M
    [   ] pulseaudio-0.9.17.tar.gz.md5       2011-05-29 11:28  59
    [   ] pulseaudio-0.9.17.tar.gz.sha1      2011-05-29 11:28  67
    [   ] pulseaudio-0.9.18.tar.gz           2009-09-19 00:43  1.9M
    [   ] pulseaudio-0.9.18.tar.gz.md5       2011-05-29 11:28  59
    [   ] pulseaudio-0.9.18.tar.gz.sha1      2011-05-29 11:28  67
    [   ] pulseaudio-0.9.19.tar.gz           2009-09-30 01:30  1.9M
    [   ] pulseaudio-0.9.19.tar.gz.md5       2011-05-29 11:28  59
    [   ] pulseaudio-0.9.19.tar.gz.sha1      2011-05-29 11:28  67
    [   ] pulseaudio-0.9.20.tar.gz           2009-11-11 05:10  2.0M
    [   ] pulseaudio-0.9.20.tar.gz.md5       2011-05-29 11:28  59
    [   ] pulseaudio-0.9.20.tar.gz.sha1      2011-05-29 11:28  67
    [   ] pulseaudio-0.9.21.tar.gz           2009-11-23 04:23  2.0M
    [   ] pulseaudio-0.9.21.tar.gz.md5       2011-05-29 11:28  59
    [   ] pulseaudio-0.9.21.tar.gz.sha1      2011-05-29 11:28  67
    [   ] pulseaudio-0.9.22.tar.gz           2010-11-26 01:12  2.0M
    [   ] pulseaudio-0.9.22.tar.gz.md5       2011-05-29 11:28  59
    [   ] pulseaudio-0.9.22.tar.gz.sha1      2011-05-29 11:28  67
    [   ] pulseaudio-0.9.23.tar.gz           2011-06-23 21:13  2.0M
    [   ] pulseaudio-0.9.23.tar.gz.md5       2011-06-23 21:13  59
    [   ] pulseaudio-0.9.23.tar.gz.sha1      2011-06-23 21:13  67
    [   ] pulseaudio-0.99.1.tar.gz           2011-08-02 21:59  2.2M
    [   ] pulseaudio-0.99.1.tar.gz.md5       2011-08-02 21:59  59
    [   ] pulseaudio-0.99.1.tar.gz.sha1      2011-08-02 21:59  67
    [   ] pulseaudio-0.99.2.tar.gz           2011-08-16 10:19  2.2M
    [   ] pulseaudio-0.99.2.tar.gz.md5       2011-08-16 10:19  59
    [   ] pulseaudio-0.99.2.tar.gz.sha1      2011-08-16 10:19  67
    [   ] pulseaudio-0.99.3.tar.gz           2011-08-29 17:11  2.2M
    [   ] pulseaudio-0.99.3.tar.gz.md5       2011-08-29 17:11  59
    [   ] pulseaudio-0.99.3.tar.gz.sha1      2011-08-29 17:11  67
    [   ] pulseaudio-0.99.4.tar.gz           2011-09-15 11:04  2.2M
    [   ] pulseaudio-0.99.4.tar.gz.md5       2011-09-15 11:04  59
    [   ] pulseaudio-0.99.4.tar.gz.sha1      2011-09-15 11:04  67
    [   ] pulseaudio-1.0.tar.gz              2011-09-27 08:54  2.1M
    [   ] pulseaudio-1.0.tar.gz.md5          2011-09-27 08:54  56
    [   ] pulseaudio-1.0.tar.gz.sha1         2011-09-27 08:54  64
    [   ] pulseaudio-1.0.tar.xz              2011-09-27 08:54  1.2M
    [   ] pulseaudio-1.0.tar.xz.md5          2011-09-27 08:54  56
    [   ] pulseaudio-1.0.tar.xz.sha1         2011-09-27 08:54  64
    [   ] pulseaudio-1.1.tar.gz              2011-10-20 13:25  2.1M
    [   ] pulseaudio-1.1.tar.gz.md5          2011-10-20 13:25  56
    [   ] pulseaudio-1.1.tar.gz.sha1         2011-10-20 13:25  64
    [   ] pulseaudio-1.1.tar.xz              2011-10-20 13:25  1.2M
    [   ] pulseaudio-1.1.tar.xz.md5          2011-10-20 13:25  56
    [   ] pulseaudio-1.1.tar.xz.sha1         2011-10-20 13:25  64
    [   ] pulseaudio-1.99.1.tar.gz           2012-03-15 12:50  2.2M
    [   ] pulseaudio-1.99.1.tar.gz.md5       2012-03-15 12:50  59
    [   ] pulseaudio-1.99.1.tar.gz.sha1      2012-03-15 12:50  67
    [   ] pulseaudio-1.99.1.tar.xz           2012-03-15 12:50  1.3M
    [   ] pulseaudio-1.99.1.tar.xz.md5       2012-03-15 12:50  59
    [   ] pulseaudio-1.99.1.tar.xz.sha1      2012-03-15 12:50  67
    [   ] pulseaudio-2.0.tar.gz              2012-05-11 13:48  2.2M
    [   ] pulseaudio-2.0.tar.gz.md5          2012-05-11 13:48  56
    [   ] pulseaudio-2.0.tar.gz.sha1         2012-05-11 13:48  64
    [   ] pulseaudio-2.0.tar.xz              2012-05-11 13:48  1.3M
    [   ] pulseaudio-2.0.tar.xz.md5          2012-05-11 13:48  56
    [   ] pulseaudio-2.0.tar.xz.sha1         2012-05-11 13:48  64
    [   ] pulseaudio-2.1.tar.gz              2012-07-19 12:09  2.2M
    [   ] pulseaudio-2.1.tar.gz.md5          2012-07-19 12:09  56
    [   ] pulseaudio-2.1.tar.gz.sha1         2012-07-19 12:09  64
    [   ] pulseaudio-2.1.tar.xz              2012-07-19 12:09  1.3M
    [   ] pulseaudio-2.1.tar.xz.md5          2012-07-19 12:09  56
    [   ] pulseaudio-2.1.tar.xz.sha1         2012-07-19 12:09  64
    [   ] pulseaudio-2.99.1.tar.gz           2012-11-03 11:44  2.3M
    [   ] pulseaudio-2.99.1.tar.gz.md5       2012-11-03 11:45  59
    [   ] pulseaudio-2.99.1.tar.gz.sha1      2012-11-03 11:45  67
    [   ] pulseaudio-2.99.1.tar.xz           2012-11-03 11:44  1.3M
    [   ] pulseaudio-2.99.1.tar.xz.md5       2012-11-03 11:46  59
    [   ] pulseaudio-2.99.1.tar.xz.sha1      2012-11-03 11:46  67
    [   ] pulseaudio-2.99.2.tar.gz           2012-11-17 08:21  2.3M
    [   ] pulseaudio-2.99.2.tar.gz.md5       2012-11-17 08:22  59
    [   ] pulseaudio-2.99.2.tar.gz.sha1      2012-11-17 08:22  67
    [   ] pulseaudio-2.99.2.tar.xz           2012-11-17 08:21  1.3M
    [   ] pulseaudio-2.99.2.tar.xz.md5       2012-11-17 08:22  59
    [   ] pulseaudio-2.99.2.tar.xz.sha1      2012-11-17 08:22  67
    [   ] pulseaudio-2.99.3.tar.gz           2012-12-07 04:07  2.3M
    [   ] pulseaudio-2.99.3.tar.gz.md5       2012-12-07 04:07  59
    [   ] pulseaudio-2.99.3.tar.gz.sha1      2012-12-07 04:07  67
    [   ] pulseaudio-2.99.3.tar.xz           2012-12-07 04:07  1.3M
    [   ] pulseaudio-2.99.3.tar.xz.md5       2012-12-07 04:07  59
    [   ] pulseaudio-2.99.3.tar.xz.sha1      2012-12-07 04:07  67
    [   ] pulseaudio-3.0.tar.gz              2012-12-18 07:22  2.3M
    [   ] pulseaudio-3.0.tar.gz.md5          2012-12-18 07:22  56
    [   ] pulseaudio-3.0.tar.gz.sha1         2012-12-18 07:22  64
    [   ] pulseaudio-3.0.tar.xz              2012-12-18 07:22  1.3M
    [   ] pulseaudio-3.0.tar.xz.md5          2012-12-18 07:22  56
    [   ] pulseaudio-3.0.tar.xz.sha1         2012-12-18 07:22  64
    [   ] pulseaudio-3.99.1.tar.gz           2013-04-16 04:10  2.3M
    [   ] pulseaudio-3.99.1.tar.gz.md5       2013-04-16 04:10  59
    [   ] pulseaudio-3.99.1.tar.gz.sha1      2013-04-16 04:10  67
    [   ] pulseaudio-3.99.1.tar.xz           2013-04-16 04:10  1.3M
    [   ] pulseaudio-3.99.1.tar.xz.md5       2013-04-16 04:10  59
    [   ] pulseaudio-3.99.1.tar.xz.sha1      2013-04-16 04:10  67
    [   ] pulseaudio-3.99.2.tar.gz           2013-05-23 03:26  2.3M
    [   ] pulseaudio-3.99.2.tar.gz.md5       2013-05-23 03:26  59
    [   ] pulseaudio-3.99.2.tar.gz.sha1      2013-05-23 03:26  67
    [   ] pulseaudio-3.99.2.tar.xz           2013-05-23 03:26  1.3M
    [   ] pulseaudio-3.99.2.tar.xz.md5       2013-05-23 03:26  59
    [   ] pulseaudio-3.99.2.tar.xz.sha1      2013-05-23 03:26  67
    [   ] pulseaudio-4.0.tar.gz              2013-06-03 18:52  2.3M
    [   ] pulseaudio-4.0.tar.gz.md5          2013-06-03 18:52  56
    [   ] pulseaudio-4.0.tar.gz.sha1         2013-06-03 18:52  64
    [   ] pulseaudio-4.0.tar.xz              2013-06-03 18:52  1.3M
    [   ] pulseaudio-4.0.tar.xz.md5          2013-06-03 18:52  56
    [   ] pulseaudio-4.0.tar.xz.sha1         2013-06-03 18:52  64
    [   ] pulseaudio-4.99.2.tar.gz           2014-01-23 19:10  2.4M
    [   ] pulseaudio-4.99.2.tar.gz.md5       2014-01-23 19:10  59
    [   ] pulseaudio-4.99.2.tar.gz.sha1      2014-01-23 19:10  67
    [   ] pulseaudio-4.99.2.tar.xz           2014-01-23 19:10  1.4M
    [   ] pulseaudio-4.99.2.tar.xz.md5       2014-01-23 19:10  59
    [   ] pulseaudio-4.99.2.tar.xz.sha1      2014-01-23 19:10  67
    [   ] pulseaudio-4.99.3.tar.gz           2014-01-29 20:16  2.4M
    [   ] pulseaudio-4.99.3.tar.gz.md5       2014-01-29 20:16  59
    [   ] pulseaudio-4.99.3.tar.gz.sha1      2014-01-29 20:16  67
    [   ] pulseaudio-4.99.3.tar.xz           2014-01-29 20:16  1.4M
    [   ] pulseaudio-4.99.3.tar.xz.md5       2014-01-29 20:16  59
    [   ] pulseaudio-4.99.3.tar.xz.sha1      2014-01-29 20:16  67
    [   ] pulseaudio-4.99.4.tar.gz           2014-02-15 06:04  2.5M
    [   ] pulseaudio-4.99.4.tar.gz.md5       2014-02-15 06:04  59
    [   ] pulseaudio-4.99.4.tar.gz.sha1      2014-02-15 06:04  67
    [   ] pulseaudio-4.99.4.tar.xz           2014-02-15 06:04  1.4M
    [   ] pulseaudio-4.99.4.tar.xz.md5       2014-02-15 06:04  59
    [   ] pulseaudio-4.99.4.tar.xz.sha1      2014-02-15 06:04  67
    [   ] pulseaudio-5.0.tar.gz              2014-03-03 15:00  2.4M
    [   ] pulseaudio-5.0.tar.gz.md5          2014-03-03 15:00  56
    [   ] pulseaudio-5.0.tar.gz.sha1         2014-03-03 15:00  64
    [   ] pulseaudio-5.0.tar.xz              2014-03-03 15:00  1.4M
    [   ] pulseaudio-5.0.tar.xz.md5          2014-03-03 15:00  56
    [   ] pulseaudio-5.0.tar.xz.sha1         2014-03-03 15:00  64
    [   ] pulseaudio-5.99.1.tar.gz           2014-11-21 14:26  2.5M
    [   ] pulseaudio-5.99.1.tar.gz.md5       2014-11-21 14:26  59
    [   ] pulseaudio-5.99.1.tar.gz.sha1      2014-11-21 14:26  67
    [   ] pulseaudio-5.99.1.tar.xz           2014-11-21 14:27  1.4M
    [   ] pulseaudio-5.99.1.tar.xz.md5       2014-11-21 14:27  59
    [   ] pulseaudio-5.99.1.tar.xz.sha1      2014-11-21 14:27  67
    [   ] pulseaudio-5.99.2.tar.gz           2014-12-19 13:08  2.5M
    [   ] pulseaudio-5.99.2.tar.gz.md5       2014-12-19 13:08  59
    [   ] pulseaudio-5.99.2.tar.gz.sha1      2014-12-19 13:08  67
    [   ] pulseaudio-5.99.2.tar.xz           2014-12-19 13:08  1.4M
    [   ] pulseaudio-5.99.2.tar.xz.md5       2014-12-19 13:08  59
    [   ] pulseaudio-5.99.2.tar.xz.sha1      2014-12-19 13:08  67
    [   ] pulseaudio-5.99.3.tar.gz           2015-01-21 14:45  2.5M
    [   ] pulseaudio-5.99.3.tar.gz.md5       2015-01-21 14:45  59
    [   ] pulseaudio-5.99.3.tar.gz.sha1      2015-01-21 14:45  67
    [   ] pulseaudio-5.99.3.tar.xz           2015-01-21 14:45  1.4M
    [   ] pulseaudio-5.99.3.tar.xz.md5       2015-01-21 14:45  59
    [   ] pulseaudio-5.99.3.tar.xz.sha1      2015-01-21 14:45  67
    [   ] pulseaudio-6.0.tar.gz              2015-02-12 19:02  2.5M
    [   ] pulseaudio-6.0.tar.gz.md5          2015-02-12 19:02  56
    [   ] pulseaudio-6.0.tar.gz.sha1         2015-02-12 19:02  64
    [   ] pulseaudio-6.0.tar.xz              2015-02-12 19:02  1.4M
    [   ] pulseaudio-6.0.tar.xz.md5          2015-02-12 19:02  56
    [   ] pulseaudio-6.0.tar.xz.sha1         2015-02-12 19:02  64
    [   ] pulseaudio-6.99.1.tar.gz           2015-08-27 17:56  2.6M
    [   ] pulseaudio-6.99.1.tar.gz.md5       2015-08-27 17:56  59
    [   ] pulseaudio-6.99.1.tar.gz.sha1      2015-08-27 17:56  67
    [   ] pulseaudio-6.99.1.tar.xz           2015-08-27 17:56  1.4M
    [   ] pulseaudio-6.99.1.tar.xz.md5       2015-08-27 17:56  59
    [   ] pulseaudio-6.99.1.tar.xz.sha1      2015-08-27 17:56  67
    [   ] pulseaudio-6.99.2.tar.gz           2015-09-12 13:56  2.6M
    [   ] pulseaudio-6.99.2.tar.gz.md5       2015-09-12 13:56  59
    [   ] pulseaudio-6.99.2.tar.gz.sha1      2015-09-12 13:56  67
    [   ] pulseaudio-6.99.2.tar.xz           2015-09-12 13:56  1.4M
    [   ] pulseaudio-6.99.2.tar.xz.md5       2015-09-12 13:56  59
    [   ] pulseaudio-6.99.2.tar.xz.sha1      2015-09-12 13:56  67
    [   ] pulseaudio-7.0.tar.gz              2015-09-24 03:31  2.6M
    [   ] pulseaudio-7.0.tar.gz.md5sum       2015-09-24 03:31  56
    [   ] pulseaudio-7.0.tar.gz.sha1sum      2015-09-24 03:31  64
    [   ] pulseaudio-7.0.tar.xz              2015-09-24 03:31  1.4M
    [   ] pulseaudio-7.0.tar.xz.md5          2015-09-24 03:31  56
    [   ] pulseaudio-7.0.tar.xz.md5sum       2015-09-24 03:31  56
    [   ] pulseaudio-7.0.tar.xz.sha1         2015-09-24 03:31  64
    [   ] pulseaudio-7.0.tar.xz.sha1sum      2015-09-24 03:31  64
    [   ] pulseaudio-7.1.tar.gz              2015-10-30 12:51  2.6M
    [   ] pulseaudio-7.1.tar.gz.md5          2015-10-30 12:51  56
    [   ] pulseaudio-7.1.tar.gz.sha1         2015-10-30 12:51  64
    [   ] pulseaudio-7.1.tar.xz              2015-10-30 12:51  1.4M
    [   ] pulseaudio-7.1.tar.xz.md5          2015-10-30 12:51  56
    [   ] pulseaudio-7.1.tar.xz.sha1         2015-10-30 12:51  64
    [   ] pulseaudio-7.99.1.tar.gz           2015-12-28 12:38  2.6M
    [   ] pulseaudio-7.99.1.tar.gz.md5       2015-12-28 12:38  59
    [   ] pulseaudio-7.99.1.tar.gz.sha1      2015-12-28 12:38  67
    [   ] pulseaudio-7.99.1.tar.xz           2015-12-28 12:39  1.4M
    [   ] pulseaudio-7.99.1.tar.xz.md5       2015-12-28 12:39  59
    [   ] pulseaudio-7.99.1.tar.xz.sha1      2015-12-28 12:39  67
    [   ] pulseaudio-7.99.2.tar.gz           2016-01-12 03:28  2.6M
    [   ] pulseaudio-7.99.2.tar.gz.md5       2016-01-12 03:28  59
    [   ] pulseaudio-7.99.2.tar.gz.sha1      2016-01-12 03:28  67
    [   ] pulseaudio-7.99.2.tar.xz           2016-01-12 03:28  1.4M
    [   ] pulseaudio-7.99.2.tar.xz.md5       2016-01-12 03:28  59
    [   ] pulseaudio-7.99.2.tar.xz.sha1      2016-01-12 03:28  67
    [   ] pulseaudio-8.0.tar.gz              2016-01-22 07:38  2.6M
    [   ] pulseaudio-8.0.tar.gz.md5          2016-01-22 07:38  56
    [   ] pulseaudio-8.0.tar.gz.sha1         2016-01-22 07:38  64
    [   ] pulseaudio-8.0.tar.xz              2016-01-22 07:38  1.4M
    [   ] pulseaudio-8.0.tar.xz.md5          2016-01-22 07:38  56
    [   ] pulseaudio-8.0.tar.xz.sha1         2016-01-22 07:38  64
    [   ] pulseaudio-8.99.1.tar.gz           2016-05-12 10:58  2.6M
    [   ] pulseaudio-8.99.1.tar.gz.md5       2016-05-12 10:58  59
    [   ] pulseaudio-8.99.1.tar.gz.sha1      2016-05-12 10:58  67
    [   ] pulseaudio-8.99.1.tar.xz           2016-05-12 10:58  1.5M
    [   ] pulseaudio-8.99.1.tar.xz.md5       2016-05-12 10:58  59
    [   ] pulseaudio-8.99.1.tar.xz.sha1      2016-05-12 10:58  67
    [   ] pulseaudio-8.99.2.tar.gz           2016-05-29 06:08  2.6M
    [   ] pulseaudio-8.99.2.tar.gz.md5       2016-05-29 06:08  59
    [   ] pulseaudio-8.99.2.tar.gz.sha1      2016-05-29 06:08  67
    [   ] pulseaudio-8.99.2.tar.xz           2016-05-29 06:08  1.5M
    [   ] pulseaudio-8.99.2.tar.xz.md5       2016-05-29 06:08  59
    [   ] pulseaudio-8.99.2.tar.xz.sha1      2016-05-29 06:08  67
    [   ] pulseaudio-9.0.tar.gz              2016-06-22 07:09  2.6M
    [   ] pulseaudio-9.0.tar.gz.md5          2016-06-22 07:09  56
    [   ] pulseaudio-9.0.tar.gz.sha1         2016-06-22 07:09  64
    [   ] pulseaudio-9.0.tar.xz              2016-06-22 07:09  1.5M
    [   ] pulseaudio-9.0.tar.xz.md5          2016-06-22 07:09  56
    [   ] pulseaudio-9.0.tar.xz.sha1         2016-06-22 07:09  64
    [   ] pulseaudio-9.99.1.tar.gz           2017-01-03 16:14  2.7M
    [   ] pulseaudio-9.99.1.tar.gz.md5       2017-01-03 16:14  59
    [   ] pulseaudio-9.99.1.tar.gz.sha1      2017-01-03 16:14  67
    [   ] pulseaudio-9.99.1.tar.xz           2017-01-03 16:14  1.5M
    [   ] pulseaudio-9.99.1.tar.xz.md5       2017-01-03 16:14  59
    [   ] pulseaudio-9.99.1.tar.xz.sha1      2017-01-03 16:14  67
    [   ] pulseaudio-10.0.tar.gz             2017-01-19 00:12  2.7M
    [   ] pulseaudio-10.0.tar.gz.md5         2017-01-19 00:12  57
    [   ] pulseaudio-10.0.tar.gz.sha1        2017-01-19 00:12  65
    [   ] pulseaudio-10.0.tar.xz             2017-01-19 00:12  1.5M
    [   ] pulseaudio-10.0.tar.xz.md5         2017-01-19 00:12  57
    [   ] pulseaudio-10.0.tar.xz.sha1        2017-01-19 00:12  65
    [   ] pulseaudio-10.99.1.tar.gz          2017-07-24 23:52  2.8M
    [   ] pulseaudio-10.99.1.tar.gz.md5      2017-07-24 23:52  60
    [   ] pulseaudio-10.99.1.tar.gz.sha1     2017-07-24 23:52  68
    [   ] pulseaudio-10.99.1.tar.xz          2017-07-24 23:52  1.6M
    [   ] pulseaudio-10.99.1.tar.xz.md5      2017-07-24 23:52  60
    [   ] pulseaudio-10.99.1.tar.xz.sha1     2017-07-24 23:52  68
    [   ] pulseaudio-11.0.tar.gz             2017-09-05 09:49  2.8M
    [   ] pulseaudio-11.0.tar.gz.md5         2017-09-05 09:49  57
    [   ] pulseaudio-11.0.tar.gz.sha1        2017-09-05 09:49  65
    [   ] pulseaudio-11.0.tar.xz             2017-09-05 09:49  1.6M
    [   ] pulseaudio-11.0.tar.xz.md5         2017-09-05 09:49  57
    [   ] pulseaudio-11.0.tar.xz.sha1        2017-09-05 09:49  65
    [   ] pulseaudio-11.1.tar.gz             2017-09-18 15:23  2.8M
    [   ] pulseaudio-11.1.tar.gz.md5         2017-09-18 15:23  57
    [   ] pulseaudio-11.1.tar.gz.sha1        2017-09-18 15:23  65
    [   ] pulseaudio-11.1.tar.xz             2017-09-18 15:23  1.6M
    [   ] pulseaudio-11.1.tar.xz.md5         2017-09-18 15:23  57
    [   ] pulseaudio-11.1.tar.xz.sha1        2017-09-18 15:23  65
    [   ] pulseaudio-11.99.1.tar.gz          2018-05-13 06:57  2.8M
    [   ] pulseaudio-11.99.1.tar.gz.md5      2018-05-13 06:57  60
    [   ] pulseaudio-11.99.1.tar.gz.sha1     2018-05-13 06:57  68
    [   ] pulseaudio-11.99.1.tar.xz          2018-05-13 06:57  1.6M
    [   ] pulseaudio-11.99.1.tar.xz.md5      2018-05-13 06:57  60
    [   ] pulseaudio-11.99.1.tar.xz.sha1     2018-05-13 06:57  68
    [   ] pulseaudio-12.0.tar.gz             2018-06-20 20:33  2.8M
    [   ] pulseaudio-12.0.tar.gz.md5         2018-06-20 20:33  57
    [   ] pulseaudio-12.0.tar.gz.sha1        2018-06-20 20:33  65
    [   ] pulseaudio-12.0.tar.xz             2018-06-20 20:33  1.6M
    [   ] pulseaudio-12.0.tar.xz.md5         2018-06-20 20:33  57
    [   ] pulseaudio-12.0.tar.xz.sha1        2018-06-20 20:33  65
    [   ] pulseaudio-12.1.tar.gz             2018-07-14 16:43  2.8M
    [   ] pulseaudio-12.1.tar.gz.md5         2018-07-14 16:43  57
    [   ] pulseaudio-12.1.tar.gz.sha1        2018-07-14 16:43  65
    [   ] pulseaudio-12.1.tar.xz             2018-07-14 16:43  1.6M
    [   ] pulseaudio-12.1.tar.xz.md5         2018-07-14 16:43  57
    [   ] pulseaudio-12.1.tar.xz.sha1        2018-07-14 16:43  65
    [   ] pulseaudio-12.2.tar.gz             2018-07-16 16:12  2.8M
    [   ] pulseaudio-12.2.tar.gz.sha256      2018-07-16 16:13  89
    [   ] pulseaudio-12.2.tar.xz             2018-07-16 16:12  1.6M
    [   ] pulseaudio-12.2.tar.xz.sha256      2018-07-16 16:13  89
    [   ] pulseaudio-12.99.1.tar.gz          2019-07-09 03:16  3.6M
    [   ] pulseaudio-12.99.1.tar.gz.sha256   2019-07-09 03:16  92
    [   ] pulseaudio-12.99.1.tar.xz          2019-07-09 03:16  1.8M
    [   ] pulseaudio-12.99.1.tar.xz.sha256   2019-07-09 03:16  92
    [   ] pulseaudio-12.99.2.tar.gz          2019-08-06 17:47  3.6M
    [   ] pulseaudio-12.99.2.tar.gz.sha256   2019-08-06 17:47  92
    [   ] pulseaudio-12.99.2.tar.xz          2019-08-06 17:47  1.8M
    [   ] pulseaudio-12.99.2.tar.xz.sha256   2019-08-06 17:47  92
    [   ] pulseaudio-12.99.3.tar.gz          2019-09-01 07:44  3.6M
    [   ] pulseaudio-12.99.3.tar.gz.sha256   2019-09-01 07:44  92
    [   ] pulseaudio-12.99.3.tar.xz          2019-09-01 07:44  1.9M
    [   ] pulseaudio-12.99.3.tar.xz.sha256   2019-09-01 07:44  92
    [   ] pulseaudio-13.0.tar.gz             2019-09-13 13:34  3.6M
    [   ] pulseaudio-13.0.tar.gz.sha256      2019-09-13 13:34  89
    [   ] pulseaudio-13.0.tar.xz             2019-09-13 13:34  1.8M
    [   ] pulseaudio-13.0.tar.xz.sha256      2019-09-13 13:34  89

    Apache/2.4.38 (Debian) Server at freedesktop.org Port 443
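The listing above appears to be a saved copy of the freedesktop.org pulseaudio release index, the kind of fixture the fetcher tests in the file below run version detection against. As a rough illustration of the idea only (this sketch is mine, not BitBake's wget fetcher logic; the helper name and sample text are hypothetical):

    import re

    def latest_version(index_text):
        # Collect "pulseaudio-X.Y[.Z].tar.gz/.tar.xz" entries; the lookahead
        # skips checksum files such as ".tar.gz.sha256".
        versions = set()
        for m in re.finditer(r"pulseaudio-(\d+(?:\.\d+)+)\.tar\.(?:gz|xz)(?!\.)", index_text):
            versions.add(tuple(int(p) for p in m.group(1).split(".")))
        return ".".join(str(p) for p in max(versions))

    sample = """
    [   ] pulseaudio-12.2.tar.xz      2018-07-16 16:12  1.6M
    [   ] pulseaudio-12.99.1.tar.xz   2019-07-09 03:16  1.8M
    [   ] pulseaudio-13.0.tar.xz      2019-09-13 13:34  1.8M
    [   ] pulseaudio-13.0.tar.xz.sha256  2019-09-13 13:34  89
    """
    print(latest_version(sample))  # -> "13.0"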
diff --git a/bitbake/lib/bb/tests/fetch.py b/bitbake/lib/bb/tests/fetch.py
deleted file mode 100644
index e027705bf4..0000000000
--- a/bitbake/lib/bb/tests/fetch.py
+++ /dev/null
@@ -1,3799 +0,0 @@
-#
-# BitBake Tests for the Fetcher (fetch2/)
-#
-# Copyright (C) 2012 Richard Purdie
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-
-import contextlib
-import shutil
-import unittest
-import unittest.mock
-import urllib.parse
-import hashlib
-import tempfile
-import collections
-import os
-import signal
-import tarfile
-from bb.fetch2 import URI
-from bb.fetch2 import FetchMethod
-import bb
-import bb.utils
-from bb.tests.support.httpserver import HTTPService
-
-def skipIfNoNetwork():
-    if os.environ.get("BB_SKIP_NETTESTS") == "yes":
-        return unittest.skip("network test")
-    return lambda f: f
-
-
-@contextlib.contextmanager
-def hide_directory(directory):
-    """Hide the given directory and restore it after the context is left"""
-    temp_name = directory + ".bak"
-    os.rename(directory, temp_name)
-    try:
-        yield
-    finally:
-        os.rename(temp_name, directory)
-
-
-class TestTimeout(Exception):
-    # Indicate to pytest that this is not a test suite
-    __test__ = False
-
-class Timeout():
-
-    def __init__(self, seconds):
-        self.seconds = seconds
-
-    def handle_timeout(self, signum, frame):
-        raise TestTimeout("Test failed: timeout reached")
-
-    def __enter__(self):
-        signal.signal(signal.SIGALRM, self.handle_timeout)
-        signal.alarm(self.seconds)
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        signal.alarm(0)
-
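The Timeout helper removed above guards hanging fetch operations by arming SIGALRM so the test aborts instead of stalling the whole suite. A minimal, self-contained sketch of the same pattern (illustrative only, POSIX-only, not part of the removed file):

    import signal
    import time

    class Timeout:
        """Raise TimeoutError if the body of the `with` block runs too long."""
        def __init__(self, seconds):
            self.seconds = seconds
        def _handler(self, signum, frame):
            raise TimeoutError("timeout reached")
        def __enter__(self):
            signal.signal(signal.SIGALRM, self._handler)
            signal.alarm(self.seconds)  # deliver SIGALRM after `seconds`
        def __exit__(self, exc_type, exc_val, exc_tb):
            signal.alarm(0)  # cancel any pending alarm

    try:
        with Timeout(1):
            time.sleep(5)  # stands in for a hung network operation
    except TimeoutError as e:
        print(e)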
-class URITest(unittest.TestCase):
-    test_uris = {
-        "http://www.google.com/index.html" : {
-            'uri': 'http://www.google.com/index.html',
-            'scheme': 'http',
-            'hostname': 'www.google.com',
-            'port': None,
-            'hostport': 'www.google.com',
-            'path': '/index.html',
-            'userinfo': '',
-            'username': '',
-            'password': '',
-            'params': {},
-            'query': {},
-            'relative': False
-        },
-        "http://www.google.com/index.html;param1=value1" : {
-            'uri': 'http://www.google.com/index.html;param1=value1',
-            'scheme': 'http',
-            'hostname': 'www.google.com',
-            'port': None,
-            'hostport': 'www.google.com',
-            'path': '/index.html',
-            'userinfo': '',
-            'username': '',
-            'password': '',
-            'params': {
-                'param1': 'value1'
-            },
-            'query': {},
-            'relative': False
-        },
-        "http://www.example.org/index.html?param1=value1" : {
-            'uri': 'http://www.example.org/index.html?param1=value1',
-            'scheme': 'http',
-            'hostname': 'www.example.org',
-            'port': None,
-            'hostport': 'www.example.org',
-            'path': '/index.html',
-            'userinfo': '',
-            'username': '',
-            'password': '',
-            'params': {},
-            'query': {
-                'param1': 'value1'
-            },
-            'relative': False
-        },
-        "http://www.example.org/index.html?qparam1=qvalue1;param2=value2" : {
-            'uri': 'http://www.example.org/index.html?qparam1=qvalue1;param2=value2',
-            'scheme': 'http',
-            'hostname': 'www.example.org',
-            'port': None,
-            'hostport': 'www.example.org',
-            'path': '/index.html',
-            'userinfo': '',
-            'username': '',
-            'password': '',
-            'params': {
-                'param2': 'value2'
-            },
-            'query': {
-                'qparam1': 'qvalue1'
-            },
-            'relative': False
-        },
-        # Check that trailing semicolons are handled correctly
-        "http://www.example.org/index.html?qparam1=qvalue1;param2=value2;" : {
-            'uri': 'http://www.example.org/index.html?qparam1=qvalue1;param2=value2',
-            'scheme': 'http',
-            'hostname': 'www.example.org',
-            'port': None,
-            'hostport': 'www.example.org',
-            'path': '/index.html',
-            'userinfo': '',
-            'username': '',
-            'password': '',
-            'params': {
-                'param2': 'value2'
-            },
-            'query': {
-                'qparam1': 'qvalue1'
-            },
-            'relative': False
-        },
-        "http://www.example.com:8080/index.html" : {
-            'uri': 'http://www.example.com:8080/index.html',
-            'scheme': 'http',
-            'hostname': 'www.example.com',
-            'port': 8080,
-            'hostport': 'www.example.com:8080',
-            'path': '/index.html',
-            'userinfo': '',
-            'username': '',
-            'password': '',
-            'params': {},
-            'query': {},
-            'relative': False
-        },
-        "cvs://anoncvs@cvs.handhelds.org/cvs;module=familiar/dist/ipkg" : {
-            'uri': 'cvs://anoncvs@cvs.handhelds.org/cvs;module=familiar/dist/ipkg',
-            'scheme': 'cvs',
-            'hostname': 'cvs.handhelds.org',
-            'port': None,
-            'hostport': 'cvs.handhelds.org',
-            'path': '/cvs',
-            'userinfo': 'anoncvs',
-            'username': 'anoncvs',
-            'password': '',
-            'params': {
-                'module': 'familiar/dist/ipkg'
-            },
-            'query': {},
-            'relative': False
-        },
-        "cvs://anoncvs:anonymous@cvs.handhelds.org/cvs;tag=V0-99-81;module=familiar/dist/ipkg": {
-            'uri': 'cvs://anoncvs:anonymous@cvs.handhelds.org/cvs;tag=V0-99-81;module=familiar/dist/ipkg',
-            'scheme': 'cvs',
-            'hostname': 'cvs.handhelds.org',
-            'port': None,
-            'hostport': 'cvs.handhelds.org',
-            'path': '/cvs',
-            'userinfo': 'anoncvs:anonymous',
-            'username': 'anoncvs',
-            'password': 'anonymous',
-            'params': collections.OrderedDict([
-                ('tag', 'V0-99-81'),
-                ('module', 'familiar/dist/ipkg')
-            ]),
-            'query': {},
-            'relative': False
-        },
-        "file://example.diff": { # NOTE: Not RFC compliant!
-            'uri': 'file:example.diff',
-            'scheme': 'file',
-            'hostname': '',
-            'port': None,
-            'hostport': '',
-            'path': 'example.diff',
-            'userinfo': '',
-            'username': '',
-            'password': '',
-            'params': {},
-            'query': {},
-            'relative': True
-        },
-        "file:example.diff": { # NOTE: RFC compliant version of the former
-            'uri': 'file:example.diff',
-            'scheme': 'file',
-            'hostname': '',
-            'port': None,
-            'hostport': '',
-            'path': 'example.diff',
-            'userinfo': '',
-            'userinfo': '',
-            'username': '',
-            'password': '',
-            'params': {},
-            'query': {},
-            'relative': True
-        },
-        "file:///tmp/example.diff": {
-            'uri': 'file:///tmp/example.diff',
-            'scheme': 'file',
-            'hostname': '',
-            'port': None,
-            'hostport': '',
-            'path': '/tmp/example.diff',
-            'userinfo': '',
-            'userinfo': '',
-            'username': '',
-            'password': '',
-            'params': {},
-            'query': {},
-            'relative': False
-        },
-        "git:///path/example.git": {
-            'uri': 'git:///path/example.git',
-            'scheme': 'git',
-            'hostname': '',
-            'port': None,
-            'hostport': '',
-            'path': '/path/example.git',
-            'userinfo': '',
-            'userinfo': '',
-            'username': '',
-            'password': '',
-            'params': {},
-            'query': {},
-            'relative': False
-        },
-        "git:path/example.git": {
-            'uri': 'git:path/example.git',
-            'scheme': 'git',
-            'hostname': '',
-            'port': None,
-            'hostport': '',
-            'path': 'path/example.git',
-            'userinfo': '',
-            'userinfo': '',
-            'username': '',
-            'password': '',
-            'params': {},
-            'query': {},
-            'relative': True
-        },
-        "git://example.net/path/example.git": {
-            'uri': 'git://example.net/path/example.git',
-            'scheme': 'git',
-            'hostname': 'example.net',
-            'port': None,
-            'hostport': 'example.net',
-            'path': '/path/example.git',
-            'userinfo': '',
-            'userinfo': '',
-            'username': '',
-            'password': '',
-            'params': {},
-            'query': {},
-            'relative': False
-        },
-        "git://tfs-example.org:22/tfs/example%20path/example.git": {
-            'uri': 'git://tfs-example.org:22/tfs/example%20path/example.git',
-            'scheme': 'git',
-            'hostname': 'tfs-example.org',
-            'port': 22,
-            'hostport': 'tfs-example.org:22',
-            'path': '/tfs/example path/example.git',
-            'userinfo': '',
-            'userinfo': '',
-            'username': '',
-            'password': '',
-            'params': {},
-            'query': {},
-            'relative': False
-        },
-        "http://somesite.net;someparam=1": {
-            'uri': 'http://somesite.net;someparam=1',
-            'scheme': 'http',
-            'hostname': 'somesite.net',
-            'port': None,
-            'hostport': 'somesite.net',
-            'path': '',
-            'userinfo': '',
-            'userinfo': '',
-            'username': '',
-            'password': '',
-            'params': {"someparam" : "1"},
-            'query': {},
-            'relative': False
-        },
-        "file://somelocation;someparam=1": {
-            'uri': 'file:somelocation;someparam=1',
-            'scheme': 'file',
-            'hostname': '',
-            'port': None,
-            'hostport': '',
-            'path': 'somelocation',
-            'userinfo': '',
-            'userinfo': '',
-            'username': '',
-            'password': '',
-            'params': {"someparam" : "1"},
-            'query': {},
-            'relative': True
-        },
-        "https://www.innodisk.com/Download_file?9BE0BF6657;downloadfilename=EGPL-T101.zip": {
-            'uri': 'https://www.innodisk.com/Download_file?9BE0BF6657;downloadfilename=EGPL-T101.zip',
-            'scheme': 'https',
-            'hostname': 'www.innodisk.com',
-            'port': None,
-            'hostport': 'www.innodisk.com',
-            'path': '/Download_file',
-            'userinfo': '',
-            'userinfo': '',
-            'username': '',
-            'password': '',
-            'params': {"downloadfilename" : "EGPL-T101.zip"},
-            'query': {"9BE0BF6657": None},
-            'relative': False
-        },
-        "file://example@.service": {
-            'uri': 'file:example%40.service',
-            'scheme': 'file',
-            'hostname': '',
-            'port': None,
-            'hostport': '',
-            'path': 'example@.service',
-            'userinfo': '',
-            'userinfo': '',
-            'username': '',
-            'password': '',
-            'params': {},
-            'query': {},
-            'relative': True
-        }
-
-    }
-
-    def test_uri(self):
-        for test_uri, ref in self.test_uris.items():
-            uri = URI(test_uri)
-
-            self.assertEqual(str(uri), ref['uri'])
-
-            # expected attributes
-            self.assertEqual(uri.scheme, ref['scheme'])
-
-            self.assertEqual(uri.userinfo, ref['userinfo'])
-            self.assertEqual(uri.username, ref['username'])
-            self.assertEqual(uri.password, ref['password'])
-
-            self.assertEqual(uri.hostname, ref['hostname'])
-            self.assertEqual(uri.port, ref['port'])
-            self.assertEqual(uri.hostport, ref['hostport'])
-
-            self.assertEqual(uri.path, ref['path'])
-            self.assertEqual(uri.params, ref['params'])
-
-            self.assertEqual(uri.relative, ref['relative'])
-
-    def test_dict(self):
-        for test in self.test_uris.values():
-            uri = URI()
-
-            self.assertEqual(uri.scheme, '')
-            self.assertEqual(uri.userinfo, '')
-            self.assertEqual(uri.username, '')
-            self.assertEqual(uri.password, '')
-            self.assertEqual(uri.hostname, '')
-            self.assertEqual(uri.port, None)
-            self.assertEqual(uri.path, '')
-            self.assertEqual(uri.params, {})
-
-
-            uri.scheme = test['scheme']
-            self.assertEqual(uri.scheme, test['scheme'])
-
-            uri.userinfo = test['userinfo']
-            self.assertEqual(uri.userinfo, test['userinfo'])
-            self.assertEqual(uri.username, test['username'])
-            self.assertEqual(uri.password, test['password'])
-
-            # make sure changing the values doesn't do anything unexpected
-            uri.username = 'changeme'
-            self.assertEqual(uri.username, 'changeme')
-            self.assertEqual(uri.password, test['password'])
-            uri.password = 'insecure'
-            self.assertEqual(uri.username, 'changeme')
-            self.assertEqual(uri.password, 'insecure')
-
-            # reset back after our trickery
-            uri.userinfo = test['userinfo']
-            self.assertEqual(uri.userinfo, test['userinfo'])
-            self.assertEqual(uri.username, test['username'])
-            self.assertEqual(uri.password, test['password'])
-
-            uri.hostname = test['hostname']
-            self.assertEqual(uri.hostname, test['hostname'])
-            self.assertEqual(uri.hostport, test['hostname'])
-
-            uri.port = test['port']
-            self.assertEqual(uri.port, test['port'])
-            self.assertEqual(uri.hostport, test['hostport'])
-
-            uri.path = test['path']
-            self.assertEqual(uri.path, test['path'])
-
-            uri.params = test['params']
-            self.assertEqual(uri.params, test['params'])
-
-            uri.query = test['query']
-            self.assertEqual(uri.query, test['query'])
-
-            self.assertEqual(str(uri), test['uri'])
-
-            uri.params = {}
-            self.assertEqual(uri.params, {})
-            self.assertEqual(str(uri), (str(uri).split(";"))[0])
-
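URITest above doubles as a reference for bb.fetch2.URI. A short usage sketch built only from attributes the tests themselves assert (assumes a BitBake checkout provides bb on sys.path):

    from bb.fetch2 import URI  # requires bitbake/lib on sys.path

    u = URI("git://tfs-example.org:22/tfs/example%20path/example.git")
    print(u.scheme)    # 'git'
    print(u.hostport)  # 'tfs-example.org:22'
    print(u.path)      # '/tfs/example path/example.git' (percent-decoded)

    # Assigning params serialises them back as ";key=value" on str(u)
    u.params = {"protocol": "https"}
    print(str(u))      # ...example.git;protocol=https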
-class FetcherTest(unittest.TestCase):
-
-    def setUp(self):
-        self.origdir = os.getcwd()
-        self.d = bb.data.init()
-        self.tempdir = tempfile.mkdtemp(prefix="bitbake-fetch-")
-        self.dldir = os.path.join(self.tempdir, "download")
-        os.mkdir(self.dldir)
-        self.d.setVar("DL_DIR", self.dldir)
-        self.unpackdir = os.path.join(self.tempdir, "unpacked")
-        os.mkdir(self.unpackdir)
-        persistdir = os.path.join(self.tempdir, "persistdata")
-        self.d.setVar("PERSISTENT_DIR", persistdir)
-
-    def tearDown(self):
-        os.chdir(self.origdir)
-        if os.environ.get("BB_TMPDIR_NOCLEAN") == "yes":
-            print("Not cleaning up %s. Please remove manually." % self.tempdir)
-        else:
-            bb.process.run('chmod u+rw -R %s' % self.tempdir)
-            bb.utils.prunedir(self.tempdir)
-
-    def git(self, cmd, cwd=None):
-        if isinstance(cmd, str):
-            cmd = 'git -c safe.bareRepository=all ' + cmd
-        else:
-            cmd = ['git', '-c', 'safe.bareRepository=all'] + cmd
-        if cwd is None:
-            cwd = self.gitdir
-        return bb.process.run(cmd, cwd=cwd)[0]
-
-    def git_init(self, cwd=None):
-        self.git('init', cwd=cwd)
-        # Explicitly set initial branch to master as
-        # a common setup is to use other default
-        # branch than master.
-        self.git(['checkout', '-b', 'master'], cwd=cwd)
-
-        try:
-            self.git(['config', 'user.email'], cwd=cwd)
-        except bb.process.ExecutionError:
-            self.git(['config', 'user.email', 'you@example.com'], cwd=cwd)
-
-        try:
-            self.git(['config', 'user.name'], cwd=cwd)
-        except bb.process.ExecutionError:
-            self.git(['config', 'user.name', 'Your Name'], cwd=cwd)
-
-class MirrorUriTest(FetcherTest):
-
-    replaceuris = {
-        ("git://git.invalid.infradead.org/mtd-utils.git;tag=1234567890123456789012345678901234567890;branch=master", "git://.*/.*", "http://somewhere.org/somedir/")
-            : "http://somewhere.org/somedir/git2_git.invalid.infradead.org.mtd-utils.git.tar.gz",
-        ("git://git.invalid.infradead.org/mtd-utils.git;tag=1234567890123456789012345678901234567890;branch=master", "git://.*/([^/]+/)*([^/]*)", "git://somewhere.org/somedir/\\2;protocol=http")
-            : "git://somewhere.org/somedir/mtd-utils.git;tag=1234567890123456789012345678901234567890;branch=master;protocol=http",
-        ("git://git.invalid.infradead.org/foo/mtd-utils.git;tag=1234567890123456789012345678901234567890;branch=master", "git://.*/([^/]+/)*([^/]*)", "git://somewhere.org/somedir/\\2;protocol=http")
-            : "git://somewhere.org/somedir/mtd-utils.git;tag=1234567890123456789012345678901234567890;branch=master;protocol=http",
-        ("git://git.invalid.infradead.org/foo/mtd-utils.git;tag=1234567890123456789012345678901234567890;branch=master", "git://.*/([^/]+/)*([^/]*)", "git://somewhere.org/\\2;protocol=http")
-            : "git://somewhere.org/mtd-utils.git;tag=1234567890123456789012345678901234567890;branch=master;protocol=http",
-        ("git://someserver.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master", "git://someserver.org/bitbake", "git://git.openembedded.org/bitbake")
-            : "git://git.openembedded.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master",
-        ("file://sstate-xyz.tgz", "file://.*", "file:///somewhere/1234/sstate-cache")
-            : "file:///somewhere/1234/sstate-cache/sstate-xyz.tgz",
-        ("file://sstate-xyz.tgz", "file://.*", "file:///somewhere/1234/sstate-cache/")
-            : "file:///somewhere/1234/sstate-cache/sstate-xyz.tgz",
-        ("http://somewhere.org/somedir1/somedir2/somefile_1.2.3.tar.gz", "http://.*/.*", "http://somewhere2.org/somedir3")
-            : "http://somewhere2.org/somedir3/somefile_1.2.3.tar.gz",
-        ("http://somewhere.org/somedir1/somefile_1.2.3.tar.gz", "http://somewhere.org/somedir1/somefile_1.2.3.tar.gz", "http://somewhere2.org/somedir3/somefile_1.2.3.tar.gz")
-            : "http://somewhere2.org/somedir3/somefile_1.2.3.tar.gz",
-        ("http://www.apache.org/dist/subversion/subversion-1.7.1.tar.bz2", "http://www.apache.org/dist", "http://archive.apache.org/dist")
-            : "http://archive.apache.org/dist/subversion/subversion-1.7.1.tar.bz2",
-        ("http://www.apache.org/dist/subversion/subversion-1.7.1.tar.bz2", "http://.*/.*", "file:///somepath/downloads/")
-            : "file:///somepath/downloads/subversion-1.7.1.tar.bz2",
-        ("git://git.invalid.infradead.org/mtd-utils.git;tag=1234567890123456789012345678901234567890;branch=master", "git://.*/.*", "git://somewhere.org/somedir/BASENAME;protocol=http")
-            : "git://somewhere.org/somedir/mtd-utils.git;tag=1234567890123456789012345678901234567890;branch=master;protocol=http",
-        ("git://git.invalid.infradead.org/foo/mtd-utils.git;tag=1234567890123456789012345678901234567890;branch=master", "git://.*/.*", "git://somewhere.org/somedir/BASENAME;protocol=http")
-            : "git://somewhere.org/somedir/mtd-utils.git;tag=1234567890123456789012345678901234567890;branch=master;protocol=http",
-        ("git://git.invalid.infradead.org/foo/mtd-utils.git;tag=1234567890123456789012345678901234567890;branch=master", "git://.*/.*", "git://somewhere.org/somedir/MIRRORNAME;protocol=http")
-            : "git://somewhere.org/somedir/git.invalid.infradead.org.foo.mtd-utils.git;tag=1234567890123456789012345678901234567890;branch=master;protocol=http",
-        ("http://somewhere.org/somedir1/somedir2/somefile_1.2.3.tar.gz", "http://.*/.*", "http://somewhere2.org")
-            : "http://somewhere2.org/somefile_1.2.3.tar.gz",
-        ("http://somewhere.org/somedir1/somedir2/somefile_1.2.3.tar.gz", "http://.*/.*", "http://somewhere2.org/")
-            : "http://somewhere2.org/somefile_1.2.3.tar.gz",
-        ("git://someserver.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master", "git://someserver.org/bitbake;branch=master", "git://git.openembedded.org/bitbake;protocol=http")
-            : "git://git.openembedded.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master;protocol=http",
-        ("git://user1@someserver.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master", "git://someserver.org/bitbake;branch=master", "git://user2@git.openembedded.org/bitbake;protocol=http")
-            : "git://user2@git.openembedded.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master;protocol=http",
-        ("git://someserver.org/bitbake;tag=1234567890123456789012345678901234567890;protocol=git;branch=master", "git://someserver.org/bitbake", "git://someotherserver.org/bitbake;protocol=https")
-            : "git://someotherserver.org/bitbake;tag=1234567890123456789012345678901234567890;protocol=https;branch=master",
-        ("gitsm://git.qemu.org/git/seabios.git/;protocol=https;name=roms/seabios;subpath=roms/seabios;bareclone=1;nobranch=1;rev=1234567890123456789012345678901234567890", "gitsm://.*/.*", "http://petalinux.xilinx.com/sswreleases/rel-v${XILINX_VER_MAIN}/downloads") : "http://petalinux.xilinx.com/sswreleases/rel-v%24%7BXILINX_VER_MAIN%7D/downloads/git2_git.qemu.org.git.seabios.git..tar.gz",
-        ("https://somewhere.org/example/1.0.0/example;downloadfilename=some-example-1.0.0.tgz", "https://.*/.*", "file:///mirror/PATH")
-            : "file:///mirror/example/1.0.0/some-example-1.0.0.tgz;downloadfilename=some-example-1.0.0.tgz",
-        ("https://somewhere.org/example-1.0.0.tgz;downloadfilename=some-example-1.0.0.tgz", "https://.*/.*", "file:///mirror/some-example-1.0.0.tgz")
-            : "file:///mirror/some-example-1.0.0.tgz;downloadfilename=some-example-1.0.0.tgz",
-        ("git://git.invalid.infradead.org/mtd-utils.git;tag=1234567890123456789012345678901234567890;branch=master", r"git://(?!internal\.git\.server).*/.*", "http://somewhere.org/somedir/")
-            : "http://somewhere.org/somedir/git2_git.invalid.infradead.org.mtd-utils.git.tar.gz",
-        ("git://internal.git.server.org/mtd-utils.git;tag=1234567890123456789012345678901234567890;branch=master", r"git://(?!internal\.git\.server).*/.*", "http://somewhere.org/somedir/")
-            : None,
-
-        #Renaming files doesn't work
-        #("http://somewhere.org/somedir1/somefile_1.2.3.tar.gz", "http://somewhere.org/somedir1/somefile_1.2.3.tar.gz", "http://somewhere2.org/somedir3/somefile_2.3.4.tar.gz") : "http://somewhere2.org/somedir3/somefile_2.3.4.tar.gz"
-        #("file://sstate-xyz.tgz", "file://.*/.*", "file:///somewhere/1234/sstate-cache") : "file:///somewhere/1234/sstate-cache/sstate-xyz.tgz",
-    }
-
-    mirrorvar = "http://.*/.* file:///somepath/downloads/ " \
-                "git://someserver.org/bitbake git://git.openembedded.org/bitbake " \
-                "https?://.*/.* file:///someotherpath/downloads/ " \
-                "svn://svn.server1.com/ svn://svn.server2.com/"
-
-    def test_urireplace(self):
-        self.d.setVar("FILESPATH", ".")
-        for k, v in self.replaceuris.items():
-            ud = bb.fetch.FetchData(k[0], self.d)
-            ud.setup_localpath(self.d)
-            mirrors = bb.fetch2.mirror_from_string("%s %s" % (k[1], k[2]))
-            newuris, uds = bb.fetch2.build_mirroruris(ud, mirrors, self.d)
-            self.assertEqual([v] if v else [], newuris)
-
-    def test_urilist1(self):
-        fetcher = bb.fetch.FetchData("http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", self.d)
-        mirrors = bb.fetch2.mirror_from_string(self.mirrorvar)
-        uris, uds = bb.fetch2.build_mirroruris(fetcher, mirrors, self.d)
-        self.assertEqual(uris, ['file:///somepath/downloads/bitbake-1.0.tar.gz', 'file:///someotherpath/downloads/bitbake-1.0.tar.gz'])
-
-    def test_urilist2(self):
-        # Catch https:// -> files:// bug
-        fetcher = bb.fetch.FetchData("https://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", self.d)
-        mirrors = bb.fetch2.mirror_from_string(self.mirrorvar)
-        uris, uds = bb.fetch2.build_mirroruris(fetcher, mirrors, self.d)
-        self.assertEqual(uris, ['file:///someotherpath/downloads/bitbake-1.0.tar.gz'])
-
-    def test_urilistsvn(self):
-        # Catch svn:// -> svn:// bug
-        fetcher = bb.fetch.FetchData("svn://svn.server1.com/isource/svnroot/reponame/tags/tagname;module=path_in_tagnamefolder;protocol=https;rev=2", self.d)
-        mirrors = bb.fetch2.mirror_from_string(self.mirrorvar)
-        uris, uds = bb.fetch2.build_mirroruris(fetcher, mirrors, self.d)
-        self.assertEqual(uris, ['svn://svn.server2.com/isource/svnroot/reponame/tags/tagname;module=path_in_tagnamefolder;protocol=https;rev=2'])
-
-    def test_mirror_of_mirror(self):
-        # Test if mirror of a mirror works
-        mirrorvar = self.mirrorvar + " http://.*/.* http://otherdownloads.yoctoproject.org/downloads/"
-        mirrorvar = mirrorvar + " http://otherdownloads.yoctoproject.org/.* http://downloads2.yoctoproject.org/downloads/"
-        fetcher = bb.fetch.FetchData("http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", self.d)
-        mirrors = bb.fetch2.mirror_from_string(mirrorvar)
-        uris, uds = bb.fetch2.build_mirroruris(fetcher, mirrors, self.d)
-        self.assertEqual(uris, ['file:///somepath/downloads/bitbake-1.0.tar.gz',
-                                'file:///someotherpath/downloads/bitbake-1.0.tar.gz',
-                                'http://otherdownloads.yoctoproject.org/downloads/bitbake-1.0.tar.gz',
-                                'http://downloads2.yoctoproject.org/downloads/bitbake-1.0.tar.gz'])
-
-    recmirrorvar = "https://.*/[^/]* http://aaaa/A/A/A/ " \
-                   "https://.*/[^/]* https://bbbb/B/B/B/"
-
-    def test_recursive(self):
-        fetcher = bb.fetch.FetchData("https://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", self.d)
-        mirrors = bb.fetch2.mirror_from_string(self.recmirrorvar)
-        uris, uds = bb.fetch2.build_mirroruris(fetcher, mirrors, self.d)
-        self.assertEqual(uris, ['http://aaaa/A/A/A/bitbake/bitbake-1.0.tar.gz',
-                                'https://bbbb/B/B/B/bitbake/bitbake-1.0.tar.gz',
-                                'http://aaaa/A/A/A/B/B/bitbake/bitbake-1.0.tar.gz'])
-
fetcher.download() - - dir = os.listdir(self.dldir) - self.assertIn(self.recipe_tarball, dir) - - @skipIfNoNetwork() - def test_that_the_mirror_tarball_is_created_when_mirroring_is_used(self): - self.setup_mirror_rewrite() - fetcher = bb.fetch.Fetch([self.recipe_url], self.d) - - fetcher.download() - - dir = os.listdir(self.dldir) - self.assertIn(self.mirror_tarball, dir) - - -class GitShallowTarballNamingTest(FetcherTest): - def setUp(self): - super(GitShallowTarballNamingTest, self).setUp() - self.recipe_url = "git://git.openembedded.org/bitbake;branch=master;protocol=https" - self.recipe_tarball = "gitshallow_git.openembedded.org.bitbake_82ea737-1_master.tar.gz" - self.mirror_url = "git://github.com/openembedded/bitbake.git;protocol=https;branch=master" - self.mirror_tarball = "gitshallow_github.com.openembedded.bitbake.git_82ea737-1_master.tar.gz" - - self.d.setVar('BB_GIT_SHALLOW', '1') - self.d.setVar('BB_GENERATE_SHALLOW_TARBALLS', '1') - self.d.setVar('SRCREV', '82ea737a0b42a8b53e11c9cde141e9e9c0bd8c40') - - def setup_mirror_rewrite(self): - self.d.setVar("PREMIRRORS", self.recipe_url + " " + self.mirror_url) - - @skipIfNoNetwork() - def test_that_the_tarball_is_named_after_recipe_url_when_no_mirroring_is_used(self): - fetcher = bb.fetch.Fetch([self.recipe_url], self.d) - - fetcher.download() - - dir = os.listdir(self.dldir) - self.assertIn(self.recipe_tarball, dir) - - @skipIfNoNetwork() - def test_that_the_mirror_tarball_is_created_when_mirroring_is_used(self): - self.setup_mirror_rewrite() - fetcher = bb.fetch.Fetch([self.recipe_url], self.d) - - fetcher.download() - - dir = os.listdir(self.dldir) - self.assertIn(self.mirror_tarball, dir) - - -class CleanTarballTest(FetcherTest): - def setUp(self): - super(CleanTarballTest, self).setUp() - self.recipe_url = "git://git.openembedded.org/bitbake;protocol=https;branch=master" - self.recipe_tarball = "git2_git.openembedded.org.bitbake.tar.gz" - - self.d.setVar('BB_GENERATE_MIRROR_TARBALLS', '1') - self.d.setVar('SRCREV', '82ea737a0b42a8b53e11c9cde141e9e9c0bd8c40') - - @skipIfNoNetwork() - def test_that_the_tarball_contents_does_not_leak_info(self): - fetcher = bb.fetch.Fetch([self.recipe_url], self.d) - - fetcher.download() - - fetcher.unpack(self.unpackdir) - mtime = bb.process.run('git log --all -1 --format=%ct', - cwd=os.path.join(self.unpackdir, 'git')) - self.assertEqual(len(mtime), 2) - mtime = int(mtime[0]) - - archive = tarfile.open(os.path.join(self.dldir, self.recipe_tarball)) - self.assertNotEqual(len(archive.members), 0) - for member in archive.members: - if member.name == ".": - continue - self.assertEqual(member.uname, 'oe', "user name for %s differs" % member.name) - self.assertEqual(member.uid, 0, "uid for %s differs" % member.name) - self.assertEqual(member.gname, 'oe', "group name for %s differs" % member.name) - self.assertEqual(member.gid, 0, "gid for %s differs" % member.name) - self.assertEqual(member.mtime, mtime, "mtime for %s differs" % member.name) - - -class FetcherLocalTest(FetcherTest): - def setUp(self): - def touch(fn): - with open(fn, 'a'): - os.utime(fn, None) - - super(FetcherLocalTest, self).setUp() - self.localsrcdir = os.path.join(self.tempdir, 'localsrc') - os.makedirs(self.localsrcdir) - touch(os.path.join(self.localsrcdir, 'a')) - touch(os.path.join(self.localsrcdir, 'b')) - touch(os.path.join(self.localsrcdir, 'c@d')) - os.makedirs(os.path.join(self.localsrcdir, 'dir')) - touch(os.path.join(self.localsrcdir, 'dir', 'c')) - touch(os.path.join(self.localsrcdir, 'dir', 'd')) - 
os.makedirs(os.path.join(self.localsrcdir, 'dir', 'subdir')) - touch(os.path.join(self.localsrcdir, 'dir', 'subdir', 'e')) - touch(os.path.join(self.localsrcdir, r'backslash\x2dsystemd-unit.device')) - bb.process.run('tar cf archive.tar -C dir .', cwd=self.localsrcdir) - bb.process.run('tar czf archive.tar.gz -C dir .', cwd=self.localsrcdir) - bb.process.run('tar cjf archive.tar.bz2 -C dir .', cwd=self.localsrcdir) - self.d.setVar("FILESPATH", self.localsrcdir) - - def fetchUnpack(self, uris): - fetcher = bb.fetch.Fetch(uris, self.d) - fetcher.download() - fetcher.unpack(self.unpackdir) - flst = [] - for root, dirs, files in os.walk(self.unpackdir): - for f in files: - flst.append(os.path.relpath(os.path.join(root, f), self.unpackdir)) - flst.sort() - return flst - - def test_local_checksum_fails_no_file(self): - self.d.setVar("SRC_URI", "file://404") - with self.assertRaises(bb.BBHandledException): - bb.fetch.get_checksum_file_list(self.d) - - def test_local(self): - tree = self.fetchUnpack(['file://a', 'file://dir/c']) - self.assertEqual(tree, ['a', 'dir/c']) - - def test_local_at(self): - tree = self.fetchUnpack(['file://c@d']) - self.assertEqual(tree, ['c@d']) - - def test_local_backslash(self): - tree = self.fetchUnpack([r'file://backslash\x2dsystemd-unit.device']) - self.assertEqual(tree, [r'backslash\x2dsystemd-unit.device']) - - def test_local_wildcard(self): - with self.assertRaises(bb.fetch2.ParameterError): - tree = self.fetchUnpack(['file://a', 'file://dir/*']) - - def test_local_dir(self): - tree = self.fetchUnpack(['file://a', 'file://dir']) - self.assertEqual(tree, ['a', 'dir/c', 'dir/d', 'dir/subdir/e']) - - def test_local_subdir(self): - tree = self.fetchUnpack(['file://dir/subdir']) - self.assertEqual(tree, ['dir/subdir/e']) - - def test_local_subdir_file(self): - tree = self.fetchUnpack(['file://dir/subdir/e']) - self.assertEqual(tree, ['dir/subdir/e']) - - def test_local_subdirparam(self): - tree = self.fetchUnpack(['file://a;subdir=bar', 'file://dir;subdir=foo/moo']) - self.assertEqual(tree, ['bar/a', 'foo/moo/dir/c', 'foo/moo/dir/d', 'foo/moo/dir/subdir/e']) - - def test_local_deepsubdirparam(self): - tree = self.fetchUnpack(['file://dir/subdir/e;subdir=bar']) - self.assertEqual(tree, ['bar/dir/subdir/e']) - - def test_local_absolutedir(self): - # Unpacking to an absolute path that is a subdirectory of the root - # should work - tree = self.fetchUnpack(['file://a;subdir=%s' % os.path.join(self.unpackdir, 'bar')]) - - # Unpacking to an absolute path outside of the root should fail - with self.assertRaises(bb.fetch2.UnpackError): - self.fetchUnpack(['file://a;subdir=/bin/sh']) - - def test_local_striplevel(self): - tree = self.fetchUnpack(['file://archive.tar;subdir=bar;striplevel=1']) - self.assertEqual(tree, ['bar/c', 'bar/d', 'bar/subdir/e']) - - def test_local_striplevel_gzip(self): - tree = self.fetchUnpack(['file://archive.tar.gz;subdir=bar;striplevel=1']) - self.assertEqual(tree, ['bar/c', 'bar/d', 'bar/subdir/e']) - - def test_local_striplevel_bzip2(self): - tree = self.fetchUnpack(['file://archive.tar.bz2;subdir=bar;striplevel=1']) - self.assertEqual(tree, ['bar/c', 'bar/d', 'bar/subdir/e']) - - def dummyGitTest(self, suffix): - # Create dummy local Git repo - src_dir = tempfile.mkdtemp(dir=self.tempdir, - prefix='gitfetch_localusehead_') - self.gitdir = os.path.abspath(src_dir) - self.git_init() - self.git(['commit', '--allow-empty', '-m', 'Dummy commit']) - # Use other branch than master - self.git(['checkout', '-b', 'my-devel']) - self.git(['commit', 
'--allow-empty', '-m', 'Dummy commit 2']) - orig_rev = self.git(['rev-parse', 'HEAD']).strip() - - # Fetch and check revision - self.d.setVar("SRCREV", "AUTOINC") - self.d.setVar("__BBSRCREV_SEEN", "1") - url = "git://" + self.gitdir + ";branch=master;protocol=file;" + suffix - fetcher = bb.fetch.Fetch([url], self.d) - fetcher.download() - fetcher.unpack(self.unpackdir) - unpack_rev = self.git(['rev-parse', 'HEAD'], - cwd=os.path.join(self.unpackdir, 'git')).strip() - self.assertEqual(orig_rev, unpack_rev) - - def test_local_gitfetch_usehead(self): - self.dummyGitTest("usehead=1") - - def test_local_gitfetch_usehead_withname(self): - self.dummyGitTest("usehead=1;name=newName") - - def test_local_gitfetch_shared(self): - self.dummyGitTest("usehead=1;name=sharedName") - alt = os.path.join(self.unpackdir, 'git/.git/objects/info/alternates') - self.assertTrue(os.path.exists(alt)) - - def test_local_gitfetch_noshared(self): - self.d.setVar('BB_GIT_NOSHARED', '1') - self.unpackdir += '_noshared' - self.dummyGitTest("usehead=1;name=noSharedName") - alt = os.path.join(self.unpackdir, 'git/.git/objects/info/alternates') - self.assertFalse(os.path.exists(alt)) - -class FetcherNoNetworkTest(FetcherTest): - def setUp(self): - super().setUp() - # all test cases are based on not having network - self.d.setVar("BB_NO_NETWORK", "1") - - def test_missing(self): - string = "this is a test file\n".encode("utf-8") - self.d.setVarFlag("SRC_URI", "md5sum", hashlib.md5(string).hexdigest()) - self.d.setVarFlag("SRC_URI", "sha256sum", hashlib.sha256(string).hexdigest()) - - self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz"))) - self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done"))) - fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/test-file.tar.gz"], self.d) - with self.assertRaises(bb.fetch2.NetworkAccess): - fetcher.download() - - def test_valid_missing_donestamp(self): - # create the file in the download directory with correct hash - string = "this is a test file\n".encode("utf-8") - with open(os.path.join(self.dldir, "test-file.tar.gz"), "wb") as f: - f.write(string) - - self.d.setVarFlag("SRC_URI", "md5sum", hashlib.md5(string).hexdigest()) - self.d.setVarFlag("SRC_URI", "sha256sum", hashlib.sha256(string).hexdigest()) - - self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz"))) - self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done"))) - fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/test-file.tar.gz"], self.d) - fetcher.download() - self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done"))) - - def test_invalid_missing_donestamp(self): - # create an invalid file in the download directory with incorrect hash - string = "this is a test file\n".encode("utf-8") - with open(os.path.join(self.dldir, "test-file.tar.gz"), "wb"): - pass - - self.d.setVarFlag("SRC_URI", "md5sum", hashlib.md5(string).hexdigest()) - self.d.setVarFlag("SRC_URI", "sha256sum", hashlib.sha256(string).hexdigest()) - - self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz"))) - self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done"))) - fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/test-file.tar.gz"], self.d) - with self.assertRaises(bb.fetch2.NetworkAccess): - fetcher.download() - # the existing file should not exist or should have be moved to "bad-checksum" - self.assertFalse(os.path.exists(os.path.join(self.dldir, 
"test-file.tar.gz"))) - - def test_nochecksums_missing(self): - self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz"))) - self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done"))) - # ssh fetch does not support checksums - fetcher = bb.fetch.Fetch(["ssh://invalid@invalid.yoctoproject.org/test-file.tar.gz"], self.d) - # attempts to download with missing donestamp - with self.assertRaises(bb.fetch2.NetworkAccess): - fetcher.download() - - def test_nochecksums_missing_donestamp(self): - # create a file in the download directory - with open(os.path.join(self.dldir, "test-file.tar.gz"), "wb"): - pass - - self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz"))) - self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done"))) - # ssh fetch does not support checksums - fetcher = bb.fetch.Fetch(["ssh://invalid@invalid.yoctoproject.org/test-file.tar.gz"], self.d) - # attempts to download with missing donestamp - with self.assertRaises(bb.fetch2.NetworkAccess): - fetcher.download() - - def test_nochecksums_has_donestamp(self): - # create a file in the download directory with the donestamp - with open(os.path.join(self.dldir, "test-file.tar.gz"), "wb"): - pass - with open(os.path.join(self.dldir, "test-file.tar.gz.done"), "wb"): - pass - - self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz"))) - self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done"))) - # ssh fetch does not support checksums - fetcher = bb.fetch.Fetch(["ssh://invalid@invalid.yoctoproject.org/test-file.tar.gz"], self.d) - # should not fetch - fetcher.download() - # both files should still exist - self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz"))) - self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done"))) - - def test_nochecksums_missing_has_donestamp(self): - # create a file in the download directory with the donestamp - with open(os.path.join(self.dldir, "test-file.tar.gz.done"), "wb"): - pass - - self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz"))) - self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done"))) - # ssh fetch does not support checksums - fetcher = bb.fetch.Fetch(["ssh://invalid@invalid.yoctoproject.org/test-file.tar.gz"], self.d) - with self.assertRaises(bb.fetch2.NetworkAccess): - fetcher.download() - # both files should still exist - self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz"))) - self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done"))) - -class FetcherNetworkTest(FetcherTest): - @skipIfNoNetwork() - def test_fetch(self): - fetcher = bb.fetch.Fetch(["https://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", "https://downloads.yoctoproject.org/releases/bitbake/bitbake-1.1.tar.gz"], self.d) - fetcher.download() - self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749) - self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.1.tar.gz"), 57892) - self.d.setVar("BB_NO_NETWORK", "1") - fetcher = bb.fetch.Fetch(["https://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", "https://downloads.yoctoproject.org/releases/bitbake/bitbake-1.1.tar.gz"], self.d) - fetcher.download() - fetcher.unpack(self.unpackdir) - self.assertEqual(len(os.listdir(self.unpackdir + "/bitbake-1.0/")), 9) - self.assertEqual(len(os.listdir(self.unpackdir + "/bitbake-1.1/")), 9) - - @skipIfNoNetwork() - def 
test_fetch_mirror(self): - self.d.setVar("MIRRORS", "http://.*/.* https://downloads.yoctoproject.org/releases/bitbake") - fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d) - fetcher.download() - self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749) - - @skipIfNoNetwork() - def test_fetch_mirror_of_mirror(self): - self.d.setVar("MIRRORS", "http://.*/.* http://invalid2.yoctoproject.org/ http://invalid2.yoctoproject.org/.* https://downloads.yoctoproject.org/releases/bitbake") - fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d) - fetcher.download() - self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749) - - @skipIfNoNetwork() - def test_fetch_file_mirror_of_mirror(self): - self.d.setVar("FILESPATH", ".") - self.d.setVar("MIRRORS", "http://.*/.* file:///some1where/ file:///some1where/.* file://some2where/ file://some2where/.* https://downloads.yoctoproject.org/releases/bitbake") - fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d) - os.mkdir(self.dldir + "/some2where") - fetcher.download() - self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749) - - @skipIfNoNetwork() - def test_fetch_premirror(self): - self.d.setVar("PREMIRRORS", "http://.*/.* https://downloads.yoctoproject.org/releases/bitbake") - fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d) - fetcher.download() - self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749) - - @skipIfNoNetwork() - def test_fetch_specify_downloadfilename(self): - fetcher = bb.fetch.Fetch(["https://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz;downloadfilename=bitbake-v1.0.0.tar.gz"], self.d) - fetcher.download() - self.assertEqual(os.path.getsize(self.dldir + "/bitbake-v1.0.0.tar.gz"), 57749) - - @skipIfNoNetwork() - def test_fetch_premirror_specify_downloadfilename_regex_uri(self): - self.d.setVar("PREMIRRORS", "http://.*/.* https://downloads.yoctoproject.org/releases/bitbake/") - fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/1.0.tar.gz;downloadfilename=bitbake-1.0.tar.gz"], self.d) - fetcher.download() - self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749) - - @skipIfNoNetwork() - # BZ13039 - def test_fetch_premirror_specify_downloadfilename_specific_uri(self): - self.d.setVar("PREMIRRORS", "http://invalid.yoctoproject.org/releases/bitbake https://downloads.yoctoproject.org/releases/bitbake") - fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/1.0.tar.gz;downloadfilename=bitbake-1.0.tar.gz"], self.d) - fetcher.download() - self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749) - - @skipIfNoNetwork() - def test_fetch_premirror_use_downloadfilename_to_fetch(self): - # Ensure downloadfilename is used when fetching from premirror. 
- self.d.setVar("PREMIRRORS", "http://.*/.* https://downloads.yoctoproject.org/releases/bitbake") - fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.1.tar.gz;downloadfilename=bitbake-1.0.tar.gz"], self.d) - fetcher.download() - self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749) - - @skipIfNoNetwork() - def gitfetcher(self, url1, url2): - def checkrevision(self, fetcher): - fetcher.unpack(self.unpackdir) - revision = self.git(['rev-parse', 'HEAD'], - cwd=os.path.join(self.unpackdir, 'git')).strip() - self.assertEqual(revision, "270a05b0b4ba0959fe0624d2a4885d7b70426da5") - - self.d.setVar("BB_GENERATE_MIRROR_TARBALLS", "1") - self.d.setVar("SRCREV", "270a05b0b4ba0959fe0624d2a4885d7b70426da5") - fetcher = bb.fetch.Fetch([url1], self.d) - fetcher.download() - checkrevision(self, fetcher) - # Wipe out the dldir clone and the unpacked source, turn off the network and check mirror tarball works - bb.utils.prunedir(self.dldir + "/git2/") - bb.utils.prunedir(self.unpackdir) - self.d.setVar("BB_NO_NETWORK", "1") - fetcher = bb.fetch.Fetch([url2], self.d) - fetcher.download() - checkrevision(self, fetcher) - - @skipIfNoNetwork() - def test_gitfetch(self): - url1 = url2 = "git://git.openembedded.org/bitbake;branch=master;protocol=https" - self.gitfetcher(url1, url2) - - @skipIfNoNetwork() - def test_gitfetch_goodsrcrev(self): - # SRCREV is set but matches rev= parameter - url1 = url2 = "git://git.openembedded.org/bitbake;rev=270a05b0b4ba0959fe0624d2a4885d7b70426da5;branch=master;protocol=https" - self.gitfetcher(url1, url2) - - @skipIfNoNetwork() - def test_gitfetch_badsrcrev(self): - # SRCREV is set but does not match rev= parameter - url1 = url2 = "git://git.openembedded.org/bitbake;rev=dead05b0b4ba0959fe0624d2a4885d7b70426da5;branch=master;protocol=https" - self.assertRaises(bb.fetch.FetchError, self.gitfetcher, url1, url2) - - @skipIfNoNetwork() - def test_gitfetch_usehead(self): - # Since self.gitfetcher() sets SRCREV we expect this to override - # `usehead=1' and instead fetch the specified SRCREV. See - # test_local_gitfetch_usehead() for a positive use of the usehead - # feature. - url = "git://git.openembedded.org/bitbake;usehead=1;branch=master;protocol=https" - self.assertRaises(bb.fetch.ParameterError, self.gitfetcher, url, url) - - @skipIfNoNetwork() - def test_gitfetch_usehead_withname(self): - # Since self.gitfetcher() sets SRCREV we expect this to override - # `usehead=1' and instead fetch the specified SRCREV. See - # test_local_gitfetch_usehead() for a positive use of the usehead - # feature. 
- url = "git://git.openembedded.org/bitbake;usehead=1;name=newName;branch=master;protocol=https" - self.assertRaises(bb.fetch.ParameterError, self.gitfetcher, url, url) - - @skipIfNoNetwork() - def test_gitfetch_finds_local_tarball_for_mirrored_url_when_previous_downloaded_by_the_recipe_url(self): - recipeurl = "git://git.openembedded.org/bitbake;branch=master;protocol=https" - mirrorurl = "git://someserver.org/bitbake;branch=master;protocol=https" - self.d.setVar("PREMIRRORS", "git://someserver.org/bitbake git://git.openembedded.org/bitbake") - self.gitfetcher(recipeurl, mirrorurl) - - @skipIfNoNetwork() - def test_gitfetch_finds_local_tarball_when_previous_downloaded_from_a_premirror(self): - recipeurl = "git://someserver.org/bitbake;branch=master;protocol=https" - self.d.setVar("PREMIRRORS", "git://someserver.org/bitbake git://git.openembedded.org/bitbake") - self.gitfetcher(recipeurl, recipeurl) - - @skipIfNoNetwork() - def test_gitfetch_finds_local_repository_when_premirror_rewrites_the_recipe_url(self): - realurl = "https://git.openembedded.org/bitbake" - recipeurl = "git://someserver.org/bitbake;protocol=https;branch=master" - self.sourcedir = self.unpackdir.replace("unpacked", "sourcemirror.git") - os.chdir(self.tempdir) - self.git(['clone', realurl, self.sourcedir], cwd=self.tempdir) - self.d.setVar("PREMIRRORS", "%s git://%s;protocol=file" % (recipeurl, self.sourcedir)) - self.gitfetcher(recipeurl, recipeurl) - - @skipIfNoNetwork() - def test_git_submodule(self): - # URL with ssh submodules - url = "gitsm://git.yoctoproject.org/git-submodule-test;branch=ssh-gitsm-tests;rev=049da4a6cb198d7c0302e9e8b243a1443cb809a7;branch=master;protocol=https" - # Original URL (comment this if you have ssh access to git.yoctoproject.org) - url = "gitsm://git.yoctoproject.org/git-submodule-test;branch=master;rev=a2885dd7d25380d23627e7544b7bbb55014b16ee;branch=master;protocol=https" - fetcher = bb.fetch.Fetch([url], self.d) - fetcher.download() - # Previous cwd has been deleted - os.chdir(os.path.dirname(self.unpackdir)) - fetcher.unpack(self.unpackdir) - - repo_path = os.path.join(self.tempdir, 'unpacked', 'git') - self.assertTrue(os.path.exists(repo_path), msg='Unpacked repository missing') - self.assertTrue(os.path.exists(os.path.join(repo_path, 'bitbake')), msg='bitbake submodule missing') - self.assertFalse(os.path.exists(os.path.join(repo_path, 'na')), msg='uninitialized submodule present') - - # Only when we're running the extended test with a submodule's submodule, can we check this. 
- if os.path.exists(os.path.join(repo_path, 'bitbake-gitsm-test1')): - self.assertTrue(os.path.exists(os.path.join(repo_path, 'bitbake-gitsm-test1', 'bitbake')), msg='submodule of submodule missing') - - @skipIfNoNetwork() - def test_git_submodule_restricted_network_premirrors(self): - # This test ensures that premirrors are tried in a restricted network, - # i.e. when BB_ALLOWED_NETWORKS does not contain the domain the URL uses - url = "gitsm://github.com/grpc/grpc.git;protocol=https;name=grpc;branch=v1.60.x;rev=0ef13a7555dbaadd4633399242524129eef5e231" - # Create a download directory to be used as a premirror later - tempdir = tempfile.mkdtemp(prefix="bitbake-fetch-") - dl_premirror = os.path.join(tempdir, "download-premirror") - os.mkdir(dl_premirror) - self.d.setVar("DL_DIR", dl_premirror) - fetcher = bb.fetch.Fetch([url], self.d) - fetcher.download() - # Now use the premirror in the restricted network - self.d.setVar("DL_DIR", self.dldir) - self.d.setVar("PREMIRRORS", "gitsm://.*/.* gitsm://%s/git2/MIRRORNAME;protocol=file" % dl_premirror) - self.d.setVar("BB_ALLOWED_NETWORKS", "*.some.domain") - fetcher = bb.fetch.Fetch([url], self.d) - fetcher.download() - - @skipIfNoNetwork() - def test_git_submodule_dbus_broker(self): - # The following external repositories have shown failures in fetch and unpack operations - # We want to avoid regressions! - url = "gitsm://github.com/bus1/dbus-broker;protocol=https;rev=fc874afa0992d0c75ec25acb43d344679f0ee7d2;branch=main" - fetcher = bb.fetch.Fetch([url], self.d) - fetcher.download() - # Previous cwd has been deleted - os.chdir(os.path.dirname(self.unpackdir)) - fetcher.unpack(self.unpackdir) - - repo_path = os.path.join(self.tempdir, 'unpacked', 'git') - self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/subprojects/c-dvar/config')), msg='Missing submodule config "subprojects/c-dvar"') - self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/subprojects/c-list/config')), msg='Missing submodule config "subprojects/c-list"') - self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/subprojects/c-rbtree/config')), msg='Missing submodule config "subprojects/c-rbtree"') - self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/subprojects/c-sundry/config')), msg='Missing submodule config "subprojects/c-sundry"') - self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/subprojects/c-utf8/config')), msg='Missing submodule config "subprojects/c-utf8"') - - @skipIfNoNetwork() - def test_git_submodule_CLI11(self): - url = "gitsm://github.com/CLIUtils/CLI11;protocol=https;rev=bd4dc911847d0cde7a6b41dfa626a85aab213baf;branch=main" - fetcher = bb.fetch.Fetch([url], self.d) - fetcher.download() - # Previous cwd has been deleted - os.chdir(os.path.dirname(self.unpackdir)) - fetcher.unpack(self.unpackdir) - - repo_path = os.path.join(self.tempdir, 'unpacked', 'git') - self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/extern/googletest/config')), msg='Missing submodule config "extern/googletest"') - self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/extern/json/config')), msg='Missing submodule config "extern/json"') - self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/extern/sanitizers/config')), msg='Missing submodule config "extern/sanitizers"') - - @skipIfNoNetwork() - def test_git_submodule_update_CLI11(self): - """ Prevent regressions where update detection fails to find a missing submodule, or submodules lacking the needed commits """ - url =
"gitsm://github.com/CLIUtils/CLI11;protocol=https;rev=cf6a99fa69aaefe477cc52e3ef4a7d2d7fa40714;branch=main" - fetcher = bb.fetch.Fetch([url], self.d) - fetcher.download() - - # CLI11 that pulls in a newer nlohmann-json - url = "gitsm://github.com/CLIUtils/CLI11;protocol=https;rev=49ac989a9527ee9bb496de9ded7b4872c2e0e5ca;branch=main" - fetcher = bb.fetch.Fetch([url], self.d) - fetcher.download() - # Previous cwd has been deleted - os.chdir(os.path.dirname(self.unpackdir)) - fetcher.unpack(self.unpackdir) - - repo_path = os.path.join(self.tempdir, 'unpacked', 'git') - self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/extern/googletest/config')), msg='Missing submodule config "extern/googletest"') - self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/extern/json/config')), msg='Missing submodule config "extern/json"') - self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/extern/sanitizers/config')), msg='Missing submodule config "extern/sanitizers"') - - @skipIfNoNetwork() - def test_git_submodule_aktualizr(self): - url = "gitsm://github.com/advancedtelematic/aktualizr;branch=master;protocol=https;rev=d00d1a04cc2366d1a5f143b84b9f507f8bd32c44" - fetcher = bb.fetch.Fetch([url], self.d) - fetcher.download() - # Previous cwd has been deleted - os.chdir(os.path.dirname(self.unpackdir)) - fetcher.unpack(self.unpackdir) - - repo_path = os.path.join(self.tempdir, 'unpacked', 'git') - self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/partial/extern/isotp-c/config')), msg='Missing submodule config "partial/extern/isotp-c/config"') - self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/partial/extern/isotp-c/modules/deps/bitfield-c/config')), msg='Missing submodule config "partial/extern/isotp-c/modules/deps/bitfield-c/config"') - self.assertTrue(os.path.exists(os.path.join(repo_path, 'partial/extern/isotp-c/deps/bitfield-c/.git')), msg="Submodule of submodule isotp-c did not unpack properly") - self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/tests/tuf-test-vectors/config')), msg='Missing submodule config "tests/tuf-test-vectors/config"') - self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/third_party/googletest/config')), msg='Missing submodule config "third_party/googletest/config"') - self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/third_party/HdrHistogram_c/config')), msg='Missing submodule config "third_party/HdrHistogram_c/config"') - - @skipIfNoNetwork() - def test_git_submodule_iotedge(self): - """ Prevent regression on deeply nested submodules not being checked out properly, even though they were fetched. 
""" - - # This repository also has submodules where the module (name), path and url do not align - url = "gitsm://github.com/azure/iotedge.git;protocol=https;rev=d76e0316c6f324345d77c48a83ce836d09392699;branch=main" - fetcher = bb.fetch.Fetch([url], self.d) - fetcher.download() - # Previous cwd has been deleted - os.chdir(os.path.dirname(self.unpackdir)) - fetcher.unpack(self.unpackdir) - - repo_path = os.path.join(self.tempdir, 'unpacked', 'git') - - self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/c-shared/README.md')), msg='Missing submodule checkout') - self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/c-shared/testtools/ctest/README.md')), msg='Missing submodule checkout') - self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/c-shared/testtools/testrunner/readme.md')), msg='Missing submodule checkout') - self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/c-shared/testtools/umock-c/readme.md')), msg='Missing submodule checkout') - self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/c-shared/testtools/umock-c/deps/ctest/README.md')), msg='Missing submodule checkout') - self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/c-shared/testtools/umock-c/deps/testrunner/readme.md')), msg='Missing submodule checkout') - self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/utpm/README.md')), msg='Missing submodule checkout') - self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/utpm/deps/c-utility/README.md')), msg='Missing submodule checkout') - self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/utpm/deps/c-utility/testtools/ctest/README.md')), msg='Missing submodule checkout') - self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/utpm/deps/c-utility/testtools/testrunner/readme.md')), msg='Missing submodule checkout') - self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/utpm/deps/c-utility/testtools/umock-c/readme.md')), msg='Missing submodule checkout') - self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/utpm/deps/c-utility/testtools/umock-c/deps/ctest/README.md')), msg='Missing submodule checkout') - self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/utpm/deps/c-utility/testtools/umock-c/deps/testrunner/readme.md')), msg='Missing submodule checkout') - - @skipIfNoNetwork() - def test_git_submodule_reference_to_parent(self): - self.recipe_url = "gitsm://github.com/gflags/gflags.git;protocol=https;branch=master" - self.d.setVar("SRCREV", "14e1138441bbbb584160cb1c0a0426ec1bac35f1") - with Timeout(60): - fetcher = bb.fetch.Fetch([self.recipe_url], self.d) - with self.assertRaises(bb.fetch2.FetchError): - fetcher.download() - -class SVNTest(FetcherTest): - def skipIfNoSvn(): - if not shutil.which("svn"): - return unittest.skip("svn not installed, tests being skipped") - - if not shutil.which("svnadmin"): - return unittest.skip("svnadmin not installed, tests being skipped") - - return lambda f: f - - @skipIfNoSvn() - def setUp(self): - """ Create a local repository """ - - super(SVNTest, self).setUp() - - # Create something we can fetch - src_dir = 
tempfile.mkdtemp(dir=self.tempdir, - prefix='svnfetch_srcdir_') - src_dir = os.path.abspath(src_dir) - bb.process.run("echo readme > README.md", cwd=src_dir) - - # Store it in a local SVN repository - repo_dir = tempfile.mkdtemp(dir=self.tempdir, - prefix='svnfetch_localrepo_') - repo_dir = os.path.abspath(repo_dir) - bb.process.run("svnadmin create project", cwd=repo_dir) - - self.repo_url = "file://%s/project" % repo_dir - bb.process.run("svn import --non-interactive -m 'Initial import' %s %s/trunk" % (src_dir, self.repo_url), - cwd=repo_dir) - - bb.process.run("svn co %s svnfetch_co" % self.repo_url, cwd=self.tempdir) - # GitHub won't emulate SVN anymore (see https://github.blog/2023-01-20-sunsetting-subversion-support/) - # Use a still-accessible svn repo instead (only trunk, to avoid longer downloads) - bb.process.run("svn propset svn:externals 'bitbake https://svn.apache.org/repos/asf/serf/trunk' .", - cwd=os.path.join(self.tempdir, 'svnfetch_co', 'trunk')) - bb.process.run("svn commit --non-interactive -m 'Add external'", - cwd=os.path.join(self.tempdir, 'svnfetch_co', 'trunk')) - - self.src_dir = src_dir - self.repo_dir = repo_dir - - @skipIfNoSvn() - def tearDown(self): - os.chdir(self.origdir) - if os.environ.get("BB_TMPDIR_NOCLEAN") == "yes": - print("Not cleaning up %s. Please remove manually." % self.tempdir) - else: - bb.utils.prunedir(self.tempdir) - - @skipIfNoSvn() - @skipIfNoNetwork() - def test_noexternal_svn(self): - # Always match the rev count from setUp (currently rev 2) - url = "svn://%s;module=trunk;protocol=file;rev=2" % self.repo_url.replace('file://', '') - fetcher = bb.fetch.Fetch([url], self.d) - fetcher.download() - os.chdir(os.path.dirname(self.unpackdir)) - fetcher.unpack(self.unpackdir) - - self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'trunk')), msg="Missing trunk") - self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'trunk', 'README.md')), msg="Missing contents") - self.assertFalse(os.path.exists(os.path.join(self.unpackdir, 'trunk/bitbake/protocols')), msg="External dir should NOT exist") - self.assertFalse(os.path.exists(os.path.join(self.unpackdir, 'trunk/bitbake/protocols', 'fcgi_buckets.h')), msg="External fcgi_buckets.h should NOT exist") - - @skipIfNoSvn() - def test_external_svn(self): - # Always match the rev count from setUp (currently rev 2) - url = "svn://%s;module=trunk;protocol=file;externals=allowed;rev=2" % self.repo_url.replace('file://', '') - fetcher = bb.fetch.Fetch([url], self.d) - fetcher.download() - os.chdir(os.path.dirname(self.unpackdir)) - fetcher.unpack(self.unpackdir) - - self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'trunk')), msg="Missing trunk") - self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'trunk', 'README.md')), msg="Missing contents") - self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'trunk/bitbake/protocols')), msg="External dir should exist") - self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'trunk/bitbake/protocols', 'fcgi_buckets.h')), msg="External fcgi_buckets.h should exist") - -class TrustedNetworksTest(FetcherTest): - def test_trusted_network(self): - # Ensure trusted_network returns True when the host IS in the list. - url = "git://Someserver.org/foo;rev=1;branch=master" - self.d.setVar("BB_ALLOWED_NETWORKS", "server1.org someserver.org server2.org server3.org") - self.assertTrue(bb.fetch.trusted_network(self.d, url)) - - def test_wild_trusted_network(self): - # Ensure trusted_network returns True when the *.host IS in the list - # (a standalone sketch of this matching follows below).
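A standalone approximation of the host matching these TrustedNetworksTest cases exercise (illustrative only; the real logic lives in bb.fetch.trusted_network): matching is case-insensitive, the port is ignored, and a '*.' entry covers the domain and all of its subdomains.

from urllib.parse import urlsplit

def host_is_trusted(uri, allowed_networks):
    # Hypothetical re-implementation for illustration.
    host = (urlsplit(uri.split(";", 1)[0]).hostname or "").lower()
    for entry in allowed_networks.lower().split():
        if entry.startswith("*."):
            if ("." + host).endswith(entry[1:]):  # domain or any subdomain
                return True
        elif host == entry:
            return True
    return False

assert host_is_trusted("git://git.Someserver.org/foo", "*.someserver.org")
assert not host_is_trusted("git://other.org/foo", "someserver.org")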
- url = "git://Someserver.org/foo;rev=1;branch=master" - self.d.setVar("BB_ALLOWED_NETWORKS", "server1.org *.someserver.org server2.org server3.org") - self.assertTrue(bb.fetch.trusted_network(self.d, url)) - - def test_prefix_wild_trusted_network(self): - # Ensure trusted_network returns true when the prefix matches *.host. - url = "git://git.Someserver.org/foo;rev=1;branch=master" - self.d.setVar("BB_ALLOWED_NETWORKS", "server1.org *.someserver.org server2.org server3.org") - self.assertTrue(bb.fetch.trusted_network(self.d, url)) - - def test_two_prefix_wild_trusted_network(self): - # Ensure trusted_network returns true when the prefix matches *.host. - url = "git://something.git.Someserver.org/foo;rev=1;branch=master" - self.d.setVar("BB_ALLOWED_NETWORKS", "server1.org *.someserver.org server2.org server3.org") - self.assertTrue(bb.fetch.trusted_network(self.d, url)) - - def test_port_trusted_network(self): - # Ensure trusted_network returns True, even if the url specifies a port. - url = "git://someserver.org:8080/foo;rev=1;branch=master" - self.d.setVar("BB_ALLOWED_NETWORKS", "someserver.org") - self.assertTrue(bb.fetch.trusted_network(self.d, url)) - - def test_untrusted_network(self): - # Ensure trusted_network returns False when the host is NOT in the list. - url = "git://someserver.org/foo;rev=1;branch=master" - self.d.setVar("BB_ALLOWED_NETWORKS", "server1.org server2.org server3.org") - self.assertFalse(bb.fetch.trusted_network(self.d, url)) - - def test_wild_untrusted_network(self): - # Ensure trusted_network returns False when the host is NOT in the list. - url = "git://*.someserver.org/foo;rev=1;branch=master" - self.d.setVar("BB_ALLOWED_NETWORKS", "server1.org server2.org server3.org") - self.assertFalse(bb.fetch.trusted_network(self.d, url)) - -class URLHandle(unittest.TestCase): - # Quote password as per RFC3986 - password = urllib.parse.quote(r"!#$%^&*()-_={}[]\|:?,.<>~`", r"!$&'/()*+,;=") - datatable = { - "http://www.google.com/index.html" : ('http', 'www.google.com', '/index.html', '', '', {}), - "cvs://anoncvs@cvs.handhelds.org/cvs;module=familiar/dist/ipkg" : ('cvs', 'cvs.handhelds.org', '/cvs', 'anoncvs', '', {'module': 'familiar/dist/ipkg'}), - "cvs://anoncvs:anonymous@cvs.handhelds.org/cvs;tag=V0-99-81;module=familiar/dist/ipkg" : ('cvs', 'cvs.handhelds.org', '/cvs', 'anoncvs', 'anonymous', collections.OrderedDict([('tag', 'V0-99-81'), ('module', 'familiar/dist/ipkg')])), - "git://git.openembedded.org/bitbake;branch=@foo;protocol=https" : ('git', 'git.openembedded.org', '/bitbake', '', '', {'branch': '@foo', 'protocol' : 'https'}), - "file://somelocation;someparam=1": ('file', '', 'somelocation', '', '', {'someparam': '1'}), - "file://example@.service": ('file', '', 'example@.service', '', '', {}), - "https://somesite.com/somerepo.git;user=anyUser:idtoken=1234" : ('https', 'somesite.com', '/somerepo.git', '', '', {'user': 'anyUser:idtoken=1234'}), - 'git://s.o-me_ONE:%s@git.openembedded.org/bitbake;branch=main;protocol=https' % password: ('git', 'git.openembedded.org', '/bitbake', 's.o-me_ONE', password, {'branch': 'main', 'protocol' : 'https'}), - } - # we require a pathname to encodeurl but users can still pass such urls to - # decodeurl and we need to handle them - decodedata = datatable.copy() - decodedata.update({ - "http://somesite.net;someparam=1": ('http', 'somesite.net', '/', '', '', {'someparam': '1'}), - "npmsw://some.registry.url;package=@pkg;version=latest": ('npmsw', 'some.registry.url', '/', '', '', {'package': '@pkg', 'version': 'latest'}), - }) - 
- def test_decodeurl(self): - for k, v in self.decodedata.items(): - result = bb.fetch.decodeurl(k) - self.assertEqual(result, v) - - def test_encodeurl(self): - for k, v in self.datatable.items(): - result = bb.fetch.encodeurl(v) - if result.startswith("file:"): - result = urllib.parse.unquote(result) - self.assertEqual(result, k) - -class FetchLatestVersionTest(FetcherTest): - - test_git_uris = { - # version pattern "X.Y.Z" - ("mx-1.0", "git://github.com/clutter-project/mx.git;branch=mx-1.4;protocol=https", "9b1db6b8060bd00b121a692f942404a24ae2960f", "", "") - : "1.99.4", - # version pattern "vX.Y" - # mirror of git.infradead.org since network issues interfered with testing - ("mtd-utils", "git://git.yoctoproject.org/mtd-utils.git;branch=master;protocol=https", "ca39eb1d98e736109c64ff9c1aa2a6ecca222d8f", "", "") - : "1.5.0", - # version pattern "pkg_name-X.Y" - # mirror of git://anongit.freedesktop.org/git/xorg/proto/presentproto since network issues interfered with testing - ("presentproto", "git://git.yoctoproject.org/bbfetchtests-presentproto;branch=master;protocol=https", "24f3a56e541b0a9e6c6ee76081f441221a120ef9", "", "") - : "1.0", - # version pattern "pkg_name-vX.Y.Z" - ("dtc", "git://git.yoctoproject.org/bbfetchtests-dtc.git;branch=master;protocol=https", "65cc4d2748a2c2e6f27f1cf39e07a5dbabd80ebf", "", "") - : "1.4.0", - # combination version pattern - ("sysprof", "git://git.yoctoproject.org/sysprof.git;protocol=https;branch=master", "cd44ee6644c3641507fb53b8a2a69137f2971219", "", "") - : "1.2.0", - ("u-boot-mkimage", "git://git.yoctoproject.org/bbfetchtests-u-boot.git;branch=master;protocol=https", "62c175fbb8a0f9a926c88294ea9f7e88eb898f6c", "", "") - : "2014.01", - # version pattern "yyyymmdd" - ("mobile-broadband-provider-info", "git://git.yoctoproject.org/mobile-broadband-provider-info.git;protocol=https;branch=master", "4ed19e11c2975105b71b956440acdb25d46a347d", "", "") - : "20120614", - # packages with a valid UPSTREAM_CHECK_GITTAGREGEX - # mirror of git://anongit.freedesktop.org/xorg/driver/xf86-video-omap since network issues interfered with testing - ("xf86-video-omap", "git://git.yoctoproject.org/bbfetchtests-xf86-video-omap;branch=master;protocol=https", "ae0394e687f1a77e966cf72f895da91840dffb8f", r"(?P<pver>(\d+\.(\d\.?)*))", "") - : "0.4.3", - ("build-appliance-image", "git://git.yoctoproject.org/poky;branch=master;protocol=https", "b37dd451a52622d5b570183a81583cc34c2ff555", r"(?P<pver>(([0-9][\.|_]?)+[0-9]))", "") - : "11.0.0", - ("chkconfig-alternatives-native", "git://github.com/kergoth/chkconfig;branch=sysroot;protocol=https", "cd437ecbd8986c894442f8fce1e0061e20f04dee", r"chkconfig\-(?P<pver>((\d+[\.\-_]*)+))", "") - : "1.3.59", - ("remake", "git://github.com/rocky/remake.git;protocol=https;branch=master", "f05508e521987c8494c92d9c2871aec46307d51d", r"(?P<pver>(\d+\.(\d+\.)*\d*(\+dbg\d+(\.\d+)*)*))", "") - : "3.82+dbg0.9", - ("sysdig", "git://github.com/draios/sysdig.git;branch=dev;protocol=https", "4fb6288275f567f63515df0ff0a6518043ecfa9b", r"^(?P<pver>\d+(\.\d+)+)", "10.0.0") - : "0.28.0", - } - - WgetTestData = collections.namedtuple("WgetTestData", ["pn", "path", "pv", "check_uri", "check_regex"], defaults=[None, None, None]) - test_wget_uris = { - # - # packages with versions inside directory name - # - # http://kernel.org/pub/linux/utils/util-linux/v2.23/util-linux-2.24.2.tar.bz2 - WgetTestData("util-linux", "/pub/linux/utils/util-linux/v2.23/util-linux-2.24.2.tar.bz2") - : "2.24.2", - # http://www.abisource.com/downloads/enchant/1.6.0/enchant-1.6.0.tar.gz - WgetTestData("enchant",
"/downloads/enchant/1.6.0/enchant-1.6.0.tar.gz") - : "1.6.0", - # http://www.cmake.org/files/v2.8/cmake-2.8.12.1.tar.gz - WgetTestData("cmake", "/files/v2.8/cmake-2.8.12.1.tar.gz") - : "2.8.12.1", - # https://download.gnome.org/sources/libxml2/2.9/libxml2-2.9.14.tar.xz - WgetTestData("libxml2", "/software/libxml2/2.9/libxml2-2.9.14.tar.xz") - : "2.10.3", - # - # packages with versions only in current directory - # - # https://downloads.yoctoproject.org/releases/eglibc/eglibc-2.18-svnr23787.tar.bz2 - WgetTestData("eglic", "/releases/eglibc/eglibc-2.18-svnr23787.tar.bz2") - : "2.19", - # https://downloads.yoctoproject.org/releases/gnu-config/gnu-config-20120814.tar.bz2 - WgetTestData("gnu-config", "/releases/gnu-config/gnu-config-20120814.tar.bz2") - : "20120814", - # - # packages with "99" in the name of possible version - # - # http://freedesktop.org/software/pulseaudio/releases/pulseaudio-4.0.tar.xz - WgetTestData("pulseaudio", "/software/pulseaudio/releases/pulseaudio-4.0.tar.xz") - : "5.0", - # http://xorg.freedesktop.org/releases/individual/xserver/xorg-server-1.15.1.tar.bz2 - WgetTestData("xserver-xorg", "/releases/individual/xserver/xorg-server-1.15.1.tar.bz2") - : "1.15.1", - # - # packages with valid UPSTREAM_CHECK_URI and UPSTREAM_CHECK_REGEX - # - # http://www.cups.org/software/1.7.2/cups-1.7.2-source.tar.bz2 - # https://github.com/apple/cups/releases - WgetTestData("cups", "/software/1.7.2/cups-1.7.2-source.tar.bz2", check_uri="/apple/cups/releases", check_regex=r"(?Pcups\-)(?P((\d+[\.\-_]*)+))\-source\.tar\.gz") - : "2.0.0", - # http://download.oracle.com/berkeley-db/db-5.3.21.tar.gz - # http://ftp.debian.org/debian/pool/main/d/db5.3/ - WgetTestData("db", "/berkeley-db/db-5.3.21.tar.gz", check_uri="/debian/pool/main/d/db5.3/", check_regex=r"(?Pdb5\.3_)(?P\d+(\.\d+)+).+\.orig\.tar\.xz") - : "5.3.10", - # - # packages where the tarball compression changed in the new version - # - # http://ftp.debian.org/debian/pool/main/m/minicom/minicom_2.7.1.orig.tar.gz - WgetTestData("minicom", "/debian/pool/main/m/minicom/minicom_2.7.1.orig.tar.gz") - : "2.8", - - # - # packages where the path doesn't actually contain the filename, so downloadfilename should be respected - # - WgetTestData("miniupnpd", "/software/miniupnp/download.php?file=miniupnpd_2.1.20191006.tar.gz;downloadfilename=miniupnpd_2.1.20191006.tar.gz", pv="2.1.20191006", check_uri="/software/miniupnp/download.php", check_regex=r"miniupnpd-(?P\d+(\.\d+)+)\.tar") - : "2.3.7", - } - - test_crate_uris = { - # basic example; version pattern "A.B.C+cargo-D.E.F" - ("cargo-c", "crate://crates.io/cargo-c/0.9.18+cargo-0.69") - : "0.9.29" - } - - @skipIfNoNetwork() - def test_git_latest_versionstring(self): - for k, v in self.test_git_uris.items(): - with self.subTest(pn=k[0]): - self.d.setVar("PN", k[0]) - self.d.setVar("SRCREV", k[2]) - self.d.setVar("UPSTREAM_CHECK_GITTAGREGEX", k[3]) - ud = bb.fetch2.FetchData(k[1], self.d) - pupver= ud.method.latest_versionstring(ud, self.d) - verstring = pupver[0] - self.assertTrue(verstring, msg="Could not find upstream version for %s" % k[0]) - r = bb.utils.vercmp_string(v, verstring) - self.assertTrue(r == -1 or r == 0, msg="Package %s, version: %s <= %s" % (k[0], v, verstring)) - if k[4]: - r = bb.utils.vercmp_string(verstring, k[4]) - self.assertTrue(r == -1 or r == 0, msg="Package %s, version: %s <= %s" % (k[0], verstring, k[4])) - - def test_wget_latest_versionstring(self): - testdata = os.path.dirname(os.path.abspath(__file__)) + "/fetch-testdata" - server = HTTPService(testdata, 
host="127.0.0.1") - server.start() - port = server.port - try: - for data, v in self.test_wget_uris.items(): - with self.subTest(pn=data.pn): - self.d.setVar("PN", data.pn) - self.d.setVar("PV", data.pv) - if data.check_uri: - checkuri = "http://127.0.0.1:%s/%s" % (port, data.check_uri) - self.d.setVar("UPSTREAM_CHECK_URI", checkuri) - if data.check_regex: - self.d.setVar("UPSTREAM_CHECK_REGEX", data.check_regex) - - url = "http://127.0.0.1:%s/%s" % (port, data.path) - ud = bb.fetch2.FetchData(url, self.d) - pupver = ud.method.latest_versionstring(ud, self.d) - verstring = pupver[0] - self.assertTrue(verstring, msg="Could not find upstream version for %s" % data.pn) - r = bb.utils.vercmp_string(v, verstring) - self.assertTrue(r == -1 or r == 0, msg="Package %s, version: %s <= %s" % (data.pn, v, verstring)) - finally: - server.stop() - - @skipIfNoNetwork() - def test_crate_latest_versionstring(self): - for k, v in self.test_crate_uris.items(): - with self.subTest(pn=k[0]): - self.d.setVar("PN", k[0]) - ud = bb.fetch2.FetchData(k[1], self.d) - pupver = ud.method.latest_versionstring(ud, self.d) - verstring = pupver[0] - self.assertTrue(verstring, msg="Could not find upstream version for %s" % k[0]) - r = bb.utils.vercmp_string(v, verstring) - self.assertTrue(r == -1 or r == 0, msg="Package %s, version: %s <= %s" % (k[0], v, verstring)) - -class FetchCheckStatusTest(FetcherTest): - test_wget_uris = ["https://downloads.yoctoproject.org/releases/sato/sato-engine-0.1.tar.gz", - "https://downloads.yoctoproject.org/releases/sato/sato-engine-0.2.tar.gz", - "https://downloads.yoctoproject.org/releases/sato/sato-engine-0.3.tar.gz", - "https://yoctoproject.org/", - "https://docs.yoctoproject.org", - "https://downloads.yoctoproject.org/releases/opkg/opkg-0.1.7.tar.gz", - "https://downloads.yoctoproject.org/releases/opkg/opkg-0.3.0.tar.gz", - "ftp://sourceware.org/pub/libffi/libffi-1.20.tar.gz", - # GitHub releases are hosted on Amazon S3, which doesn't support HEAD - "https://github.com/kergoth/tslib/releases/download/1.1/tslib-1.1.tar.xz" - ] - - @skipIfNoNetwork() - def test_wget_checkstatus(self): - fetch = bb.fetch2.Fetch(self.test_wget_uris, self.d) - for u in self.test_wget_uris: - with self.subTest(url=u): - ud = fetch.ud[u] - m = ud.method - ret = m.checkstatus(fetch, ud, self.d) - self.assertTrue(ret, msg="URI %s, can't check status" % (u)) - - @skipIfNoNetwork() - def test_wget_checkstatus_connection_cache(self): - from bb.fetch2 import FetchConnectionCache - - connection_cache = FetchConnectionCache() - fetch = bb.fetch2.Fetch(self.test_wget_uris, self.d, - connection_cache = connection_cache) - - for u in self.test_wget_uris: - with self.subTest(url=u): - ud = fetch.ud[u] - m = ud.method - ret = m.checkstatus(fetch, ud, self.d) - self.assertTrue(ret, msg="URI %s, can't check status" % (u)) - - connection_cache.close_connections() - - -class GitMakeShallowTest(FetcherTest): - def setUp(self): - FetcherTest.setUp(self) - self.gitdir = os.path.join(self.tempdir, 'gitshallow') - bb.utils.mkdirhier(self.gitdir) - self.git_init() - - def assertRefs(self, expected_refs): - actual_refs = self.git(['for-each-ref', '--format=%(refname)']).splitlines() - full_expected = self.git(['rev-parse', '--symbolic-full-name'] + expected_refs).splitlines() - self.assertEqual(sorted(full_expected), sorted(actual_refs)) - - def assertRevCount(self, expected_count, args=None): - if args is None: - args = ['HEAD'] - revs = self.git(['rev-list'] + args) - actual_count = len(revs.splitlines()) - 
self.assertEqual(expected_count, actual_count, msg='Object count `%d` is not the expected `%d`' % (actual_count, expected_count)) - - def make_shallow(self, args=None): - if args is None: - args = ['HEAD'] - return bb.process.run([bb.fetch2.git.Git.make_shallow_path] + args, cwd=self.gitdir) - - def add_empty_file(self, path, msg=None): - if msg is None: - msg = path - open(os.path.join(self.gitdir, path), 'w').close() - self.git(['add', path]) - self.git(['commit', '-m', msg, path]) - - def test_make_shallow_single_branch_no_merge(self): - self.add_empty_file('a') - self.add_empty_file('b') - self.assertRevCount(2) - self.make_shallow() - self.assertRevCount(1) - - def test_make_shallow_single_branch_one_merge(self): - self.add_empty_file('a') - self.add_empty_file('b') - self.git('checkout -b a_branch') - self.add_empty_file('c') - self.git('checkout master') - self.add_empty_file('d') - self.git('merge --no-ff --no-edit a_branch') - self.git('branch -d a_branch') - self.add_empty_file('e') - self.assertRevCount(6) - self.make_shallow(['HEAD~2']) - self.assertRevCount(5) - - def test_make_shallow_at_merge(self): - self.add_empty_file('a') - self.git('checkout -b a_branch') - self.add_empty_file('b') - self.git('checkout master') - self.git('merge --no-ff --no-edit a_branch') - self.git('branch -d a_branch') - self.assertRevCount(3) - self.make_shallow() - self.assertRevCount(1) - - def test_make_shallow_annotated_tag(self): - self.add_empty_file('a') - self.add_empty_file('b') - self.git('tag -a -m a_tag a_tag') - self.assertRevCount(2) - self.make_shallow(['a_tag']) - self.assertRevCount(1) - - def test_make_shallow_multi_ref(self): - self.add_empty_file('a') - self.add_empty_file('b') - self.git('checkout -b a_branch') - self.add_empty_file('c') - self.git('checkout master') - self.add_empty_file('d') - self.git('checkout -b a_branch_2') - self.add_empty_file('a_tag') - self.git('tag a_tag') - self.git('checkout master') - self.git('branch -D a_branch_2') - self.add_empty_file('e') - self.assertRevCount(6, ['--all']) - self.make_shallow() - self.assertRevCount(5, ['--all']) - - def test_make_shallow_multi_ref_trim(self): - self.add_empty_file('a') - self.git('checkout -b a_branch') - self.add_empty_file('c') - self.git('checkout master') - self.assertRevCount(1) - self.assertRevCount(2, ['--all']) - self.assertRefs(['master', 'a_branch']) - self.make_shallow(['-r', 'master', 'HEAD']) - self.assertRevCount(1, ['--all']) - self.assertRefs(['master']) - - def test_make_shallow_noop(self): - self.add_empty_file('a') - self.assertRevCount(1) - self.make_shallow() - self.assertRevCount(1) - - @skipIfNoNetwork() - def test_make_shallow_bitbake(self): - self.git('remote add origin https://github.com/openembedded/bitbake') - self.git('fetch --tags origin') - orig_revs = len(self.git('rev-list --all').splitlines()) - self.make_shallow(['refs/tags/1.10.0']) - self.assertRevCount(orig_revs - 1746, ['--all']) - -class GitShallowTest(FetcherTest): - def setUp(self): - FetcherTest.setUp(self) - self.gitdir = os.path.join(self.tempdir, 'git') - self.srcdir = os.path.join(self.tempdir, 'gitsource') - - bb.utils.mkdirhier(self.srcdir) - self.git_init(cwd=self.srcdir) - self.d.setVar('WORKDIR', self.tempdir) - self.d.setVar('S', self.gitdir) - self.d.delVar('PREMIRRORS') - self.d.delVar('MIRRORS') - - uri = 'git://%s;protocol=file;subdir=${S};branch=master' % self.srcdir - self.d.setVar('SRC_URI', uri) - self.d.setVar('SRCREV', '${AUTOREV}') - self.d.setVar('AUTOREV', '${@bb.fetch2.get_autorev(d)}') - - 
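The BB_GIT_SHALLOW* variables set on the following lines ask the fetcher for history-truncated clones and tarballs. For reference, the underlying git notion is a depth-limited clone; a plain-git sketch (hypothetical helper, not the fetcher code):

import subprocess

def shallow_clone_rev_count(remote, dest, depth=1):
    # --depth implies --single-branch; local sources need a file:// URL,
    # since plain local-path clones ignore --depth.
    subprocess.run(["git", "clone", "--depth", str(depth), remote, dest], check=True)
    out = subprocess.run(["git", "-C", dest, "rev-list", "--count", "HEAD"],
                         check=True, capture_output=True, text=True)
    return int(out.stdout.strip())  # equals depth for a linear history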
self.d.setVar('BB_GIT_SHALLOW', '1') - self.d.setVar('BB_GENERATE_MIRROR_TARBALLS', '0') - self.d.setVar('BB_GENERATE_SHALLOW_TARBALLS', '1') - self.d.setVar("__BBSRCREV_SEEN", "1") - - def assertRefs(self, expected_refs, cwd=None): - if cwd is None: - cwd = self.gitdir - actual_refs = self.git(['for-each-ref', '--format=%(refname)'], cwd=cwd).splitlines() - # Resolve references into the same format as the comparison (needed by git 2.48 onwards) - actual_refs = self.git(['rev-parse', '--symbolic-full-name'] + actual_refs, cwd=cwd).splitlines() - full_expected = self.git(['rev-parse', '--symbolic-full-name'] + expected_refs, cwd=cwd).splitlines() - self.assertEqual(sorted(set(full_expected)), sorted(set(actual_refs))) - - def assertRevCount(self, expected_count, args=None, cwd=None): - if args is None: - args = ['HEAD'] - if cwd is None: - cwd = self.gitdir - revs = self.git(['rev-list'] + args, cwd=cwd) - actual_count = len(revs.splitlines()) - self.assertEqual(expected_count, actual_count, msg='Object count `%d` is not the expected `%d`' % (actual_count, expected_count)) - - def add_empty_file(self, path, cwd=None, msg=None): - if msg is None: - msg = path - if cwd is None: - cwd = self.srcdir - open(os.path.join(cwd, path), 'w').close() - self.git(['add', path], cwd) - self.git(['commit', '-m', msg, path], cwd) - - def fetch(self, uri=None): - if uri is None: - uris = self.d.getVar('SRC_URI').split() - uri = uris[0] - d = self.d - else: - d = self.d.createCopy() - d.setVar('SRC_URI', uri) - uri = d.expand(uri) - uris = [uri] - - fetcher = bb.fetch2.Fetch(uris, d) - fetcher.download() - ud = fetcher.ud[uri] - return fetcher, ud - - def fetch_and_unpack(self, uri=None): - fetcher, ud = self.fetch(uri) - fetcher.unpack(self.d.getVar('WORKDIR')) - assert os.path.exists(self.d.getVar('S')) - return fetcher, ud - - def fetch_shallow(self, uri=None, disabled=False, keepclone=False): - """Fetch a uri, generating a shallow tarball, then unpack using it""" - fetcher, ud = self.fetch_and_unpack(uri) - - # Confirm that the shallow mirror tarball was generated - if not disabled: - assert os.path.exists(os.path.join(self.dldir, ud.mirrortarballs[0])) - - # Fetch and unpack, from the shallow tarball - bb.utils.remove(self.gitdir, recurse=True) - if os.path.exists(ud.clonedir): - bb.process.run('chmod u+w -R "%s"' % ud.clonedir) - bb.utils.remove(ud.clonedir, recurse=True) - bb.utils.remove(ud.clonedir.replace('gitsource', 'gitsubmodule'), recurse=True) - - # Confirm that the shallow tarball is used when no git clone or git - # mirror tarball is available - fetcher, ud = self.fetch_and_unpack(uri) - if not disabled: - assert os.path.exists(os.path.join(self.gitdir, '.git', 'shallow')), 'Unpacked git repository at %s is not shallow' % self.gitdir - else: - assert not os.path.exists(os.path.join(self.gitdir, '.git', 'shallow')), 'Unpacked git repository at %s is shallow' % self.gitdir - return fetcher, ud - - def test_shallow_disabled(self): - self.add_empty_file('a') - self.add_empty_file('b') - self.assertRevCount(2, cwd=self.srcdir) - - self.d.setVar('BB_GIT_SHALLOW', '0') - self.fetch_shallow(disabled=True) - self.assertRevCount(2) - - def test_shallow_nobranch(self): - self.add_empty_file('a') - self.add_empty_file('b') - self.assertRevCount(2, cwd=self.srcdir) - - srcrev = self.git('rev-parse HEAD', cwd=self.srcdir).strip() - self.d.setVar('SRCREV', srcrev) - uri = self.d.getVar('SRC_URI').split()[0] - uri = '%s;nobranch=1;bare=1' % uri - - self.fetch_shallow(uri) - self.assertRevCount(1) - - # shallow refs
are used to ensure the srcrev sticks around when we - # have no other branches referencing it - self.assertRefs(['refs/shallow/default']) - - def test_shallow_default_depth_1(self): - # Create initial git repo - self.add_empty_file('a') - self.add_empty_file('b') - self.assertRevCount(2, cwd=self.srcdir) - - self.fetch_shallow() - self.assertRevCount(1) - - def test_shallow_depth_0_disables(self): - self.add_empty_file('a') - self.add_empty_file('b') - self.assertRevCount(2, cwd=self.srcdir) - - self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0') - self.fetch_shallow(disabled=True) - self.assertRevCount(2) - - def test_shallow_depth_default_override(self): - self.add_empty_file('a') - self.add_empty_file('b') - self.assertRevCount(2, cwd=self.srcdir) - - self.d.setVar('BB_GIT_SHALLOW_DEPTH', '2') - self.d.setVar('BB_GIT_SHALLOW_DEPTH_default', '1') - self.fetch_shallow() - self.assertRevCount(1) - - def test_shallow_depth_default_override_disable(self): - self.add_empty_file('a') - self.add_empty_file('b') - self.add_empty_file('c') - self.assertRevCount(3, cwd=self.srcdir) - - self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0') - self.d.setVar('BB_GIT_SHALLOW_DEPTH_default', '2') - self.fetch_shallow() - self.assertRevCount(2) - - def test_current_shallow_out_of_date_clone(self): - # Create initial git repo - self.add_empty_file('a') - self.add_empty_file('b') - self.add_empty_file('c') - self.assertRevCount(3, cwd=self.srcdir) - - # Clone without tarball - self.d.setVar('BB_GIT_SHALLOW', '0') - fetcher, ud = self.fetch() - - # Clone and generate mirror tarball - self.d.setVar('BB_GIT_SHALLOW', '1') - fetcher, ud = self.fetch() - - # Ensure we have a current mirror tarball, but an out of date clone - self.git('update-ref refs/heads/master refs/heads/master~1', cwd=ud.clonedir) - self.assertRevCount(2, cwd=ud.clonedir) - - # Fetch and unpack, from the current tarball, not the out of date clone - bb.utils.remove(self.gitdir, recurse=True) - fetcher, ud = self.fetch() - fetcher.unpack(self.d.getVar('WORKDIR')) - self.assertRevCount(1) - assert os.path.exists(os.path.join(self.d.getVar('WORKDIR'), 'git', 'c')) - - def test_shallow_single_branch_no_merge(self): - self.add_empty_file('a') - self.add_empty_file('b') - self.assertRevCount(2, cwd=self.srcdir) - - self.fetch_shallow() - self.assertRevCount(1) - assert os.path.exists(os.path.join(self.gitdir, 'a')) - assert os.path.exists(os.path.join(self.gitdir, 'b')) - - def test_shallow_no_dangling(self): - self.add_empty_file('a') - self.add_empty_file('b') - self.assertRevCount(2, cwd=self.srcdir) - - self.fetch_shallow() - self.assertRevCount(1) - assert not self.git('fsck --dangling') - - def test_shallow_srcrev_branch_truncation(self): - self.add_empty_file('a') - self.add_empty_file('b') - b_commit = self.git('rev-parse HEAD', cwd=self.srcdir).rstrip() - self.add_empty_file('c') - self.assertRevCount(3, cwd=self.srcdir) - - self.d.setVar('SRCREV', b_commit) - self.fetch_shallow() - - # The 'c' commit was removed entirely, and 'a' was removed from history - self.assertRevCount(1, ['--all']) - self.assertEqual(self.git('rev-parse HEAD').strip(), b_commit) - assert os.path.exists(os.path.join(self.gitdir, 'a')) - assert os.path.exists(os.path.join(self.gitdir, 'b')) - assert not os.path.exists(os.path.join(self.gitdir, 'c')) - - def test_shallow_ref_pruning(self): - self.add_empty_file('a') - self.add_empty_file('b') - self.git('branch a_branch', cwd=self.srcdir) - self.assertRefs(['master', 'a_branch'], cwd=self.srcdir) - self.assertRevCount(2, 
cwd=self.srcdir) - - self.fetch_shallow() - - self.assertRefs(['master', 'origin/master']) - self.assertRevCount(1) - - def test_shallow_submodules(self): - self.add_empty_file('a') - self.add_empty_file('b') - - smdir = os.path.join(self.tempdir, 'gitsubmodule') - bb.utils.mkdirhier(smdir) - self.git_init(cwd=smdir) - # Make this look like it was cloned from a remote... - self.git('config --add remote.origin.url "%s"' % smdir, cwd=smdir) - self.git('config --add remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*"', cwd=smdir) - self.add_empty_file('asub', cwd=smdir) - self.add_empty_file('bsub', cwd=smdir) - - self.git('submodule init', cwd=self.srcdir) - self.git('-c protocol.file.allow=always submodule add file://%s' % smdir, cwd=self.srcdir) - self.git('submodule update', cwd=self.srcdir) - self.git('commit -m submodule -a', cwd=self.srcdir) - - uri = 'gitsm://%s;protocol=file;subdir=${S};branch=master' % self.srcdir - fetcher, ud = self.fetch_shallow(uri) - - # Verify the main repository is shallow - self.assertRevCount(1) - - # Verify the gitsubmodule directory is present - assert os.listdir(os.path.join(self.gitdir, 'gitsubmodule')) - - # Verify the submodule is also shallow - self.assertRevCount(1, cwd=os.path.join(self.gitdir, 'gitsubmodule')) - - def test_shallow_submodule_mirrors(self): - self.add_empty_file('a') - self.add_empty_file('b') - - smdir = os.path.join(self.tempdir, 'gitsubmodule') - bb.utils.mkdirhier(smdir) - self.git_init(cwd=smdir) - # Make this look like it was cloned from a remote... - self.git('config --add remote.origin.url "%s"' % smdir, cwd=smdir) - self.git('config --add remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*"', cwd=smdir) - self.add_empty_file('asub', cwd=smdir) - self.add_empty_file('bsub', cwd=smdir) - - self.git('submodule init', cwd=self.srcdir) - self.git('-c protocol.file.allow=always submodule add file://%s' % smdir, cwd=self.srcdir) - self.git('submodule update', cwd=self.srcdir) - self.git('commit -m submodule -a', cwd=self.srcdir) - - uri = 'gitsm://%s;protocol=file;subdir=${S};branch=master' % self.srcdir - - # Fetch once to generate the shallow tarball - fetcher, ud = self.fetch(uri) - - # Set up the mirror - mirrordir = os.path.join(self.tempdir, 'mirror') - bb.utils.rename(self.dldir, mirrordir) - self.d.setVar('PREMIRRORS', 'gitsm://.*/.* file://%s/' % mirrordir) - - # Fetch from the mirror - bb.utils.remove(self.dldir, recurse=True) - bb.utils.remove(self.gitdir, recurse=True) - self.fetch_and_unpack(uri) - - # Verify the main repository is shallow - self.assertRevCount(1) - - # Verify the gitsubmodule directory is present - assert os.listdir(os.path.join(self.gitdir, 'gitsubmodule')) - - # Verify the submodule is also shallow - self.assertRevCount(1, cwd=os.path.join(self.gitdir, 'gitsubmodule')) - - if any(os.path.exists(os.path.join(p, 'git-annex')) for p in os.environ.get('PATH').split(':')): - def test_shallow_annex(self): - self.add_empty_file('a') - self.add_empty_file('b') - self.git('annex init', cwd=self.srcdir) - open(os.path.join(self.srcdir, 'c'), 'w').close() - self.git('annex add c', cwd=self.srcdir) - self.git('commit --author "Foo Bar " -m annex-c -a', cwd=self.srcdir) - bb.process.run('chmod u+w -R %s' % self.srcdir) - - uri = 'gitannex://%s;protocol=file;subdir=${S};branch=master' % self.srcdir - fetcher, ud = self.fetch_shallow(uri) - - self.assertRevCount(1) - assert './.git/annex/' in bb.process.run('tar -tzf %s' % os.path.join(self.dldir, ud.mirrortarballs[0]))[0] - assert 
os.path.exists(os.path.join(self.gitdir, 'c')) - - def test_shallow_clone_preferred_over_shallow(self): - self.add_empty_file('a') - self.add_empty_file('b') - - # Fetch once with shallow disabled to generate the full git clone - self.d.setVar('BB_GIT_SHALLOW', '0') - fetcher, ud = self.fetch() - - # Fetch and unpack with both the clonedir and shallow tarball available - bb.utils.remove(self.gitdir, recurse=True) - self.d.setVar('BB_GIT_SHALLOW', '1') - fetcher, ud = self.fetch_and_unpack() - - # The unpacked tree should *not* be shallow - self.assertRevCount(2) - assert not os.path.exists(os.path.join(self.gitdir, '.git', 'shallow')) - - def test_shallow_mirrors(self): - self.add_empty_file('a') - self.add_empty_file('b') - - # Fetch once to generate the shallow tarball - fetcher, ud = self.fetch() - mirrortarball = ud.mirrortarballs[0] - assert os.path.exists(os.path.join(self.dldir, mirrortarball)) - - # Set up the mirror - mirrordir = os.path.join(self.tempdir, 'mirror') - bb.utils.mkdirhier(mirrordir) - self.d.setVar('PREMIRRORS', 'git://.*/.* file://%s/' % mirrordir) - - bb.utils.rename(os.path.join(self.dldir, mirrortarball), - os.path.join(mirrordir, mirrortarball)) - - # Fetch from the mirror - bb.utils.remove(self.dldir, recurse=True) - bb.utils.remove(self.gitdir, recurse=True) - self.fetch_and_unpack() - self.assertRevCount(1) - - def test_shallow_invalid_depth(self): - self.add_empty_file('a') - self.add_empty_file('b') - - self.d.setVar('BB_GIT_SHALLOW_DEPTH', '-12') - with self.assertRaises(bb.fetch2.FetchError): - self.fetch() - - def test_shallow_invalid_depth_default(self): - self.add_empty_file('a') - self.add_empty_file('b') - - self.d.setVar('BB_GIT_SHALLOW_DEPTH_default', '-12') - with self.assertRaises(bb.fetch2.FetchError): - self.fetch() - - def test_shallow_extra_refs(self): - self.add_empty_file('a') - self.add_empty_file('b') - self.git('branch a_branch', cwd=self.srcdir) - self.assertRefs(['master', 'a_branch'], cwd=self.srcdir) - self.assertRevCount(2, cwd=self.srcdir) - - self.d.setVar('BB_GIT_SHALLOW_EXTRA_REFS', 'refs/heads/a_branch') - self.fetch_shallow() - - self.assertRefs(['master', 'origin/master', 'origin/a_branch']) - self.assertRevCount(1) - - def test_shallow_extra_refs_wildcard(self): - self.add_empty_file('a') - self.add_empty_file('b') - self.git('branch a_branch', cwd=self.srcdir) - self.git('tag v1.0', cwd=self.srcdir) - self.assertRefs(['master', 'a_branch', 'v1.0'], cwd=self.srcdir) - self.assertRevCount(2, cwd=self.srcdir) - - self.d.setVar('BB_GIT_SHALLOW_EXTRA_REFS', 'refs/tags/*') - self.fetch_shallow() - - self.assertRefs(['master', 'origin/master', 'v1.0']) - self.assertRevCount(1) - - def test_shallow_missing_extra_refs(self): - self.add_empty_file('a') - self.add_empty_file('b') - - self.d.setVar('BB_GIT_SHALLOW_EXTRA_REFS', 'refs/heads/foo') - with self.assertRaises(bb.fetch2.FetchError): - self.fetch() - - def test_shallow_missing_extra_refs_wildcard(self): - self.add_empty_file('a') - self.add_empty_file('b') - - self.d.setVar('BB_GIT_SHALLOW_EXTRA_REFS', 'refs/tags/*') - self.fetch() - - def test_shallow_remove_revs(self): - # Create initial git repo - self.add_empty_file('a') - self.add_empty_file('b') - self.git('checkout -b a_branch', cwd=self.srcdir) - self.add_empty_file('c') - self.add_empty_file('d') - self.git('checkout master', cwd=self.srcdir) - self.git('tag v0.0 a_branch', cwd=self.srcdir) - self.add_empty_file('e') - self.git('merge --no-ff --no-edit a_branch', cwd=self.srcdir) - self.git('branch -d a_branch', cwd=self.srcdir) -
self.add_empty_file('f') - self.assertRevCount(7, cwd=self.srcdir) - - self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0') - self.d.setVar('BB_GIT_SHALLOW_REVS', 'v0.0') - - self.fetch_shallow() - - self.assertRevCount(2) - - def test_shallow_invalid_revs(self): - self.add_empty_file('a') - self.add_empty_file('b') - - self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0') - self.d.setVar('BB_GIT_SHALLOW_REVS', 'v0.0') - - with self.assertRaises(bb.fetch2.FetchError): - self.fetch() - - def test_shallow_fetch_missing_revs(self): - self.add_empty_file('a') - self.add_empty_file('b') - fetcher, ud = self.fetch(self.d.getVar('SRC_URI')) - self.git('tag v0.0 master', cwd=self.srcdir) - self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0') - self.d.setVar('BB_GIT_SHALLOW_REVS', 'v0.0') - - with self.assertRaises(bb.fetch2.FetchError), self.assertLogs("BitBake.Fetcher", level="ERROR") as cm: - self.fetch_shallow() - self.assertIn("fatal: no commits selected for shallow requests", cm.output[0]) - - def test_shallow_fetch_missing_revs_fails(self): - self.add_empty_file('a') - self.add_empty_file('b') - fetcher, ud = self.fetch(self.d.getVar('SRC_URI')) - self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0') - self.d.setVar('BB_GIT_SHALLOW_REVS', 'v0.0') - - with self.assertRaises(bb.fetch2.FetchError), self.assertLogs("BitBake.Fetcher", level="ERROR") as cm: - self.fetch_shallow() - self.assertIn("Unable to find revision v0.0 even from upstream", cm.output[0]) - - @skipIfNoNetwork() - def test_git_shallow_fetch_premirrors(self): - url = "git://git.openembedded.org/bitbake;branch=master;protocol=https" - - # Create a separate premirror directory within tempdir - premirror = os.path.join(self.tempdir, "premirror") - os.mkdir(premirror) - - # Fetch a non-shallow clone into the premirror subdir - self.d.setVar('BB_GIT_SHALLOW', '0') - self.d.setVar("DL_DIR", premirror) - fetcher, ud = self.fetch(url) - - # Fetch a shallow clone from the premirror subdir with unpacking - # using the original recipe URL and the premirror mapping - self.d.setVar('BB_GIT_SHALLOW', '1') - self.d.setVar("DL_DIR", self.dldir) - self.d.setVar('BB_FETCH_PREMIRRORONLY', '1') - self.d.setVar('BB_NO_NETWORK', '1') - self.d.setVar('BB_GENERATE_MIRROR_TARBALLS', '0') - self.d.setVar("PREMIRRORS", "git://.*/.* git://{0};protocol=file".format(premirror + "/git2/" + ud.host + ud.path.replace("/", "."))) - fetcher = self.fetch_and_unpack(url) - - # Verify that the unpacked sources are shallow clones - self.assertRevCount(1) - assert os.path.exists(os.path.join(self.gitdir, '.git', 'shallow')) - - @skipIfNoNetwork() - def test_bitbake(self): - self.git('remote add --mirror=fetch origin https://github.com/openembedded/bitbake', cwd=self.srcdir) - self.git('config core.bare true', cwd=self.srcdir) - self.git('fetch', cwd=self.srcdir) - - self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0') - # Note that the 1.10.0 tag is annotated, so this also tests - # reference of an annotated vs unannotated tag - self.d.setVar('BB_GIT_SHALLOW_REVS', '1.10.0') - - self.fetch_shallow() - - # Confirm that the history of 1.10.0 was removed - orig_revs = len(self.git('rev-list master', cwd=self.srcdir).splitlines()) - revs = len(self.git('rev-list master').splitlines()) - self.assertNotEqual(orig_revs, revs) - self.assertRefs(['master', 'origin/master']) - self.assertRevCount(orig_revs - 1760) - - def test_that_unpack_throws_an_error_when_the_git_clone_nor_shallow_tarball_exist(self): - self.add_empty_file('a') - fetcher, ud = self.fetch() - bb.utils.remove(self.gitdir, recurse=True) - 
bb.utils.remove(self.dldir, recurse=True) - - with self.assertRaises(bb.fetch2.UnpackError) as context: - fetcher.unpack(self.d.getVar('WORKDIR')) - - self.assertIn("No up to date source found", context.exception.msg) - self.assertIn("clone directory not available or not up to date", context.exception.msg) - - def test_shallow_check_is_shallow(self): - self.add_empty_file('a') - self.add_empty_file('b') - - # Fetch and unpack without the clonedir and *only* shallow tarball available - bb.utils.remove(self.gitdir, recurse=True) - fetcher, ud = self.fetch_and_unpack() - - # The unpacked tree *should* be shallow - self.assertRevCount(1) - assert os.path.exists(os.path.join(self.gitdir, '.git', 'shallow')) - - def test_shallow_succeeds_with_tag_containing_slash(self): - self.add_empty_file('a') - self.add_empty_file('b') - self.git('tag t1/t2/t3', cwd=self.srcdir) - self.assertRevCount(2, cwd=self.srcdir) - - srcrev = self.git('rev-parse HEAD', cwd=self.srcdir).strip() - self.d.setVar('SRCREV', srcrev) - uri = self.d.getVar('SRC_URI').split()[0] - uri = '%s;tag=t1/t2/t3' % uri - self.fetch_shallow(uri) - self.assertRevCount(1) - -class GitLfsTest(FetcherTest): - def skipIfNoGitLFS(): - if not shutil.which('git-lfs'): - return unittest.skip('git-lfs not installed') - return lambda f: f - - def setUp(self): - FetcherTest.setUp(self) - - self.gitdir = os.path.join(self.tempdir, 'git') - self.srcdir = os.path.join(self.tempdir, 'gitsource') - - self.d.setVar('WORKDIR', self.tempdir) - self.d.setVar('S', self.gitdir) - self.d.delVar('PREMIRRORS') - self.d.delVar('MIRRORS') - - self.d.setVar('SRCREV', '${AUTOREV}') - self.d.setVar('AUTOREV', '${@bb.fetch2.get_autorev(d)}') - self.d.setVar("__BBSRCREV_SEEN", "1") - - bb.utils.mkdirhier(self.srcdir) - self.git_init(cwd=self.srcdir) - self.commit_file('.gitattributes', '*.mp3 filter=lfs -text') - - def commit(self, *, cwd=None): - cwd = cwd or self.srcdir - self.git(["commit", "-m", "Change"], cwd=cwd) - return self.git(["rev-parse", "HEAD"], cwd=cwd).strip() - - def commit_file(self, filename, content, *, cwd=None): - cwd = cwd or self.srcdir - - with open(os.path.join(cwd, filename), "w") as f: - f.write(content) - self.git(["add", filename], cwd=cwd) - return self.commit(cwd=cwd) - - def fetch(self, uri=None, download=True): - uris = self.d.getVar('SRC_URI').split() - uri = uris[0] - d = self.d - - fetcher = bb.fetch2.Fetch(uris, d) - if download: - fetcher.download() - ud = fetcher.ud[uri] - return fetcher, ud - - def get_real_git_lfs_file(self): - self.d.setVar('PATH', os.environ.get('PATH')) - fetcher, ud = self.fetch() - fetcher.unpack(self.d.getVar('WORKDIR')) - unpacked_lfs_file = os.path.join(self.d.getVar('WORKDIR'), 'git', "Cat_poster_1.jpg") - return unpacked_lfs_file - - @skipIfNoGitLFS() - def test_gitsm_lfs(self): - """Test that the gitsm fetcher caches objects stored via LFS""" - self.git(["lfs", "install", "--local"], cwd=self.srcdir) - - def fetch_and_verify(revision, filename, content): - self.d.setVar('SRCREV', revision) - fetcher, ud = self.fetch() - - with hide_directory(submoduledir), hide_directory(self.srcdir): - workdir = self.d.getVar('WORKDIR') - fetcher.unpack(workdir) - - with open(os.path.join(workdir, "git", filename)) as f: - self.assertEqual(f.read(), content) - - # Create the git repository that will later be used as a submodule - submoduledir = self.tempdir + "/submodule" - bb.utils.mkdirhier(submoduledir) - self.git_init(submoduledir) - self.git(["lfs", "install", "--local"], cwd=submoduledir) - 
self.commit_file('.gitattributes', '*.mp3 filter=lfs -text', cwd=submoduledir) - - submodule_commit_1 = self.commit_file("a.mp3", "submodule version 1", cwd=submoduledir) - _ = self.commit_file("a.mp3", "submodule version 2", cwd=submoduledir) - - # Add the submodule to the repository at its current HEAD revision - self.git(["-c", "protocol.file.allow=always", "submodule", "add", submoduledir, "submodule"], - cwd=self.srcdir) - base_commit_1 = self.commit() - - # Let the submodule point at a different revision - self.git(["checkout", submodule_commit_1], cwd=self.srcdir + "/submodule") - self.git(["add", "submodule"], cwd=self.srcdir) - base_commit_2 = self.commit() - - # Add an LFS file to the repository - base_commit_3 = self.commit_file("a.mp3", "version 1") - # Update the added LFS file - base_commit_4 = self.commit_file("a.mp3", "version 2") - - self.d.setVar('SRC_URI', "gitsm://%s;protocol=file;lfs=1;branch=master" % self.srcdir) - - # Verify that LFS objects referenced from submodules are fetched and checked out - fetch_and_verify(base_commit_1, "submodule/a.mp3", "submodule version 2") - # Verify that the repository inside the download cache of a submodule is extended with any - # additional LFS objects needed when checking out a different revision. - fetch_and_verify(base_commit_2, "submodule/a.mp3", "submodule version 1") - # Verify that LFS objects referenced from the base repository are fetched and checked out - fetch_and_verify(base_commit_3, "a.mp3", "version 1") - # Verify that the cached repository is extended with any additional LFS objects required - # when checking out a different revision. - fetch_and_verify(base_commit_4, "a.mp3", "version 2") - - @skipIfNoGitLFS() - def test_gitsm_lfs_disabled(self): - """Test that the gitsm fetcher does not use LFS when explicitly disabled""" - self.git(["lfs", "install", "--local"], cwd=self.srcdir) - - def fetch_and_verify(revision, filename, content): - self.d.setVar('SRCREV', revision) - fetcher, ud = self.fetch() - - with hide_directory(submoduledir), hide_directory(self.srcdir): - workdir = self.d.getVar('WORKDIR') - fetcher.unpack(workdir) - - with open(os.path.join(workdir, "git", filename)) as f: - # If the expected content is missing, assume that LFS did not - # perform smudging.
- self.assertNotEqual(f.read(), content) - - # Create the git repository that will later be used as a submodule - submoduledir = self.tempdir + "/submodule" - bb.utils.mkdirhier(submoduledir) - self.git_init(submoduledir) - self.git(["lfs", "install", "--local"], cwd=submoduledir) - self.commit_file('.gitattributes', '*.mp3 filter=lfs -text', cwd=submoduledir) - - submodule_commit_1 = self.commit_file("a.mp3", "submodule version 1", cwd=submoduledir) - - # Add the submodule to the repository at its current HEAD revision - self.git(["-c", "protocol.file.allow=always", "submodule", "add", submoduledir, "submodule"], - cwd=self.srcdir) - base_commit_1 = self.commit() - - # Add an LFS file to the repository - base_commit_2 = self.commit_file("a.mp3", "version 1") - - self.d.setVar('SRC_URI', "gitsm://%s;protocol=file;lfs=0;branch=master" % self.srcdir) - - # Verify that LFS objects referenced from submodules are neither fetched nor checked out - fetch_and_verify(base_commit_1, "submodule/a.mp3", "submodule version 1") - # Verify that the LFS objects referenced from the base repository are neither fetched nor - # checked out - fetch_and_verify(base_commit_2, "a.mp3", "version 1") - - @skipIfNoGitLFS() - def test_fetch_lfs_on_srcrev_change(self): - """Test if fetch downloads missing LFS objects when a different revision within an existing repository is requested""" - self.git(["lfs", "install", "--local"], cwd=self.srcdir) - - def fetch_and_verify(revision, filename, content): - self.d.setVar('SRCREV', revision) - fetcher, ud = self.fetch() - - with hide_directory(self.srcdir): - workdir = self.d.getVar('WORKDIR') - fetcher.unpack(workdir) - - with open(os.path.join(workdir, "git", filename)) as f: - self.assertEqual(f.read(), content) - - commit_1 = self.commit_file("a.mp3", "version 1") - commit_2 = self.commit_file("a.mp3", "version 2") - - self.d.setVar('SRC_URI', "git://%s;protocol=file;lfs=1;branch=master" % self.srcdir) - - # Seed the local download folder by fetching the latest commit and verifying that the LFS contents are - # available even when the upstream repository disappears. - fetch_and_verify(commit_2, "a.mp3", "version 2") - # Verify that even when an older revision is fetched, the needed LFS objects are fetched into the download - # folder.
- fetch_and_verify(commit_1, "a.mp3", "version 1") - - @skipIfNoGitLFS() - @skipIfNoNetwork() - def test_real_git_lfs_repo_succeeds_without_lfs_param(self): - self.d.setVar('SRC_URI', "git://gitlab.com/gitlab-examples/lfs.git;protocol=https;branch=master") - f = self.get_real_git_lfs_file() - self.assertTrue(os.path.exists(f)) - self.assertEqual("c0baab607a97839c9a328b4310713307", bb.utils.md5_file(f)) - - @skipIfNoGitLFS() - @skipIfNoNetwork() - def test_real_git_lfs_repo_succeeds(self): - self.d.setVar('SRC_URI', "git://gitlab.com/gitlab-examples/lfs.git;protocol=https;branch=master;lfs=1") - f = self.get_real_git_lfs_file() - self.assertTrue(os.path.exists(f)) - self.assertEqual("c0baab607a97839c9a328b4310713307", bb.utils.md5_file(f)) - - @skipIfNoGitLFS() - @skipIfNoNetwork() - def test_real_git_lfs_repo_skips(self): - self.d.setVar('SRC_URI', "git://gitlab.com/gitlab-examples/lfs.git;protocol=https;branch=master;lfs=0") - f = self.get_real_git_lfs_file() - # This is the actual non-smudged placeholder file on the repo if git-lfs does not run - lfs_file = ( - 'version https://git-lfs.github.com/spec/v1\n' - 'oid sha256:34be66b1a39a1955b46a12588df9d5f6fc1da790e05cf01f3c7422f4bbbdc26b\n' - 'size 11423554\n' - ) - - with open(f) as fh: - self.assertEqual(lfs_file, fh.read()) - - @skipIfNoGitLFS() - def test_lfs_enabled(self): - uri = 'git://%s;protocol=file;lfs=1;branch=master' % self.srcdir - self.d.setVar('SRC_URI', uri) - - # With git-lfs installed, test that we can fetch and unpack - fetcher, ud = self.fetch() - shutil.rmtree(self.gitdir, ignore_errors=True) - fetcher.unpack(self.d.getVar('WORKDIR')) - - @skipIfNoGitLFS() - def test_lfs_disabled(self): - uri = 'git://%s;protocol=file;lfs=0;branch=master' % self.srcdir - self.d.setVar('SRC_URI', uri) - - # Verify that the fetcher can survive even if the source - # repository has Git LFS usage configured. 
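- # (With lfs=0 the fetcher is expected to skip LFS handling entirely; any - # LFS-tracked paths simply remain pointer files after unpack, as shown by - # test_real_git_lfs_repo_skips above.)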
- fetcher, ud = self.fetch() - fetcher.unpack(self.d.getVar('WORKDIR')) - - @skipIfNoGitLFS() - def test_lfs_enabled_not_installed_during_unpack(self): - uri = 'git://%s;protocol=file;lfs=1;branch=master' % self.srcdir - self.d.setVar('SRC_URI', uri) - - # Careful: suppress initial attempt at downloading - fetcher, ud = self.fetch(uri=None, download=False) - - fetcher.download() - # If git-lfs cannot be found, the unpack should throw an error - with self.assertRaises(bb.fetch2.FetchError): - with unittest.mock.patch("shutil.which", return_value=None): - shutil.rmtree(self.gitdir, ignore_errors=True) - fetcher.unpack(self.d.getVar('WORKDIR')) - - def test_lfs_enabled_not_installed(self): - uri = 'git://%s;protocol=file;lfs=1;branch=master' % self.srcdir - self.d.setVar('SRC_URI', uri) - - # Careful: suppress initial attempt at downloading - fetcher, ud = self.fetch(uri=None, download=False) - - # If git-lfs cannot be found, the download should throw an error - with unittest.mock.patch("shutil.which", return_value=None): - with self.assertRaises(bb.fetch2.FetchError): - fetcher.download() - - def test_lfs_disabled_not_installed(self): - uri = 'git://%s;protocol=file;lfs=0;branch=master' % self.srcdir - self.d.setVar('SRC_URI', uri) - - # Careful: suppress initial attempt at downloading - fetcher, ud = self.fetch(uri=None, download=False) - - # Even if git-lfs cannot be found, the download / unpack should be successful - with unittest.mock.patch("shutil.which", return_value=None): - fetcher.download() - shutil.rmtree(self.gitdir, ignore_errors=True) - fetcher.unpack(self.d.getVar('WORKDIR')) - - def test_lfs_enabled_not_installed_but_not_needed(self): - srcdir = os.path.join(self.tempdir, "emptygit") - bb.utils.mkdirhier(srcdir) - self.git_init(srcdir) - self.commit_file("test", "test content", cwd=srcdir) - - uri = 'git://%s;protocol=file;lfs=1;branch=master' % srcdir - self.d.setVar('SRC_URI', uri) - - # Careful: suppress initial attempt at downloading - fetcher, ud = self.fetch(uri=None, download=False) - - # It shouldn't matter that git-lfs cannot be found as the repository configuration does not - # specify any LFS filters.
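- # The shutil.which patch below is the same trick used throughout these - # tests to simulate a host without git-lfs: every executable lookup - # returns None, e.g. - # with unittest.mock.patch("shutil.which", return_value=None): - # assert shutil.which("git-lfs") is None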
- with unittest.mock.patch("shutil.which", return_value=None): - fetcher.download() - shutil.rmtree(self.gitdir, ignore_errors=True) - fetcher.unpack(self.d.getVar('WORKDIR')) - -class GitURLWithSpacesTest(FetcherTest): - test_git_urls = { - "git://tfs-example.org:22/tfs/example%20path/example.git;branch=master" : { - 'url': 'git://tfs-example.org:22/tfs/example%20path/example.git;branch=master', - 'repo_url': 'git://tfs-example.org:22/tfs/example%20path/example.git', - 'gitsrcname': 'tfs-example.org.22.tfs.example_path.example.git', - 'path': '/tfs/example path/example.git' - }, - "git://tfs-example.org:22/tfs/example%20path/example%20repo.git;branch=master" : { - 'url': 'git://tfs-example.org:22/tfs/example%20path/example%20repo.git;branch=master', - 'repo_url': 'git://tfs-example.org:22/tfs/example%20path/example%20repo.git', - 'gitsrcname': 'tfs-example.org.22.tfs.example_path.example_repo.git', - 'path': '/tfs/example path/example repo.git' - } - } - - def test_urls(self): - - # Set fake SRCREV to stop git fetcher from trying to contact non-existent git repo - self.d.setVar('SRCREV', '82ea737a0b42a8b53e11c9cde141e9e9c0bd8c40') - - for test_git_url, ref in self.test_git_urls.items(): - - fetcher = bb.fetch.Fetch([test_git_url], self.d) - ud = fetcher.ud[fetcher.urls[0]] - - self.assertEqual(ud.url, ref['url']) - self.assertEqual(ud.path, ref['path']) - self.assertEqual(ud.localfile, os.path.join(self.dldir, "git2", ref['gitsrcname'])) - self.assertEqual(ud.localpath, os.path.join(self.dldir, "git2", ref['gitsrcname'])) - self.assertEqual(ud.lockfile, os.path.join(self.dldir, "git2", ref['gitsrcname'] + '.lock')) - self.assertEqual(ud.clonedir, os.path.join(self.dldir, "git2", ref['gitsrcname'])) - self.assertEqual(ud.fullmirror, os.path.join(self.dldir, "git2_" + ref['gitsrcname'] + '.tar.gz')) - self.assertEqual(ud.method._get_repo_url(ud), ref['repo_url']) - - -class FetchLocallyMissingTagFromRemote(FetcherTest): - def setUp(self): - FetcherTest.setUp(self) - self.gitdir = os.path.join(self.tempdir, 'git') - self.srcdir = os.path.join(self.tempdir, 'gitsource') - - bb.utils.mkdirhier(self.srcdir) - self.git_init(cwd=self.srcdir) - self.d.setVar('WORKDIR', self.tempdir) - self.d.setVar('S', self.gitdir) - - uri = 'git://%s;protocol=file;subdir=${S};branch=master' % self.srcdir - self.d.setVar('SRC_URI', uri) - - open(os.path.join(self.srcdir, 'dummyfile'), 'w').close() - self.git(['add', 'dummyfile'], self.srcdir) - self.git(['commit', '-m', 'dummymsg', 'dummyfile'], self.srcdir) - - def _fetch_and_unpack(self, uri_to_fetch): - fetcher = bb.fetch2.Fetch([uri_to_fetch], self.d) - fetcher.download() - fetcher.unpack(self.d.getVar('WORKDIR')) - - def test_tag_present_in_remote_but_not_local(self): - # Fetch a repo that has no tag in it, - # then add a tag to this repo and fetch it again without - # changing SRCREV, but with ';tag=tag1' added to SRC_URI. - # The new tag should be fetched and unpacked. - srcrev = self.git('rev-parse HEAD', cwd=self.srcdir).strip() - self.d.setVar('SRCREV', srcrev) - src_uri = self.d.getVar('SRC_URI') - self._fetch_and_unpack(src_uri) - - self.git('tag -a -m tagmsg tag1', cwd=self.srcdir) - - src_uri = '%s;tag=tag1' % self.d.getVar('SRC_URI').split()[0] - self.d.setVar('SRC_URI', src_uri) - self._fetch_and_unpack(src_uri) - - output = self.git('log --pretty=oneline -n 1 refs/tags/tag1', cwd=self.gitdir) - assert "fatal: ambiguous argument" not in output - - -class CrateTest(FetcherTest): - @skipIfNoNetwork() - def test_crate_url(self): - - uri = 
"crate://crates.io/glob/0.2.11" - self.d.setVar('SRC_URI', uri) - - uris = self.d.getVar('SRC_URI').split() - d = self.d - - fetcher = bb.fetch2.Fetch(uris, self.d) - ud = fetcher.ud[fetcher.urls[0]] - - self.assertIn("name", ud.parm) - self.assertEqual(ud.parm["name"], "glob-0.2.11") - self.assertIn("downloadfilename", ud.parm) - self.assertEqual(ud.parm["downloadfilename"], "glob-0.2.11.crate") - - fetcher.download() - fetcher.unpack(self.tempdir) - self.assertEqual(sorted(os.listdir(self.tempdir)), ['cargo_home', 'download' , 'unpacked']) - self.assertEqual(sorted(os.listdir(self.tempdir + "/download")), ['glob-0.2.11.crate', 'glob-0.2.11.crate.done']) - self.assertTrue(os.path.exists(self.tempdir + "/cargo_home/bitbake/glob-0.2.11/.cargo-checksum.json")) - self.assertTrue(os.path.exists(self.tempdir + "/cargo_home/bitbake/glob-0.2.11/src/lib.rs")) - - @skipIfNoNetwork() - def test_crate_url_matching_recipe(self): - - self.d.setVar('BP', 'glob-0.2.11') - - uri = "crate://crates.io/glob/0.2.11" - self.d.setVar('SRC_URI', uri) - - uris = self.d.getVar('SRC_URI').split() - d = self.d - - fetcher = bb.fetch2.Fetch(uris, self.d) - ud = fetcher.ud[fetcher.urls[0]] - - self.assertIn("name", ud.parm) - self.assertEqual(ud.parm["name"], "glob-0.2.11") - self.assertIn("downloadfilename", ud.parm) - self.assertEqual(ud.parm["downloadfilename"], "glob-0.2.11.crate") - - fetcher.download() - fetcher.unpack(self.tempdir) - self.assertEqual(sorted(os.listdir(self.tempdir)), ['download', 'glob-0.2.11', 'unpacked']) - self.assertEqual(sorted(os.listdir(self.tempdir + "/download")), ['glob-0.2.11.crate', 'glob-0.2.11.crate.done']) - self.assertTrue(os.path.exists(self.tempdir + "/glob-0.2.11/src/lib.rs")) - - @skipIfNoNetwork() - def test_crate_url_params(self): - - uri = "crate://crates.io/aho-corasick/0.7.20;name=aho-corasick-renamed" - self.d.setVar('SRC_URI', uri) - - uris = self.d.getVar('SRC_URI').split() - d = self.d - - fetcher = bb.fetch2.Fetch(uris, self.d) - ud = fetcher.ud[fetcher.urls[0]] - - self.assertIn("name", ud.parm) - self.assertEqual(ud.parm["name"], "aho-corasick-renamed") - self.assertIn("downloadfilename", ud.parm) - self.assertEqual(ud.parm["downloadfilename"], "aho-corasick-0.7.20.crate") - - fetcher.download() - fetcher.unpack(self.tempdir) - self.assertEqual(sorted(os.listdir(self.tempdir)), ['cargo_home', 'download' , 'unpacked']) - self.assertEqual(sorted(os.listdir(self.tempdir + "/download")), ['aho-corasick-0.7.20.crate', 'aho-corasick-0.7.20.crate.done']) - self.assertTrue(os.path.exists(self.tempdir + "/cargo_home/bitbake/aho-corasick-0.7.20/.cargo-checksum.json")) - self.assertTrue(os.path.exists(self.tempdir + "/cargo_home/bitbake/aho-corasick-0.7.20/src/lib.rs")) - - @skipIfNoNetwork() - def test_crate_url_multi(self): - - uri = "crate://crates.io/glob/0.2.11 crate://crates.io/time/0.1.35" - self.d.setVar('SRC_URI', uri) - - uris = self.d.getVar('SRC_URI').split() - d = self.d - - fetcher = bb.fetch2.Fetch(uris, self.d) - ud = fetcher.ud[fetcher.urls[0]] - - self.assertIn("name", ud.parm) - self.assertEqual(ud.parm["name"], "glob-0.2.11") - self.assertIn("downloadfilename", ud.parm) - self.assertEqual(ud.parm["downloadfilename"], "glob-0.2.11.crate") - - ud = fetcher.ud[fetcher.urls[1]] - self.assertIn("name", ud.parm) - self.assertEqual(ud.parm["name"], "time-0.1.35") - self.assertIn("downloadfilename", ud.parm) - self.assertEqual(ud.parm["downloadfilename"], "time-0.1.35.crate") - - fetcher.download() - fetcher.unpack(self.tempdir) - 
self.assertEqual(sorted(os.listdir(self.tempdir)), ['cargo_home', 'download' , 'unpacked']) - self.assertEqual(sorted(os.listdir(self.tempdir + "/download")), ['glob-0.2.11.crate', 'glob-0.2.11.crate.done', 'time-0.1.35.crate', 'time-0.1.35.crate.done']) - self.assertTrue(os.path.exists(self.tempdir + "/cargo_home/bitbake/glob-0.2.11/.cargo-checksum.json")) - self.assertTrue(os.path.exists(self.tempdir + "/cargo_home/bitbake/glob-0.2.11/src/lib.rs")) - self.assertTrue(os.path.exists(self.tempdir + "/cargo_home/bitbake/time-0.1.35/.cargo-checksum.json")) - self.assertTrue(os.path.exists(self.tempdir + "/cargo_home/bitbake/time-0.1.35/src/lib.rs")) - - @skipIfNoNetwork() - def test_crate_incorrect_cksum(self): - uri = "crate://crates.io/aho-corasick/0.7.20" - self.d.setVar('SRC_URI', uri) - self.d.setVarFlag("SRC_URI", "aho-corasick-0.7.20.sha256sum", hashlib.sha256("Invalid".encode("utf-8")).hexdigest()) - - uris = self.d.getVar('SRC_URI').split() - - fetcher = bb.fetch2.Fetch(uris, self.d) - with self.assertRaisesRegex(bb.fetch2.FetchError, "Fetcher failure for URL"): - fetcher.download() - -class NPMTest(FetcherTest): - def skipIfNoNpm(): - if not shutil.which('npm'): - return unittest.skip('npm not installed') - return lambda f: f - - @skipIfNoNpm() - @skipIfNoNetwork() - def test_npm(self): - urls = ['npm://registry.npmjs.org;package=@savoirfairelinux/node-server-example;version=1.0.0'] - fetcher = bb.fetch.Fetch(urls, self.d) - ud = fetcher.ud[fetcher.urls[0]] - fetcher.download() - self.assertTrue(os.path.exists(ud.localpath)) - self.assertTrue(os.path.exists(ud.localpath + '.done')) - self.assertTrue(os.path.exists(ud.resolvefile)) - fetcher.unpack(self.unpackdir) - unpackdir = os.path.join(self.unpackdir, 'npm') - self.assertTrue(os.path.exists(os.path.join(unpackdir, 'package.json'))) - - @skipIfNoNpm() - @skipIfNoNetwork() - def test_npm_bad_checksum(self): - urls = ['npm://registry.npmjs.org;package=@savoirfairelinux/node-server-example;version=1.0.0'] - # Fetch once to get a tarball - fetcher = bb.fetch.Fetch(urls, self.d) - ud = fetcher.ud[fetcher.urls[0]] - fetcher.download() - self.assertTrue(os.path.exists(ud.localpath)) - # Modify the tarball - bad = b'bad checksum' - with open(ud.localpath, 'wb') as f: - f.write(bad) - # Verify that the tarball is fetched again - fetcher.download() - badsum = hashlib.sha512(bad).hexdigest() - self.assertTrue(os.path.exists(ud.localpath + '_bad-checksum_' + badsum)) - self.assertTrue(os.path.exists(ud.localpath)) - - @skipIfNoNpm() - @skipIfNoNetwork() - def test_npm_premirrors(self): - urls = ['npm://registry.npmjs.org;package=@savoirfairelinux/node-server-example;version=1.0.0'] - # Fetch once to get a tarball - fetcher = bb.fetch.Fetch(urls, self.d) - ud = fetcher.ud[fetcher.urls[0]] - fetcher.download() - self.assertTrue(os.path.exists(ud.localpath)) - - # Setup the mirror by renaming the download directory - mirrordir = os.path.join(self.tempdir, 'mirror') - bb.utils.rename(self.dldir, mirrordir) - os.mkdir(self.dldir) - - # Configure the premirror to be used - self.d.setVar('PREMIRRORS', 'https?$://.*/.* file://%s/npm2' % mirrordir) - self.d.setVar('BB_FETCH_PREMIRRORONLY', '1') - - # Fetch again - self.assertFalse(os.path.exists(ud.localpath)) - # The npm fetcher doesn't handle that the .resolved file disappears - # while the fetcher object exists, which it does when we rename the - # download directory to "mirror" above. Thus we need a new fetcher to go - # with the now empty download directory. 
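- # (ud.resolvefile holds the registry URL recorded at resolve time; see - # test_npm_mirrors below, which rewrites that file to point at an invalid - # URL and relies on MIRRORS to recover.)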
- fetcher = bb.fetch.Fetch(urls, self.d) - ud = fetcher.ud[fetcher.urls[0]] - fetcher.download() - self.assertTrue(os.path.exists(ud.localpath)) - - @skipIfNoNpm() - @skipIfNoNetwork() - def test_npm_premirrors_with_specified_filename(self): - urls = ['npm://registry.npmjs.org;package=@savoirfairelinux/node-server-example;version=1.0.0'] - # Fetch once to get a tarball - fetcher = bb.fetch.Fetch(urls, self.d) - ud = fetcher.ud[fetcher.urls[0]] - fetcher.download() - self.assertTrue(os.path.exists(ud.localpath)) - # Setup the mirror - mirrordir = os.path.join(self.tempdir, 'mirror') - bb.utils.mkdirhier(mirrordir) - mirrorfilename = os.path.join(mirrordir, os.path.basename(ud.localpath)) - os.replace(ud.localpath, mirrorfilename) - self.d.setVar('PREMIRRORS', 'https?$://.*/.* file://%s' % mirrorfilename) - self.d.setVar('BB_FETCH_PREMIRRORONLY', '1') - # Fetch again - self.assertFalse(os.path.exists(ud.localpath)) - fetcher.download() - self.assertTrue(os.path.exists(ud.localpath)) - - @skipIfNoNpm() - @skipIfNoNetwork() - def test_npm_mirrors(self): - # Fetch once to get a tarball - urls = ['npm://registry.npmjs.org;package=@savoirfairelinux/node-server-example;version=1.0.0'] - fetcher = bb.fetch.Fetch(urls, self.d) - ud = fetcher.ud[fetcher.urls[0]] - fetcher.download() - self.assertTrue(os.path.exists(ud.localpath)) - # Setup the mirror - mirrordir = os.path.join(self.tempdir, 'mirror') - bb.utils.mkdirhier(mirrordir) - os.replace(ud.localpath, os.path.join(mirrordir, os.path.basename(ud.localpath))) - self.d.setVar('MIRRORS', 'https?$://.*/.* file://%s/' % mirrordir) - # Update the resolved url to an invalid url - with open(ud.resolvefile, 'r') as f: - url = f.read() - uri = URI(url) - uri.path = '/invalid' - with open(ud.resolvefile, 'w') as f: - f.write(str(uri)) - # Fetch again - self.assertFalse(os.path.exists(ud.localpath)) - fetcher.download() - self.assertTrue(os.path.exists(ud.localpath)) - - @skipIfNoNpm() - @skipIfNoNetwork() - def test_npm_destsuffix_downloadfilename(self): - urls = ['npm://registry.npmjs.org;package=@savoirfairelinux/node-server-example;version=1.0.0;destsuffix=foo/bar;downloadfilename=foo-bar.tgz'] - fetcher = bb.fetch.Fetch(urls, self.d) - fetcher.download() - self.assertTrue(os.path.exists(os.path.join(self.dldir, 'npm2', 'foo-bar.tgz'))) - fetcher.unpack(self.unpackdir) - unpackdir = os.path.join(self.unpackdir, 'foo', 'bar') - self.assertTrue(os.path.exists(os.path.join(unpackdir, 'package.json'))) - - def test_npm_no_network_no_tarball(self): - urls = ['npm://registry.npmjs.org;package=@savoirfairelinux/node-server-example;version=1.0.0'] - self.d.setVar('BB_NO_NETWORK', '1') - fetcher = bb.fetch.Fetch(urls, self.d) - with self.assertRaises(bb.fetch2.NetworkAccess): - fetcher.download() - - @skipIfNoNpm() - @skipIfNoNetwork() - def test_npm_no_network_with_tarball(self): - urls = ['npm://registry.npmjs.org;package=@savoirfairelinux/node-server-example;version=1.0.0'] - # Fetch once to get a tarball - fetcher = bb.fetch.Fetch(urls, self.d) - fetcher.download() - # Disable network access - self.d.setVar('BB_NO_NETWORK', '1') - # Fetch again - fetcher.download() - fetcher.unpack(self.unpackdir) - unpackdir = os.path.join(self.unpackdir, 'npm') - self.assertTrue(os.path.exists(os.path.join(unpackdir, 'package.json'))) - - @skipIfNoNpm() - @skipIfNoNetwork() - def test_npm_registry_alternate(self): - urls = ['npm://skimdb.npmjs.com;package=@savoirfairelinux/node-server-example;version=1.0.0'] - fetcher = bb.fetch.Fetch(urls, self.d) - fetcher.download() - 
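- # skimdb.npmjs.com mirrors the same packages as registry.npmjs.org, so - # unpacking should yield the usual package.json even though the npm:// - # URL names a different registry host.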
fetcher.unpack(self.unpackdir) - unpackdir = os.path.join(self.unpackdir, 'npm') - self.assertTrue(os.path.exists(os.path.join(unpackdir, 'package.json'))) - - @skipIfNoNpm() - @skipIfNoNetwork() - def test_npm_version_latest(self): - url = ['npm://registry.npmjs.org;package=@savoirfairelinux/node-server-example;version=latest'] - fetcher = bb.fetch.Fetch(url, self.d) - fetcher.download() - fetcher.unpack(self.unpackdir) - unpackdir = os.path.join(self.unpackdir, 'npm') - self.assertTrue(os.path.exists(os.path.join(unpackdir, 'package.json'))) - - @skipIfNoNpm() - @skipIfNoNetwork() - def test_npm_registry_invalid(self): - urls = ['npm://registry.invalid.org;package=@savoirfairelinux/node-server-example;version=1.0.0'] - fetcher = bb.fetch.Fetch(urls, self.d) - with self.assertRaises(bb.fetch2.FetchError): - fetcher.download() - - @skipIfNoNpm() - @skipIfNoNetwork() - def test_npm_package_invalid(self): - urls = ['npm://registry.npmjs.org;package=@savoirfairelinux/invalid;version=1.0.0'] - fetcher = bb.fetch.Fetch(urls, self.d) - with self.assertRaises(bb.fetch2.FetchError): - fetcher.download() - - @skipIfNoNpm() - @skipIfNoNetwork() - def test_npm_version_invalid(self): - urls = ['npm://registry.npmjs.org;package=@savoirfairelinux/node-server-example;version=invalid'] - with self.assertRaises(bb.fetch2.ParameterError): - fetcher = bb.fetch.Fetch(urls, self.d) - - @skipIfNoNpm() - @skipIfNoNetwork() - def test_npm_registry_none(self): - urls = ['npm://;package=@savoirfairelinux/node-server-example;version=1.0.0'] - with self.assertRaises(bb.fetch2.MalformedUrl): - fetcher = bb.fetch.Fetch(urls, self.d) - - @skipIfNoNpm() - @skipIfNoNetwork() - def test_npm_package_none(self): - urls = ['npm://registry.npmjs.org;version=1.0.0'] - with self.assertRaises(bb.fetch2.MissingParameterError): - fetcher = bb.fetch.Fetch(urls, self.d) - - @skipIfNoNpm() - @skipIfNoNetwork() - def test_npm_version_none(self): - urls = ['npm://registry.npmjs.org;package=@savoirfairelinux/node-server-example'] - with self.assertRaises(bb.fetch2.MissingParameterError): - fetcher = bb.fetch.Fetch(urls, self.d) - - def create_shrinkwrap_file(self, data): - import json - datadir = os.path.join(self.tempdir, 'data') - swfile = os.path.join(datadir, 'npm-shrinkwrap.json') - bb.utils.mkdirhier(datadir) - with open(swfile, 'w') as f: - json.dump(data, f) - return swfile - - @skipIfNoNetwork() - def test_npmsw(self): - swfile = self.create_shrinkwrap_file({ - 'packages': { - 'node_modules/array-flatten': { - 'version': '1.1.1', - 'resolved': 'https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz', - 'integrity': 'sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=', - 'dependencies': { - 'content-type': "1.0.4" - } - }, - 'node_modules/array-flatten/node_modules/content-type': { - 'version': '1.0.4', - 'resolved': 'https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz', - 'integrity': 'sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==', - 'dependencies': { - 'cookie': 'git+https://github.com/jshttp/cookie.git#aec1177c7da67e3b3273df96cf476824dbc9ae09' - } - }, - 'node_modules/array-flatten/node_modules/content-type/node_modules/cookie': { - 'resolved': 'git+https://github.com/jshttp/cookie.git#aec1177c7da67e3b3273df96cf476824dbc9ae09' - } - } - }) - fetcher = bb.fetch.Fetch(['npmsw://' + swfile], self.d) - fetcher.download() - self.assertTrue(os.path.exists(os.path.join(self.dldir, 'npm2', 'array-flatten-1.1.1.tgz'))) - self.assertTrue(os.path.exists(os.path.join(self.dldir, 
'npm2', 'content-type-1.0.4.tgz'))) - self.assertTrue(os.path.exists(os.path.join(self.dldir, 'git2', 'github.com.jshttp.cookie.git'))) - fetcher.unpack(self.unpackdir) - self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'npm-shrinkwrap.json'))) - self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'node_modules', 'array-flatten', 'package.json'))) - self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'node_modules', 'array-flatten', 'node_modules', 'content-type', 'package.json'))) - self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'node_modules', 'array-flatten', 'node_modules', 'content-type', 'node_modules', 'cookie', 'package.json'))) - - @skipIfNoNetwork() - def test_npmsw_git(self): - swfile = self.create_shrinkwrap_file({ - 'packages': { - 'node_modules/cookie': { - 'resolved': 'git+https://github.com/jshttp/cookie.git#aec1177c7da67e3b3273df96cf476824dbc9ae09' - } - } - }) - fetcher = bb.fetch.Fetch(['npmsw://' + swfile], self.d) - fetcher.download() - self.assertTrue(os.path.exists(os.path.join(self.dldir, 'git2', 'github.com.jshttp.cookie.git'))) - - @skipIfNoNetwork() - def test_npmsw_dev(self): - swfile = self.create_shrinkwrap_file({ - 'packages': { - 'node_modules/array-flatten': { - 'version': '1.1.1', - 'resolved': 'https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz', - 'integrity': 'sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=' - }, - 'node_modules/content-type': { - 'version': '1.0.4', - 'resolved': 'https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz', - 'integrity': 'sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==', - 'dev': True - } - } - }) - # Fetch with dev disabled - fetcher = bb.fetch.Fetch(['npmsw://' + swfile], self.d) - fetcher.download() - self.assertTrue(os.path.exists(os.path.join(self.dldir, 'npm2', 'array-flatten-1.1.1.tgz'))) - self.assertFalse(os.path.exists(os.path.join(self.dldir, 'npm2', 'content-type-1.0.4.tgz'))) - # Fetch with dev enabled - fetcher = bb.fetch.Fetch(['npmsw://' + swfile + ';dev=1'], self.d) - fetcher.download() - self.assertTrue(os.path.exists(os.path.join(self.dldir, 'npm2', 'array-flatten-1.1.1.tgz'))) - self.assertTrue(os.path.exists(os.path.join(self.dldir, 'npm2', 'content-type-1.0.4.tgz'))) - - @skipIfNoNetwork() - def test_npmsw_destsuffix(self): - swfile = self.create_shrinkwrap_file({ - 'packages': { - 'node_modules/array-flatten': { - 'version': '1.1.1', - 'resolved': 'https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz', - 'integrity': 'sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=' - } - } - }) - fetcher = bb.fetch.Fetch(['npmsw://' + swfile + ';destsuffix=foo/bar'], self.d) - fetcher.download() - fetcher.unpack(self.unpackdir) - self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'foo', 'bar', 'node_modules', 'array-flatten', 'package.json'))) - - def test_npmsw_no_network_no_tarball(self): - swfile = self.create_shrinkwrap_file({ - 'packages': { - 'node_modules/array-flatten': { - 'version': '1.1.1', - 'resolved': 'https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz', - 'integrity': 'sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=' - } - } - }) - self.d.setVar('BB_NO_NETWORK', '1') - fetcher = bb.fetch.Fetch(['npmsw://' + swfile], self.d) - with self.assertRaises(bb.fetch2.NetworkAccess): - fetcher.download() - - @skipIfNoNpm() - @skipIfNoNetwork() - def test_npmsw_no_network_with_tarball(self): - # Fetch once to get a tarball - fetcher = 
bb.fetch.Fetch(['npm://registry.npmjs.org;package=array-flatten;version=1.1.1'], self.d) - fetcher.download() - # Disable network access - self.d.setVar('BB_NO_NETWORK', '1') - # Fetch again - swfile = self.create_shrinkwrap_file({ - 'packages': { - 'node_modules/array-flatten': { - 'version': '1.1.1', - 'resolved': 'https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz', - 'integrity': 'sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=' - } - } - }) - fetcher = bb.fetch.Fetch(['npmsw://' + swfile], self.d) - fetcher.download() - fetcher.unpack(self.unpackdir) - self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'node_modules', 'array-flatten', 'package.json'))) - - @skipIfNoNetwork() - def test_npmsw_npm_reusability(self): - # Fetch once with npmsw - swfile = self.create_shrinkwrap_file({ - 'packages': { - 'node_modules/array-flatten': { - 'version': '1.1.1', - 'resolved': 'https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz', - 'integrity': 'sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=' - } - } - }) - fetcher = bb.fetch.Fetch(['npmsw://' + swfile], self.d) - fetcher.download() - # Disable network access - self.d.setVar('BB_NO_NETWORK', '1') - # Fetch again with npm - fetcher = bb.fetch.Fetch(['npm://registry.npmjs.org;package=array-flatten;version=1.1.1'], self.d) - fetcher.download() - fetcher.unpack(self.unpackdir) - self.assertTrue(os.path.exists(os.path.join(self.unpackdir, 'npm', 'package.json'))) - - @skipIfNoNetwork() - def test_npmsw_bad_checksum(self): - # Try to fetch with bad checksum - swfile = self.create_shrinkwrap_file({ - 'packages': { - 'node_modules/array-flatten': { - 'version': '1.1.1', - 'resolved': 'https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz', - 'integrity': 'sha1-gfNEp2hqgLTFKT6P3AsBYMgsBqg=' - } - } - }) - fetcher = bb.fetch.Fetch(['npmsw://' + swfile], self.d) - with self.assertRaises(bb.fetch2.FetchError): - fetcher.download() - # Fetch correctly to get a tarball - swfile = self.create_shrinkwrap_file({ - 'packages': { - 'node_modules/array-flatten': { - 'version': '1.1.1', - 'resolved': 'https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz', - 'integrity': 'sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=' - } - } - }) - fetcher = bb.fetch.Fetch(['npmsw://' + swfile], self.d) - fetcher.download() - localpath = os.path.join(self.dldir, 'npm2', 'array-flatten-1.1.1.tgz') - self.assertTrue(os.path.exists(localpath)) - # Modify the tarball - bad = b'bad checksum' - with open(localpath, 'wb') as f: - f.write(bad) - # Verify that the tarball is fetched again - fetcher.download() - badsum = hashlib.sha1(bad).hexdigest() - self.assertTrue(os.path.exists(localpath + '_bad-checksum_' + badsum)) - self.assertTrue(os.path.exists(localpath)) - - @skipIfNoNpm() - @skipIfNoNetwork() - def test_npmsw_premirrors(self): - # Fetch once to get a tarball - fetcher = bb.fetch.Fetch(['npm://registry.npmjs.org;package=array-flatten;version=1.1.1'], self.d) - ud = fetcher.ud[fetcher.urls[0]] - fetcher.download() - self.assertTrue(os.path.exists(ud.localpath)) - # Setup the mirror - mirrordir = os.path.join(self.tempdir, 'mirror') - bb.utils.mkdirhier(mirrordir) - os.replace(ud.localpath, os.path.join(mirrordir, os.path.basename(ud.localpath))) - self.d.setVar('PREMIRRORS', 'https?$://.*/.* file://%s/' % mirrordir) - self.d.setVar('BB_FETCH_PREMIRRORONLY', '1') - # Fetch again - self.assertFalse(os.path.exists(ud.localpath)) - swfile = self.create_shrinkwrap_file({ - 'packages': { - 'node_modules/array-flatten': { - 'version': '1.1.1', - 'resolved': 
'https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz', - 'integrity': 'sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=' - } - } - }) - fetcher = bb.fetch.Fetch(['npmsw://' + swfile], self.d) - fetcher.download() - self.assertTrue(os.path.exists(ud.localpath)) - - @skipIfNoNpm() - @skipIfNoNetwork() - def test_npmsw_mirrors(self): - # Fetch once to get a tarball - fetcher = bb.fetch.Fetch(['npm://registry.npmjs.org;package=array-flatten;version=1.1.1'], self.d) - ud = fetcher.ud[fetcher.urls[0]] - fetcher.download() - self.assertTrue(os.path.exists(ud.localpath)) - # Setup the mirror - mirrordir = os.path.join(self.tempdir, 'mirror') - bb.utils.mkdirhier(mirrordir) - os.replace(ud.localpath, os.path.join(mirrordir, os.path.basename(ud.localpath))) - self.d.setVar('MIRRORS', 'https?$://.*/.* file://%s/' % mirrordir) - # Fetch again with invalid url - self.assertFalse(os.path.exists(ud.localpath)) - swfile = self.create_shrinkwrap_file({ - 'packages': { - 'node_modules/array-flatten': { - 'version': '1.1.1', - 'resolved': 'https://invalid', - 'integrity': 'sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=' - } - } - }) - fetcher = bb.fetch.Fetch(['npmsw://' + swfile], self.d) - fetcher.download() - self.assertTrue(os.path.exists(ud.localpath)) - - @skipIfNoNetwork() - def test_npmsw_bundled(self): - swfile = self.create_shrinkwrap_file({ - 'packages': { - 'node_modules/array-flatten': { - 'version': '1.1.1', - 'resolved': 'https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz', - 'integrity': 'sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=' - }, - 'node_modules/content-type': { - 'version': '1.0.4', - 'resolved': 'https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz', - 'integrity': 'sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==', - 'inBundle': True - } - } - }) - fetcher = bb.fetch.Fetch(['npmsw://' + swfile], self.d) - fetcher.download() - self.assertTrue(os.path.exists(os.path.join(self.dldir, 'npm2', 'array-flatten-1.1.1.tgz'))) - self.assertFalse(os.path.exists(os.path.join(self.dldir, 'npm2', 'content-type-1.0.4.tgz'))) - -class GitSharedTest(FetcherTest): - def setUp(self): - super(GitSharedTest, self).setUp() - self.recipe_url = "git://git.openembedded.org/bitbake;branch=master;protocol=https" - self.d.setVar('SRCREV', '82ea737a0b42a8b53e11c9cde141e9e9c0bd8c40') - self.d.setVar("__BBSRCREV_SEEN", "1") - - @skipIfNoNetwork() - def test_shared_unpack(self): - fetcher = bb.fetch.Fetch([self.recipe_url], self.d) - - fetcher.download() - fetcher.unpack(self.unpackdir) - alt = os.path.join(self.unpackdir, 'git/.git/objects/info/alternates') - self.assertTrue(os.path.exists(alt)) - - @skipIfNoNetwork() - def test_noshared_unpack(self): - self.d.setVar('BB_GIT_NOSHARED', '1') - self.unpackdir += '_noshared' - fetcher = bb.fetch.Fetch([self.recipe_url], self.d) - - fetcher.download() - fetcher.unpack(self.unpackdir) - alt = os.path.join(self.unpackdir, 'git/.git/objects/info/alternates') - self.assertFalse(os.path.exists(alt)) - -class GitTagVerificationTests(FetcherTest): - - @skipIfNoNetwork() - def test_tag_rev_match(self): - # Test a url with rev= and tag= set works - fetcher = bb.fetch.Fetch(["git://git.openembedded.org/bitbake;branch=2.12;protocol=https;rev=5b4e20377eea8d428edf1aeb2187c18f82ca6757;tag=2.12.0"], self.d) - fetcher.download() - fetcher.unpack(self.unpackdir) - - def test_annotated_tag_rev_match(self): - # Test a url with rev= and tag= set works - # rev is the annotated tag revision in this case - fetcher = 
bb.fetch.Fetch(["git://git.openembedded.org/bitbake;branch=2.12;protocol=https;rev=fa30183549bd09f33fd4eebf56771ca5393526a6;tag=2.12.0"], self.d) - fetcher.download() - fetcher.unpack(self.unpackdir) - - @skipIfNoNetwork() - def test_tag_rev_match2(self): - # Test a url with SRCREV and tag= set works - self.d.setVar('SRCREV', '5b4e20377eea8d428edf1aeb2187c18f82ca6757') - fetcher = bb.fetch.Fetch(["git://git.openembedded.org/bitbake;branch=2.12;protocol=https;tag=2.12.0"], self.d) - fetcher.download() - fetcher.unpack(self.unpackdir) - - @skipIfNoNetwork() - def test_tag_rev_match3(self): - # Test a url with SRCREV, rev= and tag= set works - self.d.setVar('SRCREV', '5b4e20377eea8d428edf1aeb2187c18f82ca6757') - fetcher = bb.fetch.Fetch(["git://git.openembedded.org/bitbake;branch=2.12;protocol=https;rev=5b4e20377eea8d428edf1aeb2187c18f82ca6757;tag=2.12.0"], self.d) - fetcher.download() - fetcher.unpack(self.unpackdir) - - @skipIfNoNetwork() - def test_tag_rev_match4(self): - # Test a url with SRCREV and rev= mismatching errors - self.d.setVar('SRCREV', 'bade540fc31a1c26839efd2c7785a751ce24ebfb') - with self.assertRaises(bb.fetch2.FetchError): - fetcher = bb.fetch.Fetch(["git://git.openembedded.org/bitbake;branch=2.12;protocol=https;rev=5b4e20377eea8d428edf1aeb2187c18f82ca6757;tag=2.12.0"], self.d) - - @skipIfNoNetwork() - def test_tag_rev_match5(self): - # Test a url with SRCREV, rev= and tag= set works when using shallow clones - self.d.setVar('BB_GIT_SHALLOW', '1') - self.d.setVar('SRCREV', '5b4e20377eea8d428edf1aeb2187c18f82ca6757') - fetcher = bb.fetch.Fetch(["git://git.openembedded.org/bitbake;branch=2.12;protocol=https;rev=5b4e20377eea8d428edf1aeb2187c18f82ca6757;tag=2.12.0"], self.d) - fetcher.download() - fetcher.unpack(self.unpackdir) - - @skipIfNoNetwork() - def test_tag_rev_match6(self): - # Test a url with SRCREV, rev= and a mismatched tag= when using shallow clones - self.d.setVar('BB_GIT_SHALLOW', '1') - fetcher = bb.fetch.Fetch(["git://git.openembedded.org/bitbake;branch=2.12;protocol=https;rev=5b4e20377eea8d428edf1aeb2187c18f82ca6757;tag=2.8.0"], self.d) - fetcher.download() - with self.assertRaises(bb.fetch2.FetchError): - fetcher.unpack(self.unpackdir) - - @skipIfNoNetwork() - def test_tag_rev_match7(self): - # Test a url with SRCREV, rev= and a mismatched tag= - self.d.setVar('SRCREV', '5b4e20377eea8d428edf1aeb2187c18f82ca6757') - fetcher = bb.fetch.Fetch(["git://git.openembedded.org/bitbake;branch=2.12;protocol=https;rev=5b4e20377eea8d428edf1aeb2187c18f82ca6757;tag=2.8.0"], self.d) - fetcher.download() - with self.assertRaises(bb.fetch2.FetchError): - fetcher.unpack(self.unpackdir) - - -class FetchPremirroronlyLocalTest(FetcherTest): - - def setUp(self): - super(FetchPremirroronlyLocalTest, self).setUp() - self.mirrordir = os.path.join(self.tempdir, "mirrors") - os.mkdir(self.mirrordir) - self.reponame = "bitbake" - self.gitdir = os.path.join(self.tempdir, "git", self.reponame) - self.recipe_url = "git://git.fake.repo/bitbake;branch=master;protocol=https" - self.d.setVar("BB_FETCH_PREMIRRORONLY", "1") - self.d.setVar("BB_NO_NETWORK", "1") - self.d.setVar("PREMIRRORS", self.recipe_url + " " + "file://{}".format(self.mirrordir) + " \n") - self.mirrorname = "git2_git.fake.repo.bitbake.tar.gz" - self.mirrorfile = os.path.join(self.mirrordir, self.mirrorname) - self.testfilename = "bitbake-fetch.test" - - def make_git_repo(self): - recipeurl = "git:/git.fake.repo/bitbake" - os.makedirs(self.gitdir) - self.git_init(cwd=self.gitdir) - for i in range(0): - self.git_new_commit() - 
bb.process.run('tar -czvf {} .'.format(os.path.join(self.mirrordir, self.mirrorname)), cwd = self.gitdir) - - def git_new_commit(self): - import random - os.unlink(os.path.join(self.mirrordir, self.mirrorname)) - branch = self.git("branch --show-current", self.gitdir).split() - with open(os.path.join(self.gitdir, self.testfilename), "w") as testfile: - testfile.write("File {} from branch {}; Useless random data {}".format(self.testfilename, branch, random.random())) - self.git("add {}".format(self.testfilename), self.gitdir) - self.git("commit -a -m \"This random commit {} in branch {}. I'm useless.\"".format(random.random(), branch), self.gitdir) - bb.process.run('tar -czvf {} .'.format(os.path.join(self.mirrordir, self.mirrorname)), cwd = self.gitdir) - return self.git("rev-parse HEAD", self.gitdir).strip() - - def git_new_branch(self, name): - self.git_new_commit() - head = self.git("rev-parse HEAD", self.gitdir).strip() - self.git("checkout -b {}".format(name), self.gitdir) - newrev = self.git_new_commit() - self.git("checkout {}".format(head), self.gitdir) - return newrev - - def test_mirror_multiple_fetches(self): - self.make_git_repo() - self.d.setVar("SRCREV", self.git_new_commit()) - fetcher = bb.fetch.Fetch([self.recipe_url], self.d) - fetcher.download() - fetcher.unpack(self.unpackdir) - ## New commit in premirror. it's not in the download_dir - self.d.setVar("SRCREV", self.git_new_commit()) - fetcher2 = bb.fetch.Fetch([self.recipe_url], self.d) - fetcher2.download() - fetcher2.unpack(self.unpackdir) - ## New commit in premirror. it's not in the download_dir - self.d.setVar("SRCREV", self.git_new_commit()) - fetcher3 = bb.fetch.Fetch([self.recipe_url], self.d) - fetcher3.download() - fetcher3.unpack(self.unpackdir) - - - def test_mirror_commit_nonexistent(self): - self.make_git_repo() - self.d.setVar("SRCREV", "0"*40) - fetcher = bb.fetch.Fetch([self.recipe_url], self.d) - with self.assertRaises(bb.fetch2.NetworkAccess): - fetcher.download() - - def test_mirror_commit_exists(self): - self.make_git_repo() - self.d.setVar("SRCREV", self.git_new_commit()) - fetcher = bb.fetch.Fetch([self.recipe_url], self.d) - fetcher.download() - fetcher.unpack(self.unpackdir) - - def test_mirror_tarball_nonexistent(self): - self.d.setVar("SRCREV", "0"*40) - fetcher = bb.fetch.Fetch([self.recipe_url], self.d) - with self.assertRaises(bb.fetch2.NetworkAccess): - fetcher.download() - - -class FetchPremirroronlyNetworkTest(FetcherTest): - - def setUp(self): - super(FetchPremirroronlyNetworkTest, self).setUp() - self.mirrordir = os.path.join(self.tempdir, "mirrors") - os.mkdir(self.mirrordir) - self.reponame = "fstests" - self.clonedir = os.path.join(self.tempdir, "git") - self.gitdir = os.path.join(self.tempdir, "git", "{}.git".format(self.reponame)) - self.recipe_url = "git://git.yoctoproject.org/fstests;protocol=https;branch=master" - self.d.setVar("BB_FETCH_PREMIRRORONLY", "1") - self.d.setVar("BB_NO_NETWORK", "0") - self.d.setVar("PREMIRRORS", self.recipe_url + " " + "file://{}".format(self.mirrordir) + " \n") - - def make_git_repo(self): - self.mirrorname = "git2_git.yoctoproject.org.fstests.tar.gz" - os.makedirs(self.clonedir) - self.git("clone --bare {}".format(self.recipe_url), self.clonedir) - self.git("update-ref HEAD 15413486df1f5a5b5af699b6f3ba5f0984e52a9f", self.gitdir) - bb.process.run('tar -czvf {} .'.format(os.path.join(self.mirrordir, self.mirrorname)), cwd = self.gitdir) - shutil.rmtree(self.clonedir) - - @skipIfNoNetwork() - def test_mirror_tarball_updated(self): - 
self.make_git_repo() - ## Upstream commit is in the mirror - self.d.setVar("SRCREV", "15413486df1f5a5b5af699b6f3ba5f0984e52a9f") - fetcher = bb.fetch.Fetch([self.recipe_url], self.d) - fetcher.download() - - @skipIfNoNetwork() - def test_mirror_tarball_outdated(self): - self.make_git_repo() - ## Upstream commit not in the mirror - self.d.setVar("SRCREV", "49d65d53c2bf558ae6e9185af0f3af7b79d255ec") - fetcher = bb.fetch.Fetch([self.recipe_url], self.d) - with self.assertRaises(bb.fetch2.NetworkAccess): - fetcher.download() - -class FetchPremirroronlyMercurialTest(FetcherTest): - """ Test for premirrors with mercurial repos. - The test also covers a basic hg:// clone (see fetch_and_create_tarball). - """ - def skipIfNoHg(): - if not shutil.which('hg'): - return unittest.skip('Mercurial not installed') - return lambda f: f - - def setUp(self): - super(FetchPremirroronlyMercurialTest, self).setUp() - self.mirrordir = os.path.join(self.tempdir, "mirrors") - os.mkdir(self.mirrordir) - self.reponame = "libgnt" - self.clonedir = os.path.join(self.tempdir, "hg") - self.recipe_url = "hg://keep.imfreedom.org/libgnt;module=libgnt" - self.d.setVar("SRCREV", "53e8b422faaf") - self.mirrorname = "hg_libgnt_keep.imfreedom.org_.libgnt.tar.gz" - - def fetch_and_create_tarball(self): - """ - Ask bitbake to download repo and prepare mirror tarball for us - """ - self.d.setVar("BB_GENERATE_MIRROR_TARBALLS", "1") - fetcher = bb.fetch.Fetch([self.recipe_url], self.d) - fetcher.download() - mirrorfile = os.path.join(self.d.getVar("DL_DIR"), self.mirrorname) - self.assertTrue(os.path.exists(mirrorfile), "Mirror tarball {} has not been created".format(mirrorfile)) - ## moving tarball to mirror directory - os.rename(mirrorfile, os.path.join(self.mirrordir, self.mirrorname)) - self.d.setVar("BB_GENERATE_MIRROR_TARBALLS", "0") - - - @skipIfNoNetwork() - @skipIfNoHg() - def test_premirror_mercurial(self): - self.fetch_and_create_tarball() - self.d.setVar("PREMIRRORS", self.recipe_url + " " + "file://{}".format(self.mirrordir) + " \n") - self.d.setVar("BB_FETCH_PREMIRRORONLY", "1") - self.d.setVar("BB_NO_NETWORK", "1") - fetcher = bb.fetch.Fetch([self.recipe_url], self.d) - fetcher.download() - -class FetchPremirroronlyBrokenTarball(FetcherTest): - - def setUp(self): - super(FetchPremirroronlyBrokenTarball, self).setUp() - self.mirrordir = os.path.join(self.tempdir, "mirrors") - os.mkdir(self.mirrordir) - self.reponame = "bitbake" - self.gitdir = os.path.join(self.tempdir, "git", self.reponame) - self.recipe_url = "git://git.fake.repo/bitbake;protocol=https;branch=master" - self.d.setVar("BB_FETCH_PREMIRRORONLY", "1") - self.d.setVar("BB_NO_NETWORK", "1") - self.d.setVar("PREMIRRORS", self.recipe_url + " " + "file://{}".format(self.mirrordir) + " \n") - self.mirrorname = "git2_git.fake.repo.bitbake.tar.gz" - with open(os.path.join(self.mirrordir, self.mirrorname), 'w') as targz: - targz.write("This is not a tar.gz file!") - - def test_mirror_broken_download(self): - self.d.setVar("SRCREV", "0"*40) - fetcher = bb.fetch.Fetch([self.recipe_url], self.d) - with self.assertRaises(bb.fetch2.FetchError), self.assertLogs() as logs: - fetcher.download() - output = "".join(logs.output) - self.assertFalse(" not a git repository (or any parent up to mount point /)" in output) - -class GoModTest(FetcherTest): - - @skipIfNoNetwork() - def test_gomod_url(self): - urls = ['gomod://github.com/Azure/azure-sdk-for-go/sdk/storage/azblob;version=v1.0.0;' - 'sha256sum=9bb69aea32f1d59711701f9562d66432c9c0374205e5009d1d1a62f03fb4fdad'] - - fetcher = 
bb.fetch2.Fetch(urls, self.d) - ud = fetcher.ud[urls[0]] - self.assertEqual(ud.url, 'https://proxy.golang.org/github.com/%21azure/azure-sdk-for-go/sdk/storage/azblob/%40v/v1.0.0.zip') - self.assertEqual(ud.parm['downloadfilename'], 'github.com.Azure.azure-sdk-for-go.sdk.storage.azblob@v1.0.0.zip') - self.assertEqual(ud.parm['name'], 'github.com/Azure/azure-sdk-for-go/sdk/storage/azblob@v1.0.0') - - fetcher.download() - fetcher.unpack(self.unpackdir) - downloaddir = os.path.join(self.unpackdir, 'pkg/mod/cache/download') - self.assertTrue(os.path.exists(os.path.join(downloaddir, 'github.com/!azure/azure-sdk-for-go/sdk/storage/azblob/@v/v1.0.0.zip'))) - self.assertTrue(os.path.exists(os.path.join(downloaddir, 'github.com/!azure/azure-sdk-for-go/sdk/storage/azblob/@v/v1.0.0.mod'))) - self.assertEqual(bb.utils.sha256_file(os.path.join(downloaddir, 'github.com/!azure/azure-sdk-for-go/sdk/storage/azblob/@v/v1.0.0.mod')), - '7873b8544842329b4f385a3aa6cf82cc2bc8defb41a04fa5291c35fd5900e873') - - @skipIfNoNetwork() - def test_gomod_url_go_mod_only(self): - urls = ['gomod://github.com/Azure/azure-sdk-for-go/sdk/storage/azblob;version=v1.0.0;mod=1;' - 'sha256sum=7873b8544842329b4f385a3aa6cf82cc2bc8defb41a04fa5291c35fd5900e873'] - - fetcher = bb.fetch2.Fetch(urls, self.d) - ud = fetcher.ud[urls[0]] - self.assertEqual(ud.url, 'https://proxy.golang.org/github.com/%21azure/azure-sdk-for-go/sdk/storage/azblob/%40v/v1.0.0.mod') - self.assertEqual(ud.parm['downloadfilename'], 'github.com.Azure.azure-sdk-for-go.sdk.storage.azblob@v1.0.0.mod') - self.assertEqual(ud.parm['name'], 'github.com/Azure/azure-sdk-for-go/sdk/storage/azblob@v1.0.0') - - fetcher.download() - fetcher.unpack(self.unpackdir) - downloaddir = os.path.join(self.unpackdir, 'pkg/mod/cache/download') - self.assertTrue(os.path.exists(os.path.join(downloaddir, 'github.com/!azure/azure-sdk-for-go/sdk/storage/azblob/@v/v1.0.0.mod'))) - - @skipIfNoNetwork() - def test_gomod_url_sha256sum_varflag(self): - urls = ['gomod://gopkg.in/ini.v1;version=v1.67.0'] - self.d.setVarFlag('SRC_URI', 'gopkg.in/ini.v1@v1.67.0.sha256sum', 'bd845dfc762a87a56e5a32a07770dc83e86976db7705d7f89c5dbafdc60b06c6') - - fetcher = bb.fetch2.Fetch(urls, self.d) - ud = fetcher.ud[urls[0]] - self.assertEqual(ud.url, 'https://proxy.golang.org/gopkg.in/ini.v1/%40v/v1.67.0.zip') - self.assertEqual(ud.parm['downloadfilename'], 'gopkg.in.ini.v1@v1.67.0.zip') - self.assertEqual(ud.parm['name'], 'gopkg.in/ini.v1@v1.67.0') - - fetcher.download() - fetcher.unpack(self.unpackdir) - downloaddir = os.path.join(self.unpackdir, 'pkg/mod/cache/download') - self.assertTrue(os.path.exists(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.zip'))) - self.assertTrue(os.path.exists(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.mod'))) - self.assertEqual(bb.utils.sha256_file(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.mod')), - '13aedd85db8e555104108e0e613bb7e4d1242af7f27c15423dd9ab63b60b72a1') - - @skipIfNoNetwork() - def test_gomod_url_no_go_mod_in_module(self): - urls = ['gomod://gopkg.in/ini.v1;version=v1.67.0;' - 'sha256sum=bd845dfc762a87a56e5a32a07770dc83e86976db7705d7f89c5dbafdc60b06c6'] - - fetcher = bb.fetch2.Fetch(urls, self.d) - ud = fetcher.ud[urls[0]] - self.assertEqual(ud.url, 'https://proxy.golang.org/gopkg.in/ini.v1/%40v/v1.67.0.zip') - self.assertEqual(ud.parm['downloadfilename'], 'gopkg.in.ini.v1@v1.67.0.zip') - self.assertEqual(ud.parm['name'], 'gopkg.in/ini.v1@v1.67.0') - - fetcher.download() - fetcher.unpack(self.unpackdir) - downloaddir = 
os.path.join(self.unpackdir, 'pkg/mod/cache/download') - self.assertTrue(os.path.exists(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.zip'))) - self.assertTrue(os.path.exists(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.mod'))) - self.assertEqual(bb.utils.sha256_file(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.mod')), - '13aedd85db8e555104108e0e613bb7e4d1242af7f27c15423dd9ab63b60b72a1') - - @skipIfNoNetwork() - def test_gomod_url_host_only(self): - urls = ['gomod://go.opencensus.io;version=v0.24.0;' - 'sha256sum=203a767d7f8e7c1ebe5588220ad168d1e15b14ae70a636de7ca9a4a88a7e0d0c'] - - fetcher = bb.fetch2.Fetch(urls, self.d) - ud = fetcher.ud[urls[0]] - self.assertEqual(ud.url, 'https://proxy.golang.org/go.opencensus.io/%40v/v0.24.0.zip') - self.assertEqual(ud.parm['downloadfilename'], 'go.opencensus.io@v0.24.0.zip') - self.assertEqual(ud.parm['name'], 'go.opencensus.io@v0.24.0') - - fetcher.download() - fetcher.unpack(self.unpackdir) - downloaddir = os.path.join(self.unpackdir, 'pkg/mod/cache/download') - self.assertTrue(os.path.exists(os.path.join(downloaddir, 'go.opencensus.io/@v/v0.24.0.zip'))) - self.assertTrue(os.path.exists(os.path.join(downloaddir, 'go.opencensus.io/@v/v0.24.0.mod'))) - self.assertEqual(bb.utils.sha256_file(os.path.join(downloaddir, 'go.opencensus.io/@v/v0.24.0.mod')), - '0dc9ccc660ad21cebaffd548f2cc6efa27891c68b4fbc1f8a3893b00f1acec96') - -class GoModGitTest(FetcherTest): - - @skipIfNoNetwork() - def test_gomodgit_url_repo(self): - urls = ['gomodgit://golang.org/x/net;version=v0.9.0;' - 'repo=go.googlesource.com/net;' - 'srcrev=694cff8668bac64e0864b552bffc280cd27f21b1'] - - fetcher = bb.fetch2.Fetch(urls, self.d) - ud = fetcher.ud[urls[0]] - self.assertEqual(ud.host, 'go.googlesource.com') - self.assertEqual(ud.path, '/net') - self.assertEqual(ud.name, 'golang.org/x/net@v0.9.0') - self.assertEqual(self.d.getVar('SRCREV_golang.org/x/net@v0.9.0'), '694cff8668bac64e0864b552bffc280cd27f21b1') - - fetcher.download() - self.assertTrue(os.path.exists(ud.localpath)) - - fetcher.unpack(self.unpackdir) - vcsdir = os.path.join(self.unpackdir, 'pkg/mod/cache/vcs') - self.assertTrue(os.path.exists(os.path.join(vcsdir, 'ed42bd05533fd84ae290a5d33ebd3695a0a2b06131beebd5450825bee8603aca'))) - downloaddir = os.path.join(self.unpackdir, 'pkg/mod/cache/download') - self.assertTrue(os.path.exists(os.path.join(downloaddir, 'golang.org/x/net/@v/v0.9.0.zip'))) - self.assertTrue(os.path.exists(os.path.join(downloaddir, 'golang.org/x/net/@v/v0.9.0.mod'))) - self.assertEqual(bb.utils.sha256_file(os.path.join(downloaddir, 'golang.org/x/net/@v/v0.9.0.mod')), - 'c5d6851ede50ec1c001afb763040194b68961bf06997e2605e8bf06dcd2aeb2e') - - @skipIfNoNetwork() - def test_gomodgit_url_subdir(self): - urls = ['gomodgit://github.com/Azure/azure-sdk-for-go/sdk/storage/azblob;version=v1.0.0;' - 'repo=github.com/Azure/azure-sdk-for-go;subdir=sdk/storage/azblob;' - 'srcrev=ec928e0ed34db682b3f783d3739d1c538142e0c3'] - - fetcher = bb.fetch2.Fetch(urls, self.d) - ud = fetcher.ud[urls[0]] - self.assertEqual(ud.host, 'github.com') - self.assertEqual(ud.path, '/Azure/azure-sdk-for-go') - self.assertEqual(ud.parm['subpath'], 'sdk/storage/azblob') - self.assertEqual(ud.name, 'github.com/Azure/azure-sdk-for-go/sdk/storage/azblob@v1.0.0') - self.assertEqual(self.d.getVar('SRCREV_github.com/Azure/azure-sdk-for-go/sdk/storage/azblob@v1.0.0'), 'ec928e0ed34db682b3f783d3739d1c538142e0c3') - - fetcher.download() - self.assertTrue(os.path.exists(ud.localpath)) - - fetcher.unpack(self.unpackdir) - vcsdir 
= os.path.join(self.unpackdir, 'pkg/mod/cache/vcs') - self.assertTrue(os.path.exists(os.path.join(vcsdir, 'd31d6145676ed3066ce573a8198f326dea5be45a43b3d8f41ce7787fd71d66b3'))) - downloaddir = os.path.join(self.unpackdir, 'pkg/mod/cache/download') - self.assertTrue(os.path.exists(os.path.join(downloaddir, 'github.com/!azure/azure-sdk-for-go/sdk/storage/azblob/@v/v1.0.0.zip'))) - self.assertTrue(os.path.exists(os.path.join(downloaddir, 'github.com/!azure/azure-sdk-for-go/sdk/storage/azblob/@v/v1.0.0.mod'))) - self.assertEqual(bb.utils.sha256_file(os.path.join(downloaddir, 'github.com/!azure/azure-sdk-for-go/sdk/storage/azblob/@v/v1.0.0.mod')), - '7873b8544842329b4f385a3aa6cf82cc2bc8defb41a04fa5291c35fd5900e873') - - @skipIfNoNetwork() - def test_gomodgit_url_srcrev_var(self): - urls = ['gomodgit://gopkg.in/ini.v1;version=v1.67.0'] - self.d.setVar('SRCREV_gopkg.in/ini.v1@v1.67.0', 'b2f570e5b5b844226bbefe6fb521d891f529a951') - - fetcher = bb.fetch2.Fetch(urls, self.d) - ud = fetcher.ud[urls[0]] - self.assertEqual(ud.host, 'gopkg.in') - self.assertEqual(ud.path, '/ini.v1') - self.assertEqual(ud.name, 'gopkg.in/ini.v1@v1.67.0') - self.assertEqual(ud.parm['srcrev'], 'b2f570e5b5b844226bbefe6fb521d891f529a951') - - fetcher.download() - fetcher.unpack(self.unpackdir) - vcsdir = os.path.join(self.unpackdir, 'pkg/mod/cache/vcs') - self.assertTrue(os.path.exists(os.path.join(vcsdir, 'b7879a4be9ba8598851b8278b14c4f71a8316be64913298d1639cce6bde59bc3'))) - downloaddir = os.path.join(self.unpackdir, 'pkg/mod/cache/download') - self.assertTrue(os.path.exists(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.zip'))) - self.assertTrue(os.path.exists(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.mod'))) - self.assertEqual(bb.utils.sha256_file(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.mod')), - '13aedd85db8e555104108e0e613bb7e4d1242af7f27c15423dd9ab63b60b72a1') - - @skipIfNoNetwork() - def test_gomodgit_url_no_go_mod_in_module(self): - urls = ['gomodgit://gopkg.in/ini.v1;version=v1.67.0;' - 'srcrev=b2f570e5b5b844226bbefe6fb521d891f529a951'] - - fetcher = bb.fetch2.Fetch(urls, self.d) - ud = fetcher.ud[urls[0]] - self.assertEqual(ud.host, 'gopkg.in') - self.assertEqual(ud.path, '/ini.v1') - self.assertEqual(ud.name, 'gopkg.in/ini.v1@v1.67.0') - self.assertEqual(self.d.getVar('SRCREV_gopkg.in/ini.v1@v1.67.0'), 'b2f570e5b5b844226bbefe6fb521d891f529a951') - - fetcher.download() - fetcher.unpack(self.unpackdir) - vcsdir = os.path.join(self.unpackdir, 'pkg/mod/cache/vcs') - self.assertTrue(os.path.exists(os.path.join(vcsdir, 'b7879a4be9ba8598851b8278b14c4f71a8316be64913298d1639cce6bde59bc3'))) - downloaddir = os.path.join(self.unpackdir, 'pkg/mod/cache/download') - self.assertTrue(os.path.exists(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.zip'))) - self.assertTrue(os.path.exists(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.mod'))) - self.assertEqual(bb.utils.sha256_file(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.mod')), - '13aedd85db8e555104108e0e613bb7e4d1242af7f27c15423dd9ab63b60b72a1') - - @skipIfNoNetwork() - def test_gomodgit_url_host_only(self): - urls = ['gomodgit://go.opencensus.io;version=v0.24.0;' - 'repo=github.com/census-instrumentation/opencensus-go;' - 'srcrev=b1a01ee95db0e690d91d7193d037447816fae4c5'] - - fetcher = bb.fetch2.Fetch(urls, self.d) - ud = fetcher.ud[urls[0]] - self.assertEqual(ud.host, 'github.com') - self.assertEqual(ud.path, '/census-instrumentation/opencensus-go') - self.assertEqual(ud.name, 'go.opencensus.io@v0.24.0') - 
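- # go.opencensus.io is a vanity import path: repo= redirects the fetch to - # the backing GitHub repository, while ud.name and the SRCREV_<name> - # variable stay keyed on the module path and version, as checked here.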
self.assertEqual(self.d.getVar('SRCREV_go.opencensus.io@v0.24.0'), 'b1a01ee95db0e690d91d7193d037447816fae4c5') - - fetcher.download() - fetcher.unpack(self.unpackdir) - vcsdir = os.path.join(self.unpackdir, 'pkg/mod/cache/vcs') - self.assertTrue(os.path.exists(os.path.join(vcsdir, 'aae3ac7b2122ed3345654e6327855e9682f4a5350d63e93dbcfc51c4419df0e1'))) - downloaddir = os.path.join(self.unpackdir, 'pkg/mod/cache/download') - self.assertTrue(os.path.exists(os.path.join(downloaddir, 'go.opencensus.io/@v/v0.24.0.zip'))) - self.assertTrue(os.path.exists(os.path.join(downloaddir, 'go.opencensus.io/@v/v0.24.0.mod'))) - self.assertEqual(bb.utils.sha256_file(os.path.join(downloaddir, 'go.opencensus.io/@v/v0.24.0.mod')), - '0dc9ccc660ad21cebaffd548f2cc6efa27891c68b4fbc1f8a3893b00f1acec96') diff --git a/bitbake/lib/bb/tests/filter.py b/bitbake/lib/bb/tests/filter.py deleted file mode 100644 index 245df7b22b..0000000000 --- a/bitbake/lib/bb/tests/filter.py +++ /dev/null @@ -1,88 +0,0 @@ -# -# Copyright (C) 2025 Garmin Ltd. or its subsidiaries -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import unittest -import bb.filter - - -class BuiltinFilterTest(unittest.TestCase): - def test_disallowed_builtins(self): - with self.assertRaises(NameError): - val = bb.filter.apply_filters("1", ["open('foo.txt', 'rb')"]) - - def test_prefix(self): - val = bb.filter.apply_filters("1 2 3", ["prefix(val, 'a')"]) - self.assertEqual(val, "a1 a2 a3") - - val = bb.filter.apply_filters("", ["prefix(val, 'a')"]) - self.assertEqual(val, "") - - def test_suffix(self): - val = bb.filter.apply_filters("1 2 3", ["suffix(val, 'b')"]) - self.assertEqual(val, "1b 2b 3b") - - val = bb.filter.apply_filters("", ["suffix(val, 'b')"]) - self.assertEqual(val, "") - - def test_sort(self): - val = bb.filter.apply_filters("z y x", ["sort(val)"]) - self.assertEqual(val, "x y z") - - val = bb.filter.apply_filters("", ["sort(val)"]) - self.assertEqual(val, "") - - def test_identity(self): - val = bb.filter.apply_filters("1 2 3", ["val"]) - self.assertEqual(val, "1 2 3") - - val = bb.filter.apply_filters("123", ["val"]) - self.assertEqual(val, "123") - - def test_empty(self): - val = bb.filter.apply_filters("1 2 3", ["", "prefix(val, 'a')", ""]) - self.assertEqual(val, "a1 a2 a3") - - def test_nested(self): - val = bb.filter.apply_filters("1 2 3", ["prefix(prefix(val, 'a'), 'b')"]) - self.assertEqual(val, "ba1 ba2 ba3") - - val = bb.filter.apply_filters("1 2 3", ["prefix(prefix(val, 'b'), 'a')"]) - self.assertEqual(val, "ab1 ab2 ab3") - - def test_filter_order(self): - val = bb.filter.apply_filters("1 2 3", ["prefix(val, 'a')", "prefix(val, 'b')"]) - self.assertEqual(val, "ba1 ba2 ba3") - - val = bb.filter.apply_filters("1 2 3", ["prefix(val, 'b')", "prefix(val, 'a')"]) - self.assertEqual(val, "ab1 ab2 ab3") - - val = bb.filter.apply_filters("1 2 3", ["prefix(val, 'a')", "suffix(val, 'b')"]) - self.assertEqual(val, "a1b a2b a3b") - - val = bb.filter.apply_filters("1 2 3", ["suffix(val, 'b')", "prefix(val, 'a')"]) - self.assertEqual(val, "a1b a2b a3b") - - def test_remove(self): - val = bb.filter.apply_filters("1 2 3", ["remove(val, ['2'])"]) - self.assertEqual(val, "1 3") - - val = bb.filter.apply_filters("1,2,3", ["remove(val, ['2'], ',')"]) - self.assertEqual(val, "1,3") - - val = bb.filter.apply_filters("1 2 3", ["remove(val, ['4'])"]) - self.assertEqual(val, "1 2 3") - - val = bb.filter.apply_filters("1 2 3", ["remove(val, ['1', '2'])"]) - self.assertEqual(val, "3") - - val = bb.filter.apply_filters("1 2 3", ["remove(val, '2')"]) - 
self.assertEqual(val, "1 3") - - val = bb.filter.apply_filters("1 2 3", ["remove(val, '4')"]) - self.assertEqual(val, "1 2 3") - - val = bb.filter.apply_filters("1 2 3", ["remove(val, '1 2')"]) - self.assertEqual(val, "3") diff --git a/bitbake/lib/bb/tests/parse.py b/bitbake/lib/bb/tests/parse.py deleted file mode 100644 index d3867ece98..0000000000 --- a/bitbake/lib/bb/tests/parse.py +++ /dev/null @@ -1,510 +0,0 @@ -# -# BitBake Test for lib/bb/parse/ -# -# Copyright (C) 2015 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import unittest -import tempfile -import logging -import bb -import os - -logger = logging.getLogger('BitBake.TestParse') - -import bb.parse -import bb.data -import bb.siggen - -class ParseTest(unittest.TestCase): - - testfile = """ -A = "1" -B = "2" -do_install() { - echo "hello" -} - -C = "3" -""" - - def setUp(self): - self.origdir = os.getcwd() - self.d = bb.data.init() - bb.parse.siggen = bb.siggen.init(self.d) - - def tearDown(self): - os.chdir(self.origdir) - - def parsehelper(self, content, suffix = ".bb"): - f = tempfile.NamedTemporaryFile(suffix = suffix) - f.write(bytes(content, "utf-8")) - f.flush() - os.chdir(os.path.dirname(f.name)) - return f - - def test_parse_simple(self): - with self.parsehelper(self.testfile) as f: - d = bb.parse.handle(f.name, self.d)[''] - self.assertEqual(d.getVar("A"), "1") - self.assertEqual(d.getVar("B"), "2") - self.assertEqual(d.getVar("C"), "3") - - def test_parse_incomplete_function(self): - testfileB = self.testfile.replace("}", "") - with self.parsehelper(testfileB) as f: - with self.assertRaises(bb.parse.ParseError): - d = bb.parse.handle(f.name, self.d)[''] - - unsettest = """ -A = "1" -B = "2" -B[flag] = "3" - -unset A -unset B[flag] -""" - - def test_parse_unset(self): - with self.parsehelper(self.unsettest) as f: - d = bb.parse.handle(f.name, self.d)[''] - self.assertEqual(d.getVar("A"), None) - self.assertEqual(d.getVarFlag("A","flag"), None) - self.assertEqual(d.getVar("B"), "2") - - defaulttest = """ -A = "set value" -A ??= "default value" - -A[flag_set_vs_question] = "set flag" -A[flag_set_vs_question] ?= "question flag" - -A[flag_set_vs_default] = "set flag" -A[flag_set_vs_default] ??= "default flag" - -A[flag_question] ?= "question flag" - -A[flag_default] ??= "default flag" - -A[flag_question_vs_default] ?= "question flag" -A[flag_question_vs_default] ??= "default flag" - -A[flag_default_vs_question] ??= "default flag" -A[flag_default_vs_question] ?= "question flag" - -A[flag_set_question_default] = "set flag" -A[flag_set_question_default] ?= "question flag" -A[flag_set_question_default] ??= "default flag" - -A[flag_set_default_question] = "set flag" -A[flag_set_default_question] ??= "default flag" -A[flag_set_default_question] ?= "question flag" - -A[flag_set_twice] = "set flag first" -A[flag_set_twice] = "set flag second" - -A[flag_question_twice] ?= "question flag first" -A[flag_question_twice] ?= "question flag second" - -A[flag_default_twice] ??= "default flag first" -A[flag_default_twice] ??= "default flag second" -""" - def test_parse_defaulttest(self): - with self.parsehelper(self.defaulttest) as f: - d = bb.parse.handle(f.name, self.d)[''] - self.assertEqual(d.getVar("A"), "set value") - self.assertEqual(d.getVarFlag("A","flag_set_vs_question"), "set flag") - self.assertEqual(d.getVarFlag("A","flag_set_vs_default"), "set flag") - self.assertEqual(d.getVarFlag("A","flag_question"), "question flag") - self.assertEqual(d.getVarFlag("A","flag_default"), "default flag") - 
self.assertEqual(d.getVarFlag("A","flag_question_vs_default"), "question flag") - self.assertEqual(d.getVarFlag("A","flag_default_vs_question"), "question flag") - self.assertEqual(d.getVarFlag("A","flag_set_question_default"), "set flag") - self.assertEqual(d.getVarFlag("A","flag_set_default_question"), "set flag") - self.assertEqual(d.getVarFlag("A","flag_set_twice"), "set flag second") - self.assertEqual(d.getVarFlag("A","flag_question_twice"), "question flag first") - self.assertEqual(d.getVarFlag("A","flag_default_twice"), "default flag second") - - exporttest = """ -A = "a" -export B = "b" -export C -exportD = "d" -""" - - def test_parse_exports(self): - with self.parsehelper(self.exporttest) as f: - d = bb.parse.handle(f.name, self.d)[''] - self.assertEqual(d.getVar("A"), "a") - self.assertIsNone(d.getVarFlag("A", "export")) - self.assertEqual(d.getVar("B"), "b") - self.assertEqual(d.getVarFlag("B", "export"), 1) - self.assertIsNone(d.getVar("C")) - self.assertEqual(d.getVarFlag("C", "export"), 1) - self.assertIsNone(d.getVar("D")) - self.assertIsNone(d.getVarFlag("D", "export")) - self.assertEqual(d.getVar("exportD"), "d") - self.assertIsNone(d.getVarFlag("exportD", "export")) - - overridetest = """ -RRECOMMENDS:${PN} = "a" -RRECOMMENDS:${PN}:libc = "b" -OVERRIDES = "libc:${PN}" -PN = "gtk+" -""" - - def test_parse_overrides(self): - with self.parsehelper(self.overridetest) as f: - d = bb.parse.handle(f.name, self.d)[''] - self.assertEqual(d.getVar("RRECOMMENDS"), "b") - bb.data.expandKeys(d) - self.assertEqual(d.getVar("RRECOMMENDS"), "b") - d.setVar("RRECOMMENDS:gtk+", "c") - self.assertEqual(d.getVar("RRECOMMENDS"), "c") - - overridetest2 = """ -EXTRA_OECONF = "" -EXTRA_OECONF:class-target = "b" -EXTRA_OECONF:append = " c" -""" - - def test_parse_overrides2(self): - with self.parsehelper(self.overridetest2) as f: - d = bb.parse.handle(f.name, self.d)[''] - d.appendVar("EXTRA_OECONF", " d") - d.setVar("OVERRIDES", "class-target") - self.assertEqual(d.getVar("EXTRA_OECONF"), "b c d") - - overridetest3 = """ -DESCRIPTION = "A" -DESCRIPTION:${PN}-dev = "${DESCRIPTION} B" -PN = "bc" -""" - - def test_parse_combinations(self): - with self.parsehelper(self.overridetest3) as f: - d = bb.parse.handle(f.name, self.d)[''] - bb.data.expandKeys(d) - self.assertEqual(d.getVar("DESCRIPTION:bc-dev"), "A B") - d.setVar("DESCRIPTION", "E") - d.setVar("DESCRIPTION:bc-dev", "C D") - d.setVar("OVERRIDES", "bc-dev") - self.assertEqual(d.getVar("DESCRIPTION"), "C D") - - classextend = """ -VAR_var:override1 = "B" -EXTRA = ":override1" -OVERRIDES = "nothing${EXTRA}" - -BBCLASSEXTEND = "###CLASS###" -""" - classextend_bbclass = """ -EXTRA = "" -python () { - d.renameVar("VAR_var", "VAR_var2") -} -""" - - # - # Test based upon a real world data corruption issue. One - # data store changing a variable poked through into a different data - # store. This test case replicates that issue where the value 'B' would - # become unset/disappear. 
- # - def test_parse_classextend_contamination(self): - self.d.setVar("__bbclasstype", "recipe") - with self.parsehelper(self.classextend_bbclass, suffix=".bbclass") as cls: - #clsname = os.path.basename(cls.name).replace(".bbclass", "") - self.classextend = self.classextend.replace("###CLASS###", cls.name) - with self.parsehelper(self.classextend) as f: - alldata = bb.parse.handle(f.name, self.d) - d1 = alldata[''] - d2 = alldata[cls.name] - self.assertEqual(d1.getVar("VAR_var"), "B") - self.assertEqual(d2.getVar("VAR_var"), None) - - addtask_deltask = """ -addtask do_patch after do_foo after do_unpack before do_configure before do_compile -addtask do_fetch2 do_patch2 - -addtask do_myplaintask -addtask do_myplaintask2 -deltask do_myplaintask2 -addtask do_mytask# comment -addtask do_mytask2 # comment2 -addtask do_mytask3 -deltask do_mytask3# comment -deltask do_mytask4 # comment2 - -# Ensure a missing task prefix on after works -addtask do_mytask5 after mytask - -MYVAR = "do_patch" -EMPTYVAR = "" -deltask do_fetch ${MYVAR} ${EMPTYVAR} -deltask ${EMPTYVAR} -""" - def test_parse_addtask_deltask(self): - with self.parsehelper(self.addtask_deltask) as f: - d = bb.parse.handle(f.name, self.d)[''] - self.assertSequenceEqual(['do_fetch2', 'do_patch2', 'do_myplaintask', 'do_mytask', 'do_mytask2', 'do_mytask5'], bb.build.listtasks(d)) - self.assertEqual(['do_mytask'], d.getVarFlag("do_mytask5", "deps")) - - broken_multiline_comment = """ -# First line of comment \\ -# Second line of comment \\ - -""" - def test_parse_broken_multiline_comment(self): - with self.parsehelper(self.broken_multiline_comment) as f: - with self.assertRaises(bb.BBHandledException): - d = bb.parse.handle(f.name, self.d)[''] - - comment_in_var = """ -VAR = " \\ - SOMEVAL \\ -# some comment \\ - SOMEOTHERVAL \\ -" -""" - def test_parse_comment_in_var(self): - with self.parsehelper(self.comment_in_var) as f: - with self.assertRaises(bb.BBHandledException): - d = bb.parse.handle(f.name, self.d)[''] - - at_sign_in_var_flag = """ -A[flag@.service] = "nonet" -B[flag@.target] = "ntb" -C[f] = "flag" - -unset A[flag@.service] -""" - def test_parse_at_sign_in_var_flag(self): - with self.parsehelper(self.at_sign_in_var_flag) as f: - d = bb.parse.handle(f.name, self.d)[''] - self.assertEqual(d.getVar("A"), None) - self.assertEqual(d.getVar("B"), None) - self.assertEqual(d.getVarFlag("A","flag@.service"), None) - self.assertEqual(d.getVarFlag("B","flag@.target"), "ntb") - self.assertEqual(d.getVarFlag("C","f"), "flag") - - def test_parse_invalid_at_sign_in_var_flag(self): - invalid_at_sign = self.at_sign_in_var_flag.replace("B[f", "B[@f") - with self.parsehelper(invalid_at_sign) as f: - with self.assertRaises(bb.parse.ParseError): - d = bb.parse.handle(f.name, self.d)[''] - - export_function_recipe = """ -inherit someclass -""" - - export_function_recipe2 = """ -inherit someclass - -do_compile () { - false -} - -python do_compilepython () { - bb.note("Something else") -} - -""" - export_function_class = """ -someclass_do_compile() { - true -} - -python someclass_do_compilepython () { - bb.note("Something") -} - -EXPORT_FUNCTIONS do_compile do_compilepython -""" - - export_function_class2 = """ -secondclass_do_compile() { - true -} - -python secondclass_do_compilepython () { - bb.note("Something") -} - -EXPORT_FUNCTIONS do_compile do_compilepython -""" - - def test_parse_export_functions(self): - def check_function_flags(d): - self.assertEqual(d.getVarFlag("do_compile", "func"), 1) - self.assertEqual(d.getVarFlag("do_compilepython", 
"func"), 1) - self.assertEqual(d.getVarFlag("do_compile", "python"), None) - self.assertEqual(d.getVarFlag("do_compilepython", "python"), "1") - - with tempfile.TemporaryDirectory() as tempdir: - self.d.setVar("__bbclasstype", "recipe") - recipename = tempdir + "/recipe.bb" - os.makedirs(tempdir + "/classes") - with open(tempdir + "/classes/someclass.bbclass", "w") as f: - f.write(self.export_function_class) - with open(tempdir + "/classes/secondclass.bbclass", "w") as f: - f.write(self.export_function_class2) - - with open(recipename, "w") as f: - f.write(self.export_function_recipe) - os.chdir(tempdir) - d = bb.parse.handle(recipename, bb.data.createCopy(self.d))[''] - self.assertIn("someclass_do_compile", d.getVar("do_compile")) - self.assertIn("someclass_do_compilepython", d.getVar("do_compilepython")) - check_function_flags(d) - - recipename2 = tempdir + "/recipe2.bb" - with open(recipename2, "w") as f: - f.write(self.export_function_recipe2) - - d = bb.parse.handle(recipename2, bb.data.createCopy(self.d))[''] - self.assertNotIn("someclass_do_compile", d.getVar("do_compile")) - self.assertNotIn("someclass_do_compilepython", d.getVar("do_compilepython")) - self.assertIn("false", d.getVar("do_compile")) - self.assertIn("else", d.getVar("do_compilepython")) - check_function_flags(d) - - with open(recipename, "a+") as f: - f.write("\ninherit secondclass\n") - with open(recipename2, "a+") as f: - f.write("\ninherit secondclass\n") - - d = bb.parse.handle(recipename, bb.data.createCopy(self.d))[''] - self.assertIn("secondclass_do_compile", d.getVar("do_compile")) - self.assertIn("secondclass_do_compilepython", d.getVar("do_compilepython")) - check_function_flags(d) - - d = bb.parse.handle(recipename2, bb.data.createCopy(self.d))[''] - self.assertNotIn("someclass_do_compile", d.getVar("do_compile")) - self.assertNotIn("someclass_do_compilepython", d.getVar("do_compilepython")) - self.assertIn("false", d.getVar("do_compile")) - self.assertIn("else", d.getVar("do_compilepython")) - check_function_flags(d) - - export_function_unclosed_tab = """ -do_compile () { - bb.note("Something") -\t} -""" - export_function_unclosed_space = """ -do_compile () { - bb.note("Something") - } -""" - export_function_residue = """ -do_compile () { - bb.note("Something") -} - -include \\ -""" - - def test_unclosed_functions(self): - def test_helper(content, expected_error): - with tempfile.TemporaryDirectory() as tempdir: - recipename = tempdir + "/recipe_unclosed.bb" - with open(recipename, "w") as f: - f.write(content) - os.chdir(tempdir) - with self.assertRaises(bb.parse.ParseError) as error: - bb.parse.handle(recipename, bb.data.createCopy(self.d)) - self.assertIn(expected_error, str(error.exception)) - - with tempfile.TemporaryDirectory() as tempdir: - test_helper(self.export_function_unclosed_tab, "Unparsed lines from unclosed function") - test_helper(self.export_function_unclosed_space, "Unparsed lines from unclosed function") - test_helper(self.export_function_residue, "Unparsed lines") - - recipename_closed = tempdir + "/recipe_closed.bb" - with open(recipename_closed, "w") as in_file: - lines = self.export_function_unclosed_tab.split("\n") - lines[3] = "}" - in_file.write("\n".join(lines)) - bb.parse.handle(recipename_closed, bb.data.createCopy(self.d)) - - special_character_assignment = """ -A+="a" -A+ = "b" -+ = "c" -""" - ambigous_assignment = """ -+= "d" -""" - def test_parse_special_character_assignment(self): - with self.parsehelper(self.special_character_assignment) as f: - d = 
bb.parse.handle(f.name, self.d)[''] - self.assertEqual(d.getVar("A"), " a") - self.assertEqual(d.getVar("A+"), "b") - self.assertEqual(d.getVar("+"), "c") - - with self.parsehelper(self.ambigous_assignment) as f: - with self.assertRaises(bb.parse.ParseError) as error: - bb.parse.handle(f.name, self.d) - self.assertIn("Empty variable name in assignment", str(error.exception)) - - someconf1 = """ -EXTRA_OECONF:append = " foo" -""" - - someconf2 = """ -EXTRA_OECONF:append = " bar" -""" - - someconf3 = """ -EXTRA_OECONF:append = " foobar" -""" - - def test_include_and_require(self): - def test_helper(content, result): - with self.parsehelper(content) as f: - if isinstance(result, type) and issubclass(result, Exception): - with self.assertRaises(result): - d = bb.parse.handle(f.name, bb.data.createCopy(self.d))[''] - else: - d = bb.parse.handle(f.name, bb.data.createCopy(self.d))[''] - self.assertEqual(d.getVar("EXTRA_OECONF"), result) - - with tempfile.TemporaryDirectory() as tempdir: - os.makedirs(tempdir + "/conf1") - os.makedirs(tempdir + "/conf2") - - with open(tempdir + "/conf1/some.conf", "w") as f: - f.write(self.someconf1) - with open(tempdir + "/conf2/some.conf", "w") as f: - f.write(self.someconf2) - with open(tempdir + "/conf2/some3.conf", "w") as f: - f.write(self.someconf3) - - self.d.setVar("BBPATH", tempdir + "/conf1" + ":" + tempdir + "/conf2") - - test_helper("include some.conf", " foo") - test_helper("include someother.conf", None) - test_helper("include some3.conf", " foobar") - test_helper("include ${@''}", None) - test_helper("include " + tempdir + "/conf2/some.conf", " bar") - - test_helper("require some.conf", " foo") - test_helper("require someother.conf", bb.parse.ParseError) - test_helper("require some3.conf", " foobar") - test_helper("require ${@''}", None) - test_helper("require " + tempdir + "/conf2/some.conf", " bar") - - test_helper("include_all some.conf", " foo bar") - test_helper("include_all someother.conf", None) - test_helper("include_all some3.conf", " foobar") - - self.d.setVar("BBPATH", tempdir + "/conf2" + ":" + tempdir + "/conf1") - - test_helper("include some.conf", " bar") - test_helper("include some3.conf", " foobar") - test_helper("require some.conf", " bar") - test_helper("require some3.conf", " foobar") - test_helper("include_all some.conf", " bar foo") - test_helper("include_all some3.conf", " foobar") diff --git a/bitbake/lib/bb/tests/runqueue-tests/classes/base.bbclass b/bitbake/lib/bb/tests/runqueue-tests/classes/base.bbclass deleted file mode 100644 index 3a3db55d2c..0000000000 --- a/bitbake/lib/bb/tests/runqueue-tests/classes/base.bbclass +++ /dev/null @@ -1,262 +0,0 @@ -SLOWTASKS ??= "" -SSTATEVALID ??= "" - -def stamptask(d): - import time - - thistask = d.expand("${PN}:${BB_CURRENTTASK}") - stampname = d.expand("${TOPDIR}/%s.run" % thistask) - with open(stampname, "a+") as f: - f.write(d.getVar("BB_UNIHASH") + "\n") - - if d.getVar("BB_CURRENT_MC") != "": - thistask = d.expand("${BB_CURRENT_MC}:${PN}:${BB_CURRENTTASK}") - if thistask in d.getVar("SLOWTASKS").split(): - bb.note("Slowing task %s" % thistask) - time.sleep(0.5) - if d.getVar("BB_HASHSERVE"): - task = d.getVar("BB_CURRENTTASK") - if task in ['package', 'package_qa', 'packagedata', 'package_write_ipk', 'package_write_rpm', 'populate_lic', 'populate_sysroot']: - bb.parse.siggen.report_unihash(os.getcwd(), d.getVar("BB_CURRENTTASK"), d) - - with open(d.expand("${TOPDIR}/task.log"), "a+") as f: - f.write(thistask + "\n") - - -def sstate_output_hash(path, sigfile, task, d): - 
import hashlib - h = hashlib.sha256() - h.update(d.expand("${PN}:${BB_CURRENTTASK}").encode('utf-8')) - return h.hexdigest() - -python do_fetch() { - # fetch - stamptask(d) -} -python do_unpack() { - # unpack - stamptask(d) -} -python do_patch() { - # patch - stamptask(d) -} -python do_populate_lic() { - # populate_lic - stamptask(d) -} -python do_prepare_recipe_sysroot() { - # prepare_recipe_sysroot - stamptask(d) -} -python do_configure() { - # configure - stamptask(d) -} -python do_compile() { - # compile - stamptask(d) -} -python do_install() { - # install - stamptask(d) -} -python do_populate_sysroot() { - # populate_sysroot - stamptask(d) -} -python do_package() { - # package - stamptask(d) -} -python do_package_write_ipk() { - # package_write_ipk - stamptask(d) -} -python do_package_write_rpm() { - # package_write_rpm - stamptask(d) -} -python do_packagedata() { - # packagedata - stamptask(d) -} -python do_package_qa() { - # package_qa - stamptask(d) -} -python do_build() { - # build - stamptask(d) -} -do_prepare_recipe_sysroot[deptask] = "do_populate_sysroot" -do_package[deptask] += "do_packagedata" -do_build[recrdeptask] += "do_deploy" -do_build[recrdeptask] += "do_package_write_ipk" -do_build[recrdeptask] += "do_package_write_rpm" -do_package_qa[rdeptask] = "do_packagedata" -do_populate_lic_deploy[recrdeptask] += "do_populate_lic do_deploy" - -DEBIANRDEP = "do_packagedata" -do_package_write_ipk[rdeptask] = "${DEBIANRDEP}" -do_package_write_rpm[rdeptask] = "${DEBIANRDEP}" - -addtask fetch -addtask unpack after do_fetch -addtask patch after do_unpack -addtask prepare_recipe_sysroot after do_patch -addtask configure after do_prepare_recipe_sysroot -addtask compile after do_configure -addtask install after do_compile -addtask populate_sysroot after do_install -addtask package after do_install -addtask package_write_ipk after do_packagedata do_package -addtask package_write_rpm after do_packagedata do_package -addtask packagedata after do_package -addtask package_qa after do_package -addtask build after do_package_qa do_package_write_rpm do_package_write_ipk do_populate_sysroot - -python do_package_setscene() { - stamptask(d) -} -python do_package_qa_setscene() { - stamptask(d) -} -python do_package_write_ipk_setscene() { - stamptask(d) -} -python do_package_write_rpm_setscene() { - stamptask(d) -} -python do_packagedata_setscene() { - stamptask(d) -} -python do_populate_lic_setscene() { - stamptask(d) -} -python do_populate_sysroot_setscene() { - stamptask(d) -} - -addtask package_setscene -addtask package_qa_setscene -addtask package_write_ipk_setscene -addtask package_write_rpm_setscene -addtask packagedata_setscene -addtask populate_lic_setscene -addtask populate_sysroot_setscene - -BB_SETSCENE_DEPVALID = "setscene_depvalid" - -def setscene_depvalid(task, taskdependees, notneeded, d, log=None): - # taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME] - # task is included in taskdependees too - # Return - False - We need this dependency - # - True - We can skip this dependency - import re - - def logit(msg, log): - if log is not None: - log.append(msg) - else: - bb.debug(2, msg) - - logit("Considering setscene task: %s" % (str(taskdependees[task])), log) - - def isNativeCross(x): - return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross") - - # We only need to trigger populate_lic through direct dependencies - if taskdependees[task][1] == "do_populate_lic": - return True - - # We only need to 
trigger packagedata through direct dependencies - # but need to preserve packagedata on packagedata links - if taskdependees[task][1] == "do_packagedata": - for dep in taskdependees: - if taskdependees[dep][1] == "do_packagedata": - return False - return True - - for dep in taskdependees: - logit(" considering dependency: %s" % (str(taskdependees[dep])), log) - if task == dep: - continue - if dep in notneeded: - continue - # do_package_write_* and do_package doesn't need do_package - if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']: - continue - # do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies - if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_ipk', 'do_package_write_rpm']: - return False - # do_package/packagedata/package_qa don't need do_populate_sysroot - if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa']: - continue - # Native/Cross packages don't exist and are noexec anyway - if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']: - continue - - # This is due to the [depends] in useradd.bbclass complicating matters - # The logic *is* reversed here due to the way hard setscene dependencies are injected - if (taskdependees[task][1] == 'do_package' or taskdependees[task][1] == 'do_populate_sysroot') and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot': - continue - - # Consider sysroot depending on sysroot tasks - if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot': - # Native/Cross populate_sysroot need their dependencies - if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]): - return False - # Target populate_sysroot depended on by cross tools need to be installed - if isNativeCross(taskdependees[dep][0]): - return False - # Native/cross tools depended upon by target sysroot are not needed - # Add an exception for shadow-native as required by useradd.bbclass - if isNativeCross(taskdependees[task][0]) and taskdependees[task][0] != 'shadow-native': - continue - # Target populate_sysroot need their dependencies - return False - - - if taskdependees[dep][1] == "do_populate_lic": - continue - - # Safe fallthrough default - logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log) - return False - return True - -BB_HASHCHECK_FUNCTION = "sstate_checkhashes" - -def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, **kwargs): - - found = set() - missed = set() - - valid = d.getVar("SSTATEVALID").split() - - for tid in sorted(sq_data['hash']): - n = os.path.basename(bb.runqueue.fn_from_tid(tid)).split(".")[0] + ":do_" + bb.runqueue.taskname_from_tid(tid)[3:] - print(n) - stampfile = d.expand("${TOPDIR}/%s.run" % n.replace("do_", "")) - if n in valid: - bb.note("SState: Found valid sstate for %s" % n) - found.add(tid) - elif n + ":" + sq_data['hash'][tid] in valid: - bb.note("SState: Found valid sstate for %s" % n) - found.add(tid) - elif os.path.exists(stampfile): - with open(stampfile, "r") as f: - hash = f.readline().strip() - if hash == sq_data['hash'][tid]: - 
bb.note("SState: Found valid sstate for %s (already run)" % n) - found.add(tid) - else: - bb.note("SState: sstate hash didn't match previous run for %s (%s vs %s)" % (n, sq_data['hash'][tid], hash)) - missed.add(tid) - else: - missed.add(tid) - bb.note("SState: Found no valid sstate for %s (%s)" % (n, sq_data['hash'][tid])) - - return found - diff --git a/bitbake/lib/bb/tests/runqueue-tests/classes/image.bbclass b/bitbake/lib/bb/tests/runqueue-tests/classes/image.bbclass deleted file mode 100644 index da9ff11064..0000000000 --- a/bitbake/lib/bb/tests/runqueue-tests/classes/image.bbclass +++ /dev/null @@ -1,5 +0,0 @@ -do_rootfs[recrdeptask] += "do_package_write_deb do_package_qa" -do_rootfs[recrdeptask] += "do_package_write_ipk do_package_qa" -do_rootfs[recrdeptask] += "do_package_write_rpm do_package_qa -do_rootfs[recrdeptask] += "do_packagedata" -do_rootfs[recrdeptask] += "do_populate_lic" diff --git a/bitbake/lib/bb/tests/runqueue-tests/classes/native.bbclass b/bitbake/lib/bb/tests/runqueue-tests/classes/native.bbclass deleted file mode 100644 index 7eaaee54ad..0000000000 --- a/bitbake/lib/bb/tests/runqueue-tests/classes/native.bbclass +++ /dev/null @@ -1,2 +0,0 @@ -RECIPERDEPTASK = "do_populate_sysroot" -do_populate_sysroot[rdeptask] = "${RECIPERDEPTASK}" diff --git a/bitbake/lib/bb/tests/runqueue-tests/conf/bitbake.conf b/bitbake/lib/bb/tests/runqueue-tests/conf/bitbake.conf deleted file mode 100644 index 05d7fd07dd..0000000000 --- a/bitbake/lib/bb/tests/runqueue-tests/conf/bitbake.conf +++ /dev/null @@ -1,17 +0,0 @@ -CACHE = "${TOPDIR}/cache" -THISDIR = "${@os.path.dirname(d.getVar('FILE'))}" -COREBASE := "${@os.path.normpath(os.path.dirname(d.getVar('FILE')+'/../../'))}" -EXTRA_BBFILES ?= "" -BBFILES = "${COREBASE}/recipes/*.bb ${EXTRA_BBFILES}" -PROVIDES = "${PN}" -PN = "${@bb.parse.vars_from_file(d.getVar('FILE', False),d)[0]}" -PF = "${BB_CURRENT_MC}:${PN}" -export PATH -TMPDIR ??= "${TOPDIR}" -STAMP = "${TMPDIR}/stamps/${PN}" -T = "${TMPDIR}/workdir/${PN}/temp" -BB_NUMBER_THREADS = "4" - -BB_BASEHASH_IGNORE_VARS = "BB_CURRENT_MC BB_HASHSERVE TMPDIR TOPDIR SLOWTASKS SSTATEVALID FILE BB_CURRENTTASK" - -include conf/multiconfig/${BB_CURRENT_MC}.conf diff --git a/bitbake/lib/bb/tests/runqueue-tests/conf/multiconfig/mc-1.conf b/bitbake/lib/bb/tests/runqueue-tests/conf/multiconfig/mc-1.conf deleted file mode 100644 index f34b8dcccf..0000000000 --- a/bitbake/lib/bb/tests/runqueue-tests/conf/multiconfig/mc-1.conf +++ /dev/null @@ -1,2 +0,0 @@ -TMPDIR = "${TOPDIR}/mc1/" -BBMASK += "recipes/fails-mc/fails-mc1.bb" diff --git a/bitbake/lib/bb/tests/runqueue-tests/conf/multiconfig/mc_2.conf b/bitbake/lib/bb/tests/runqueue-tests/conf/multiconfig/mc_2.conf deleted file mode 100644 index c3360fc5c8..0000000000 --- a/bitbake/lib/bb/tests/runqueue-tests/conf/multiconfig/mc_2.conf +++ /dev/null @@ -1,2 +0,0 @@ -TMPDIR = "${TOPDIR}/mc2/" -BBMASK += "recipes/fails-mc/fails-mc2.bb" diff --git a/bitbake/lib/bb/tests/runqueue-tests/recipes/a1.bb b/bitbake/lib/bb/tests/runqueue-tests/recipes/a1.bb deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/bitbake/lib/bb/tests/runqueue-tests/recipes/b1.bb b/bitbake/lib/bb/tests/runqueue-tests/recipes/b1.bb deleted file mode 100644 index c0b288e5bc..0000000000 --- a/bitbake/lib/bb/tests/runqueue-tests/recipes/b1.bb +++ /dev/null @@ -1 +0,0 @@ -DEPENDS = "a1" \ No newline at end of file diff --git a/bitbake/lib/bb/tests/runqueue-tests/recipes/c1.bb b/bitbake/lib/bb/tests/runqueue-tests/recipes/c1.bb deleted file mode 100644 index 
e69de29bb2..0000000000 diff --git a/bitbake/lib/bb/tests/runqueue-tests/recipes/d1.bb b/bitbake/lib/bb/tests/runqueue-tests/recipes/d1.bb deleted file mode 100644 index 5ba197515b..0000000000 --- a/bitbake/lib/bb/tests/runqueue-tests/recipes/d1.bb +++ /dev/null @@ -1,3 +0,0 @@ -DEPENDS = "a1" - -do_package_setscene[depends] = "a1:do_populate_sysroot_setscene" diff --git a/bitbake/lib/bb/tests/runqueue-tests/recipes/e1.bb b/bitbake/lib/bb/tests/runqueue-tests/recipes/e1.bb deleted file mode 100644 index 1588bc8a59..0000000000 --- a/bitbake/lib/bb/tests/runqueue-tests/recipes/e1.bb +++ /dev/null @@ -1 +0,0 @@ -DEPENDS = "b1" \ No newline at end of file diff --git a/bitbake/lib/bb/tests/runqueue-tests/recipes/f1.bb b/bitbake/lib/bb/tests/runqueue-tests/recipes/f1.bb deleted file mode 100644 index 7b8fc592ab..0000000000 --- a/bitbake/lib/bb/tests/runqueue-tests/recipes/f1.bb +++ /dev/null @@ -1 +0,0 @@ -do_install[mcdepends] = "mc:mc-1:mc_2:a1:do_build" diff --git a/bitbake/lib/bb/tests/runqueue-tests/recipes/fails-mc/fails-mc1.bb b/bitbake/lib/bb/tests/runqueue-tests/recipes/fails-mc/fails-mc1.bb deleted file mode 100644 index eed69c805a..0000000000 --- a/bitbake/lib/bb/tests/runqueue-tests/recipes/fails-mc/fails-mc1.bb +++ /dev/null @@ -1,5 +0,0 @@ -python () { - if d.getVar("BB_CURRENT_MC") == "mc-1": - bb.fatal("Multiconfig is mc-1") -} - diff --git a/bitbake/lib/bb/tests/runqueue-tests/recipes/fails-mc/fails-mc2.bb b/bitbake/lib/bb/tests/runqueue-tests/recipes/fails-mc/fails-mc2.bb deleted file mode 100644 index 3c172ef974..0000000000 --- a/bitbake/lib/bb/tests/runqueue-tests/recipes/fails-mc/fails-mc2.bb +++ /dev/null @@ -1,4 +0,0 @@ -python () { - if d.getVar("BB_CURRENT_MC") == "mc_2": - bb.fatal("Multiconfig is mc_2") -} diff --git a/bitbake/lib/bb/tests/runqueue-tests/recipes/g1.bb b/bitbake/lib/bb/tests/runqueue-tests/recipes/g1.bb deleted file mode 100644 index 3c7dca0257..0000000000 --- a/bitbake/lib/bb/tests/runqueue-tests/recipes/g1.bb +++ /dev/null @@ -1,2 +0,0 @@ -do_build[mcdepends] = "mc::mc-1:h1:do_invalid" - diff --git a/bitbake/lib/bb/tests/runqueue-tests/recipes/h1.bb b/bitbake/lib/bb/tests/runqueue-tests/recipes/h1.bb deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/bitbake/lib/bb/tests/runqueue.py b/bitbake/lib/bb/tests/runqueue.py deleted file mode 100644 index 74f5ded2e6..0000000000 --- a/bitbake/lib/bb/tests/runqueue.py +++ /dev/null @@ -1,410 +0,0 @@ -# -# BitBake Tests for runqueue task processing -# -# Copyright (C) 2019 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import unittest -import os -import tempfile -import subprocess -import sys -import time - -# -# TODO: -# Add tests on task ordering (X happens before Y after Z) -# - -class RunQueueTests(unittest.TestCase): - - alltasks = ['package', 'fetch', 'unpack', 'patch', 'prepare_recipe_sysroot', 'configure', - 'compile', 'install', 'packagedata', 'package_qa', 'package_write_rpm', 'package_write_ipk', - 'populate_sysroot', 'build'] - a1_sstatevalid = "a1:do_package a1:do_package_qa a1:do_packagedata a1:do_package_write_ipk a1:do_package_write_rpm a1:do_populate_lic a1:do_populate_sysroot" - b1_sstatevalid = "b1:do_package b1:do_package_qa b1:do_packagedata b1:do_package_write_ipk b1:do_package_write_rpm b1:do_populate_lic b1:do_populate_sysroot" - - def run_bitbakecmd(self, cmd, builddir, sstatevalid="", slowtasks="", extraenv=None, cleanup=False, allowfailure=False): - env = os.environ.copy() - env["BBPATH"] = os.path.realpath(os.path.join(os.path.dirname(__file__), 
"runqueue-tests")) - env["BB_ENV_PASSTHROUGH_ADDITIONS"] = "SSTATEVALID SLOWTASKS TOPDIR" - env["SSTATEVALID"] = sstatevalid - env["SLOWTASKS"] = slowtasks - env["TOPDIR"] = builddir - if extraenv: - for k in extraenv: - env[k] = extraenv[k] - env["BB_ENV_PASSTHROUGH_ADDITIONS"] = env["BB_ENV_PASSTHROUGH_ADDITIONS"] + " " + k - try: - output = subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT,universal_newlines=True, cwd=builddir) - print(output) - except subprocess.CalledProcessError as e: - if allowfailure: - return e.output - self.fail("Command %s failed with %s" % (cmd, e.output)) - tasks = [] - tasklog = builddir + "/task.log" - if os.path.exists(tasklog): - with open(tasklog, "r") as f: - tasks = [line.rstrip() for line in f] - if cleanup: - os.remove(tasklog) - return tasks - - def test_no_setscenevalid(self): - with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir: - cmd = ["bitbake", "a1"] - sstatevalid = "" - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid) - expected = ['a1:' + x for x in self.alltasks] - self.assertEqual(set(tasks), set(expected)) - - self.shutdown(tempdir) - - def test_single_setscenevalid(self): - with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir: - cmd = ["bitbake", "a1"] - sstatevalid = "a1:do_package" - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid) - expected = ['a1:package_setscene', 'a1:fetch', 'a1:unpack', 'a1:patch', 'a1:prepare_recipe_sysroot', 'a1:configure', - 'a1:compile', 'a1:install', 'a1:packagedata', 'a1:package_qa', 'a1:package_write_rpm', 'a1:package_write_ipk', - 'a1:populate_sysroot', 'a1:build'] - self.assertEqual(set(tasks), set(expected)) - - self.shutdown(tempdir) - - def test_intermediate_setscenevalid(self): - with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir: - cmd = ["bitbake", "a1"] - sstatevalid = "a1:do_package a1:do_populate_sysroot" - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid) - expected = ['a1:package_setscene', 'a1:packagedata', 'a1:package_qa', 'a1:package_write_rpm', 'a1:package_write_ipk', - 'a1:populate_sysroot_setscene', 'a1:build'] - self.assertEqual(set(tasks), set(expected)) - - self.shutdown(tempdir) - - def test_intermediate_notcovered(self): - with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir: - cmd = ["bitbake", "a1"] - sstatevalid = "a1:do_package_qa a1:do_packagedata a1:do_package_write_ipk a1:do_package_write_rpm a1:do_populate_lic a1:do_populate_sysroot" - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid) - expected = ['a1:package_write_ipk_setscene', 'a1:package_write_rpm_setscene', 'a1:packagedata_setscene', - 'a1:package_qa_setscene', 'a1:build', 'a1:populate_sysroot_setscene'] - self.assertEqual(set(tasks), set(expected)) - - self.shutdown(tempdir) - - def test_all_setscenevalid(self): - with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir: - cmd = ["bitbake", "a1"] - sstatevalid = self.a1_sstatevalid - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid) - expected = ['a1:package_write_ipk_setscene', 'a1:package_write_rpm_setscene', 'a1:packagedata_setscene', - 'a1:package_qa_setscene', 'a1:build', 'a1:populate_sysroot_setscene'] - self.assertEqual(set(tasks), set(expected)) - - self.shutdown(tempdir) - - def test_no_settasks(self): - with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir: - cmd = ["bitbake", "a1", "-c", "patch"] - sstatevalid = self.a1_sstatevalid - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid) - expected = ['a1:fetch', 'a1:unpack', 'a1:patch'] 
- self.assertEqual(set(tasks), set(expected)) - - self.shutdown(tempdir) - - def test_mix_covered_notcovered(self): - with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir: - cmd = ["bitbake", "a1:do_patch", "a1:do_populate_sysroot"] - sstatevalid = self.a1_sstatevalid - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid) - expected = ['a1:fetch', 'a1:unpack', 'a1:patch', 'a1:populate_sysroot_setscene'] - self.assertEqual(set(tasks), set(expected)) - - self.shutdown(tempdir) - - # Test targets with intermediate setscene tasks alongside a target with no intermediate setscene tasks - def test_mixed_direct_tasks_setscene_tasks(self): - with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir: - cmd = ["bitbake", "c1:do_patch", "a1"] - sstatevalid = self.a1_sstatevalid - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid) - expected = ['c1:fetch', 'c1:unpack', 'c1:patch', 'a1:package_write_ipk_setscene', 'a1:package_write_rpm_setscene', 'a1:packagedata_setscene', - 'a1:package_qa_setscene', 'a1:build', 'a1:populate_sysroot_setscene'] - self.assertEqual(set(tasks), set(expected)) - - self.shutdown(tempdir) - - # This test slows down the execution of do_package_setscene until after other real tasks have - # started running which tests for a bug where tasks were being lost from the buildable list of real - # tasks if they weren't in tasks_covered or tasks_notcovered - def test_slow_setscene(self): - with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir: - cmd = ["bitbake", "a1"] - sstatevalid = "a1:do_package" - slowtasks = "a1:package_setscene" - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, slowtasks) - expected = ['a1:package_setscene', 'a1:fetch', 'a1:unpack', 'a1:patch', 'a1:prepare_recipe_sysroot', 'a1:configure', - 'a1:compile', 'a1:install', 'a1:packagedata', 'a1:package_qa', 'a1:package_write_rpm', 'a1:package_write_ipk', - 'a1:populate_sysroot', 'a1:build'] - self.assertEqual(set(tasks), set(expected)) - - self.shutdown(tempdir) - - def test_setscene_ignore_tasks(self): - with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir: - cmd = ["bitbake", "a1"] - extraenv = { - "BB_SETSCENE_ENFORCE" : "1", - "BB_SETSCENE_ENFORCE_IGNORE_TASKS" : "a1:do_package_write_rpm a1:do_build" - } - sstatevalid = "a1:do_package a1:do_package_qa a1:do_packagedata a1:do_package_write_ipk a1:do_populate_lic a1:do_populate_sysroot" - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv) - expected = ['a1:packagedata_setscene', 'a1:package_qa_setscene', 'a1:package_write_ipk_setscene', - 'a1:populate_sysroot_setscene', 'a1:package_setscene'] - self.assertEqual(set(tasks), set(expected)) - - self.shutdown(tempdir) - - # Tests for problems with dependencies between setscene tasks - def test_no_setscenevalid_harddeps(self): - with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir: - cmd = ["bitbake", "d1"] - sstatevalid = "" - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid) - expected = ['a1:package', 'a1:fetch', 'a1:unpack', 'a1:patch', 'a1:prepare_recipe_sysroot', 'a1:configure', - 'a1:compile', 'a1:install', 'a1:packagedata', 'a1:package_write_rpm', 'a1:package_write_ipk', - 'a1:populate_sysroot', 'd1:package', 'd1:fetch', 'd1:unpack', 'd1:patch', 'd1:prepare_recipe_sysroot', 'd1:configure', - 'd1:compile', 'd1:install', 'd1:packagedata', 'd1:package_qa', 'd1:package_write_rpm', 'd1:package_write_ipk', - 'd1:populate_sysroot', 'd1:build'] - self.assertEqual(set(tasks), set(expected)) - - self.shutdown(tempdir) - 
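# Illustrative sketch (not original code) relating to the TODO at the top of
# this file about task-ordering tests: stamptask() in base.bbclass appends one
# "recipe:task" line to ${TOPDIR}/task.log as each task completes, and
# run_bitbakecmd() returns those lines in order, so an ordering check could be
# built on list positions rather than the set comparisons used above:
def assert_task_before(tasks, first, second):
    assert tasks.index(first) < tasks.index(second), \
        "%s did not complete before %s" % (first, second)

# e.g. assert_task_before(tasks, "a1:fetch", "a1:unpack")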
- def test_no_setscenevalid_withdeps(self): - with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir: - cmd = ["bitbake", "b1"] - sstatevalid = "" - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid) - expected = ['a1:' + x for x in self.alltasks] + ['b1:' + x for x in self.alltasks] - expected.remove('a1:build') - expected.remove('a1:package_qa') - self.assertEqual(set(tasks), set(expected)) - - self.shutdown(tempdir) - - def test_single_a1_setscenevalid_withdeps(self): - with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir: - cmd = ["bitbake", "b1"] - sstatevalid = "a1:do_package" - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid) - expected = ['a1:package_setscene', 'a1:fetch', 'a1:unpack', 'a1:patch', 'a1:prepare_recipe_sysroot', 'a1:configure', - 'a1:compile', 'a1:install', 'a1:packagedata', 'a1:package_write_rpm', 'a1:package_write_ipk', - 'a1:populate_sysroot'] + ['b1:' + x for x in self.alltasks] - self.assertEqual(set(tasks), set(expected)) - - self.shutdown(tempdir) - - def test_single_b1_setscenevalid_withdeps(self): - with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir: - cmd = ["bitbake", "b1"] - sstatevalid = "b1:do_package" - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid) - expected = ['a1:package', 'a1:fetch', 'a1:unpack', 'a1:patch', 'a1:prepare_recipe_sysroot', 'a1:configure', - 'a1:compile', 'a1:install', 'a1:packagedata', 'a1:package_write_rpm', 'a1:package_write_ipk', - 'a1:populate_sysroot', 'b1:package_setscene'] + ['b1:' + x for x in self.alltasks] - expected.remove('b1:package') - self.assertEqual(set(tasks), set(expected)) - - self.shutdown(tempdir) - - def test_intermediate_setscenevalid_withdeps(self): - with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir: - cmd = ["bitbake", "b1"] - sstatevalid = "a1:do_package a1:do_populate_sysroot b1:do_package" - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid) - expected = ['a1:package_setscene', 'a1:packagedata', 'a1:package_write_rpm', 'a1:package_write_ipk', - 'a1:populate_sysroot_setscene', 'b1:package_setscene'] + ['b1:' + x for x in self.alltasks] - expected.remove('b1:package') - self.assertEqual(set(tasks), set(expected)) - - self.shutdown(tempdir) - - def test_all_setscenevalid_withdeps(self): - with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir: - cmd = ["bitbake", "b1"] - sstatevalid = self.a1_sstatevalid + " " + self.b1_sstatevalid - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid) - expected = ['a1:package_write_ipk_setscene', 'a1:package_write_rpm_setscene', 'a1:packagedata_setscene', - 'b1:build', 'a1:populate_sysroot_setscene', 'b1:package_write_ipk_setscene', 'b1:package_write_rpm_setscene', - 'b1:packagedata_setscene', 'b1:package_qa_setscene', 'b1:populate_sysroot_setscene'] - self.assertEqual(set(tasks), set(expected)) - - self.shutdown(tempdir) - - def test_multiconfig_setscene_optimise(self): - with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir: - extraenv = { - "BBMULTICONFIG" : "mc-1 mc_2", - "BB_SIGNATURE_HANDLER" : "basic" - } - cmd = ["bitbake", "b1", "mc:mc-1:b1", "mc:mc_2:b1"] - setscenetasks = ['package_write_ipk_setscene', 'package_write_rpm_setscene', 'packagedata_setscene', - 'populate_sysroot_setscene', 'package_qa_setscene'] - sstatevalid = "" - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv) - expected = ['a1:' + x for x in self.alltasks] + ['b1:' + x for x in self.alltasks] + \ - ['mc-1:b1:' + x for x in setscenetasks] + ['mc-1:a1:' + x for x in 
setscenetasks] + \ - ['mc_2:b1:' + x for x in setscenetasks] + ['mc_2:a1:' + x for x in setscenetasks] + \ - ['mc-1:b1:build', 'mc_2:b1:build'] - for x in ['mc-1:a1:package_qa_setscene', 'mc_2:a1:package_qa_setscene', 'a1:build', 'a1:package_qa']: - expected.remove(x) - self.assertEqual(set(tasks), set(expected)) - - self.shutdown(tempdir) - - def test_multiconfig_bbmask(self): - # This test validates that multiconfigs can independently mask off - # recipes they do not want with BBMASK. It works by having recipes - # that will fail to parse for mc-1 and mc_2, then making each multiconfig - # build the one that does parse. This ensures that the recipes are in - # each multiconfig's BBFILES, but each is masking only the one that - # doesn't parse - with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir: - extraenv = { - "BBMULTICONFIG" : "mc-1 mc_2", - "BB_SIGNATURE_HANDLER" : "basic", - "EXTRA_BBFILES": "${COREBASE}/recipes/fails-mc/*.bb", - } - cmd = ["bitbake", "mc:mc-1:fails-mc2", "mc:mc_2:fails-mc1"] - self.run_bitbakecmd(cmd, tempdir, "", extraenv=extraenv) - - self.shutdown(tempdir) - - def test_multiconfig_mcdepends(self): - with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir: - extraenv = { - "BBMULTICONFIG" : "mc-1 mc_2", - "BB_SIGNATURE_HANDLER" : "basichash", - "EXTRA_BBFILES": "${COREBASE}/recipes/fails-mc/*.bb", - } - tasks = self.run_bitbakecmd(["bitbake", "mc:mc-1:f1"], tempdir, "", extraenv=extraenv, cleanup=True) - expected = ["mc-1:f1:%s" % t for t in self.alltasks] + \ - ["mc_2:a1:%s" % t for t in self.alltasks] - self.assertEqual(set(tasks), set(expected)) - - # A rebuild does nothing - tasks = self.run_bitbakecmd(["bitbake", "mc:mc-1:f1"], tempdir, "", extraenv=extraenv, cleanup=True) - self.assertEqual(set(tasks), set()) - - # Test that a signature change in the dependent task causes - # mcdepends to rebuild - tasks = self.run_bitbakecmd(["bitbake", "mc:mc_2:a1", "-c", "compile", "-f"], tempdir, "", extraenv=extraenv, cleanup=True) - expected = ["mc_2:a1:compile"] - self.assertEqual(set(tasks), set(expected)) - - rerun_tasks = self.alltasks[:] - for x in ("fetch", "unpack", "patch", "prepare_recipe_sysroot", "configure", "compile"): - rerun_tasks.remove(x) - tasks = self.run_bitbakecmd(["bitbake", "mc:mc-1:f1"], tempdir, "", extraenv=extraenv, cleanup=True) - expected = ["mc-1:f1:%s" % t for t in rerun_tasks] + \ - ["mc_2:a1:%s" % t for t in rerun_tasks] - self.assertEqual(set(tasks), set(expected)) - - # Check that a multiconfig that doesn't exist raises a correct error message - error_output = self.run_bitbakecmd(["bitbake", "g1"], tempdir, "", extraenv=extraenv, cleanup=True, allowfailure=True) - self.assertIn("non-existent task", error_output) - # If the word 'Traceback' or 'KeyError' is in the output we've regressed - self.assertNotIn("Traceback", error_output) - self.assertNotIn("KeyError", error_output) - - self.shutdown(tempdir) - - def test_hashserv_single(self): - with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir: - extraenv = { - "BB_HASHSERVE" : "auto", - "BB_SIGNATURE_HANDLER" : "TestEquivHash" - } - cmd = ["bitbake", "a1", "b1"] - setscenetasks = ['package_write_ipk_setscene', 'package_write_rpm_setscene', 'packagedata_setscene', - 'populate_sysroot_setscene', 'package_qa_setscene'] - sstatevalid = "" - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True) - expected = ['a1:' + x for x in self.alltasks] + ['b1:' + x for x in self.alltasks] - self.assertEqual(set(tasks), 
set(expected)) - cmd = ["bitbake", "a1", "-c", "install", "-f"] - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True) - expected = ['a1:install'] - self.assertEqual(set(tasks), set(expected)) - cmd = ["bitbake", "a1", "b1"] - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True) - expected = ['a1:populate_sysroot', 'a1:package', 'a1:package_write_rpm_setscene', 'a1:packagedata_setscene', - 'a1:package_write_ipk_setscene', 'a1:package_qa_setscene', 'a1:build'] - self.assertEqual(set(tasks), set(expected)) - - self.shutdown(tempdir) - - def test_hashserv_double(self): - with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir: - extraenv = { - "BB_HASHSERVE" : "auto", - "BB_SIGNATURE_HANDLER" : "TestEquivHash" - } - cmd = ["bitbake", "a1", "b1", "e1"] - setscenetasks = ['package_write_ipk_setscene', 'package_write_rpm_setscene', 'packagedata_setscene', - 'populate_sysroot_setscene', 'package_qa_setscene'] - sstatevalid = "" - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True) - expected = ['a1:' + x for x in self.alltasks] + ['b1:' + x for x in self.alltasks] + ['e1:' + x for x in self.alltasks] - self.assertEqual(set(tasks), set(expected)) - cmd = ["bitbake", "a1", "b1", "-c", "install", "-fn"] - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True) - cmd = ["bitbake", "e1"] - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True) - expected = ['a1:package', 'a1:install', 'b1:package', 'b1:install', 'a1:populate_sysroot', 'b1:populate_sysroot', - 'a1:package_write_ipk_setscene', 'b1:packagedata_setscene', 'b1:package_write_rpm_setscene', - 'a1:package_write_rpm_setscene', 'b1:package_write_ipk_setscene', 'a1:packagedata_setscene'] - self.assertEqual(set(tasks), set(expected)) - - self.shutdown(tempdir) - - def test_hashserv_multiple_setscene(self): - # Runs e1:do_package_setscene twice - with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir: - extraenv = { - "BB_HASHSERVE" : "auto", - "BB_SIGNATURE_HANDLER" : "TestEquivHash" - } - cmd = ["bitbake", "a1", "b1", "e1"] - setscenetasks = ['package_write_ipk_setscene', 'package_write_rpm_setscene', 'packagedata_setscene', - 'populate_sysroot_setscene', 'package_qa_setscene'] - sstatevalid = "" - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True) - expected = ['a1:' + x for x in self.alltasks] + ['b1:' + x for x in self.alltasks] + ['e1:' + x for x in self.alltasks] - self.assertEqual(set(tasks), set(expected)) - cmd = ["bitbake", "a1", "b1", "-c", "install", "-fn"] - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True) - cmd = ["bitbake", "e1"] - sstatevalid = "e1:do_package" - tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True, slowtasks="a1:populate_sysroot b1:populate_sysroot") - expected = ['a1:package', 'a1:install', 'b1:package', 'b1:install', 'a1:populate_sysroot', 'b1:populate_sysroot', - 'a1:package_write_ipk_setscene', 'b1:packagedata_setscene', 'b1:package_write_rpm_setscene', - 'a1:package_write_rpm_setscene', 'b1:package_write_ipk_setscene', 'a1:packagedata_setscene', - 'e1:package_setscene'] - self.assertEqual(set(tasks), set(expected)) - for i in expected: - self.assertEqual(tasks.count(i), 1, "%s not in task list once" % i) - - self.shutdown(tempdir) - - def shutdown(self, tempdir): - # Wait for the hashserve socket to disappear 
else we'll see races with the tempdir cleanup - while (os.path.exists(tempdir + "/hashserve.sock") or os.path.exists(tempdir + "/cache/hashserv.db-wal") or os.path.exists(tempdir + "/bitbake.lock")): - time.sleep(0.5) - diff --git a/bitbake/lib/bb/tests/setup.py b/bitbake/lib/bb/tests/setup.py deleted file mode 100644 index 6e9bf03654..0000000000 --- a/bitbake/lib/bb/tests/setup.py +++ /dev/null @@ -1,360 +0,0 @@ -# -# Copyright BitBake Contributors -# -# SPDX-License-Identifier: GPL-2.0-only -# - -from bb.tests.fetch import FetcherTest -import json -import os -import bb.process - -class BitbakeSetupTest(FetcherTest): - def setUp(self): - super(BitbakeSetupTest, self).setUp() - - self.registrypath = os.path.join(self.tempdir, "bitbake-setup-configurations") - - os.makedirs(self.registrypath) - self.git_init(cwd=self.registrypath) - self.git('commit --allow-empty -m "Initial commit"', cwd=self.registrypath) - - self.testrepopath = os.path.join(self.tempdir, "test-repo") - os.makedirs(self.testrepopath) - self.git_init(cwd=self.testrepopath) - self.git('commit --allow-empty -m "Initial commit"', cwd=self.testrepopath) - - oeinitbuildenv = """BBPATH=$1 -export BBPATH -PATH={}:$PATH -""".format(os.path.join(self.testrepopath, 'scripts')) - self.add_file_to_testrepo('oe-init-build-env', oeinitbuildenv, script=True) - - oesetupbuild = """#!/usr/bin/env python3 -import getopt -import sys -import os -import shutil -opts, args = getopt.getopt(sys.argv[2:], "c:b:", ["no-shell"]) -for option, value in opts: - if option == '-c': - template = value - if option == '-b': - builddir = value -confdir = os.path.join(builddir, 'conf') -os.makedirs(confdir, exist_ok=True) -with open(os.path.join(confdir, 'conf-summary.txt'), 'w') as f: - f.write(template) -shutil.copy(os.path.join(os.path.dirname(__file__), 'test-repo/test-file'), confdir) -with open(os.path.join(builddir, 'init-build-env'), 'w') as f: - f.write("BBPATH={}\\nexport BBPATH\\nPATH={}:$PATH".format(builddir, os.path.join(os.path.dirname(__file__), 'test-repo/scripts'))) -""" - self.add_file_to_testrepo('scripts/oe-setup-build', oesetupbuild, script=True) - - installbuildtools = """#!/usr/bin/env python3 -import getopt -import sys -import os - -opts, args = getopt.getopt(sys.argv[1:], "d:", ["downloads-directory="]) -for option, value in opts: - if option == '-d': - installdir = value - -print("Buildtools installed into {}".format(installdir)) -os.makedirs(installdir) -""" - self.add_file_to_testrepo('scripts/install-buildtools', installbuildtools, script=True) - - bitbakeconfigbuild = """#!/usr/bin/env python3 -import os -import sys -confdir = os.path.join(os.environ['BBPATH'], 'conf') -fragment = sys.argv[2] -with open(os.path.join(confdir, fragment), 'w') as f: - f.write('') -""" - self.add_file_to_testrepo('scripts/bitbake-config-build', bitbakeconfigbuild, script=True) - - sometargetexecutable_template = """#!/usr/bin/env python3 -import os -print("This is {}") -print("BBPATH is {{}}".format(os.environ["BBPATH"])) -""" - for e_name in ("some-target-executable-1", "some-target-executable-2"): - sometargetexecutable = sometargetexecutable_template.format(e_name) - self.add_file_to_testrepo('scripts/{}'.format(e_name), sometargetexecutable, script=True) - - def runbbsetup(self, cmd): - bbsetup = os.path.abspath(os.path.dirname(__file__) + "/../../../bin/bitbake-setup") - return bb.process.run("{} --global-settings {} {}".format(bbsetup, os.path.join(self.tempdir, 'global-config'), cmd)) - - def add_json_config_to_registry(self, name, rev, branch): - config = """ -{ - 
"sources": { - "test-repo": { - "git-remote": { - "remotes": { - "origin": { - "uri": "file://%s" - } - }, - "branch": "%s", - "rev": "%s" - }, - "path": "test-repo" - } - }, - "description": "Test configuration", - "bitbake-setup": { - "configurations": [ - { - "name": "gadget", - "description": "Gadget configuration", - "oe-template": "test-configuration-gadget", - "oe-fragments": ["test-fragment-1"] - }, - { - "name": "gizmo", - "description": "Gizmo configuration", - "oe-template": "test-configuration-gizmo", - "oe-fragments": ["test-fragment-2"] - }, - { - "name": "gizmo-env-passthrough", - "description": "Gizmo configuration with environment-passthrough", - "bb-layers": ["layerC","layerD/meta-layer"], - "oe-fragments": ["test-fragment-1"], - "bb-env-passthrough-additions": [ - "BUILD_ID", - "BUILD_DATE", - "BUILD_SERVER" - ] - }, - { - "name": "gizmo-no-fragment", - "description": "Gizmo no-fragment template-only configuration", - "oe-template": "test-configuration-gizmo" - }, - { - "name": "gadget-notemplate", - "description": "Gadget notemplate configuration", - "bb-layers": ["layerA","layerB/meta-layer"], - "oe-fragments": ["test-fragment-1"] - }, - { - "name": "gizmo-notemplate", - "description": "Gizmo notemplate configuration", - "bb-layers": ["layerC","layerD/meta-layer"], - "oe-fragments": ["test-fragment-2"] - }, - { - "name": "gizmo-notemplate-with-filerelative-layers", - "description": "Gizmo notemplate configuration using filerelative layers", - "bb-layers": ["layerC","layerD/meta-layer"], - "bb-layers-file-relative": ["layerE/meta-layer"], - "oe-fragments": ["test-fragment-2"] - } - ] - }, - "version": "1.0" -} -""" % (self.testrepopath, branch, rev) - os.makedirs(os.path.join(self.registrypath, os.path.dirname(name)), exist_ok=True) - with open(os.path.join(self.registrypath, name), 'w') as f: - f.write(config) - self.git('add {}'.format(name), cwd=self.registrypath) - self.git('commit -m "Adding {}"'.format(name), cwd=self.registrypath) - return json.loads(config) - - def add_file_to_testrepo(self, name, content, script=False): - fullname = os.path.join(self.testrepopath, name) - os.makedirs(os.path.join(self.testrepopath, os.path.dirname(name)), exist_ok=True) - with open(fullname, 'w') as f: - f.write(content) - if script: - import stat - st = os.stat(fullname) - os.chmod(fullname, st.st_mode | stat.S_IEXEC) - self.git('add {}'.format(name), cwd=self.testrepopath) - self.git('commit -m "Adding {}"'.format(name), cwd=self.testrepopath) - - def check_setupdir_files(self, setuppath, test_file_content): - with open(os.path.join(setuppath, 'config', "config-upstream.json")) as f: - config_upstream = json.load(f) - with open(os.path.join(setuppath, 'layers', 'test-repo', 'test-file')) as f: - self.assertEqual(f.read(), test_file_content) - bitbake_config = config_upstream["bitbake-config"] - bb_build_path = os.path.join(setuppath, 'build') - bb_conf_path = os.path.join(bb_build_path, 'conf') - self.assertTrue(os.path.exists(os.path.join(bb_build_path, 'init-build-env'))) - - with open(os.path.join(setuppath, 'config', "sources-fixed-revisions.json")) as f: - sources_fixed_revisions = json.load(f) - self.assertTrue('test-repo' in sources_fixed_revisions['sources'].keys()) - revision = self.git('rev-parse HEAD', cwd=self.testrepopath).strip() - self.assertEqual(revision, sources_fixed_revisions['sources']['test-repo']['git-remote']['rev']) - - if "oe-template" in bitbake_config: - with open(os.path.join(bb_conf_path, 'conf-summary.txt')) as f: - self.assertEqual(f.read(), 
bitbake_config["oe-template"]) - with open(os.path.join(bb_conf_path, 'test-file')) as f: - self.assertEqual(f.read(), test_file_content) - else: - with open(os.path.join(bb_conf_path, 'conf-summary.txt')) as f: - self.assertIn(bitbake_config["description"], f.read()) - with open(os.path.join(bb_conf_path, 'bblayers.conf')) as f: - bblayers = f.read() - for l in bitbake_config["bb-layers"]: - self.assertIn(os.path.join(setuppath, "layers", l), bblayers) - for l in bitbake_config.get("bb-layers-file-relative") or []: - filerelative_layer = os.path.join( - os.path.dirname(config_upstream["path"]), - l, - ) - self.assertIn(filerelative_layer, bblayers) - - if 'oe-fragment' in bitbake_config.keys(): - for f in bitbake_config["oe-fragments"]: - self.assertTrue(os.path.exists(os.path.join(bb_conf_path, f))) - - if 'bb-environment-passthrough' in bitbake_config.keys(): - with open(os.path.join(bb_build_path, 'init-build-env'), 'r') as f: - init_build_env = f.read() - self.assertTrue('BB_ENV_PASSTHROUGH_ADDITIONS' in init_build_env) - self.assertTrue('BUILD_ID' in init_build_env) - self.assertTrue('BUILD_DATE' in init_build_env) - self.assertTrue('BUILD_SERVER' in init_build_env) - # a more throrough test could be to initialize a bitbake build-env, export FOO to the shell environment, set the env-passthrough on it and finally check against 'bitbake-getvar FOO' - - - def test_setup(self): - # unset BBPATH to ensure tests run in isolation from the existing bitbake environment - import os - if 'BBPATH' in os.environ: - del os.environ['BBPATH'] - - # check that no arguments works - self.runbbsetup("") - - # check that --help works - self.runbbsetup("--help") - - # set up global location for top-dir-prefix - out = self.runbbsetup("settings set --global default top-dir-prefix {}".format(self.tempdir)) - settings_path = "{}/global-config".format(self.tempdir) - self.assertIn(settings_path, out[0]) - self.assertIn("From section 'default' the setting 'top-dir-prefix' was changed to", out[0]) - self.assertIn("Settings written to".format(settings_path), out[0]) - out = self.runbbsetup("settings set --global default dl-dir {}".format(os.path.join(self.tempdir, 'downloads'))) - self.assertIn("From section 'default' the setting 'dl-dir' was changed to", out[0]) - self.assertIn("Settings written to".format(settings_path), out[0]) - - # check that writing settings works and then adjust them to point to - # test registry repo - out = self.runbbsetup("settings set default registry 'git://{};protocol=file;branch=master;rev=master'".format(self.registrypath)) - settings_path = "{}/bitbake-builds/settings.conf".format(self.tempdir) - self.assertIn(settings_path, out[0]) - self.assertIn("From section 'default' the setting 'registry' was changed to", out[0]) - self.assertIn("Settings written to".format(settings_path), out[0]) - - # check that listing settings works - out = self.runbbsetup("settings list") - self.assertIn("default top-dir-prefix {}".format(self.tempdir), out[0]) - self.assertIn("default dl-dir {}".format(os.path.join(self.tempdir, 'downloads')), out[0]) - self.assertIn("default registry {}".format('git://{};protocol=file;branch=master;rev=master'.format(self.registrypath)), out[0]) - - # check that 'list' produces correct output with no configs, one config and two configs - out = self.runbbsetup("list") - self.assertNotIn("test-config-1", out[0]) - self.assertNotIn("test-config-2", out[0]) - - json_1 = self.add_json_config_to_registry('test-config-1.conf.json', 'master', 'master') - out = 
self.runbbsetup("list") - self.assertIn("test-config-1", out[0]) - self.assertNotIn("test-config-2", out[0]) - - json_2 = self.add_json_config_to_registry('config-2/test-config-2.conf.json', 'master', 'master') - out = self.runbbsetup("list --write-json={}".format(os.path.join(self.tempdir, "test-configs.json"))) - self.assertIn("test-config-1", out[0]) - self.assertIn("test-config-2", out[0]) - with open(os.path.join(self.tempdir, "test-configs.json")) as f: - json_configs = json.load(f) - self.assertIn("test-config-1", json_configs) - self.assertIn("test-config-2", json_configs) - - # check that init/status/update work - # (the latter two should do nothing and say that config hasn't changed) - test_file_content = 'initial\n' - self.add_file_to_testrepo('test-file', test_file_content) - - # test-config-1 is tested as a registry config, test-config-2 as a local file - test_configurations = {'test-config-1': {'cmdline': 'test-config-1', - 'buildconfigs':('gadget','gizmo', - 'gizmo-env-passthrough', - 'gizmo-no-fragment', - 'gadget-notemplate','gizmo-notemplate')}, - 'test-config-2': {'cmdline': os.path.join(self.registrypath,'config-2/test-config-2.conf.json'), - 'buildconfigs': ('gadget','gizmo', - 'gizmo-env-passthrough', - 'gizmo-no-fragment', - 'gadget-notemplate','gizmo-notemplate', - 'gizmo-notemplate-with-filerelative-layers')} - } - for cf, v in test_configurations.items(): - for c in v['buildconfigs']: - out = self.runbbsetup("init --non-interactive {} {}".format(v['cmdline'], c)) - setuppath = os.path.join(self.tempdir, 'bitbake-builds', '{}-{}'.format(cf, c)) - self.check_setupdir_files(setuppath, test_file_content) - os.environ['BBPATH'] = os.path.join(setuppath, 'build') - out = self.runbbsetup("status") - self.assertIn("Configuration in {} has not changed".format(setuppath), out[0]) - out = self.runbbsetup("update") - self.assertIn("Configuration in {} has not changed".format(setuppath), out[0]) - - # install buildtools - out = self.runbbsetup("install-buildtools") - self.assertIn("Buildtools installed into", out[0]) - self.assertTrue(os.path.exists(os.path.join(setuppath, 'buildtools'))) - - # change a file in the test layer repo, make a new commit and - # test that status/update correctly report the change and update the config - prev_test_file_content = test_file_content - test_file_content = 'modified\n' - self.add_file_to_testrepo('test-file', test_file_content) - for c in ('gadget', 'gizmo', - 'gizmo-env-passthrough', - 'gizmo-no-fragment', - 'gadget-notemplate', 'gizmo-notemplate'): - setuppath = os.path.join(self.tempdir, 'bitbake-builds', 'test-config-1-{}'.format(c)) - os.environ['BBPATH'] = os.path.join(setuppath, 'build') - out = self.runbbsetup("status") - self.assertIn("Layer repository file://{} checked out into {}/layers/test-repo updated revision master from".format(self.testrepopath, setuppath), out[0]) - out = self.runbbsetup("update") - if c in ('gadget', 'gizmo'): - self.assertIn("Existing bitbake configuration directory renamed to {}/build/conf-backup.".format(setuppath), out[0]) - self.assertIn('-{}+{}'.format(prev_test_file_content, test_file_content), out[0]) - self.check_setupdir_files(setuppath, test_file_content) - - # make a new branch in the test layer repo, change a file on that branch, - # make a new commit, update the top level json config to refer to that branch, - # and test that status/update correctly report the change and update the config - prev_test_file_content = test_file_content - test_file_content = 'modified-in-branch\n' - branch = 
"another-branch" - self.git('checkout -b {}'.format(branch), cwd=self.testrepopath) - self.add_file_to_testrepo('test-file', test_file_content) - json_1 = self.add_json_config_to_registry('test-config-1.conf.json', branch, branch) - for c in ('gadget', 'gizmo', - 'gizmo-env-passthrough', - 'gizmo-no-fragment', - 'gadget-notemplate', 'gizmo-notemplate'): - setuppath = os.path.join(self.tempdir, 'bitbake-builds', 'test-config-1-{}'.format(c)) - os.environ['BBPATH'] = os.path.join(setuppath, 'build') - out = self.runbbsetup("status") - self.assertIn("Configuration in {} has changed:".format(setuppath), out[0]) - self.assertIn('- "rev": "master"\n+ "rev": "another-branch"', out[0]) - out = self.runbbsetup("update") - if c in ('gadget', 'gizmo'): - self.assertIn("Existing bitbake configuration directory renamed to {}/build/conf-backup.".format(setuppath), out[0]) - self.assertIn('-{}+{}'.format(prev_test_file_content, test_file_content), out[0]) - self.check_setupdir_files(setuppath, test_file_content) diff --git a/bitbake/lib/bb/tests/siggen.py b/bitbake/lib/bb/tests/siggen.py deleted file mode 100644 index 0dc67e6cc2..0000000000 --- a/bitbake/lib/bb/tests/siggen.py +++ /dev/null @@ -1,28 +0,0 @@ -# -# BitBake Test for lib/bb/siggen.py -# -# Copyright (C) 2020 Jean-François Dagenais -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import unittest -import logging -import bb -import time - -logger = logging.getLogger('BitBake.TestSiggen') - -import bb.siggen - -class SiggenTest(unittest.TestCase): - - def test_build_pnid(self): - tests = { - ('', 'helloworld', 'do_sometask') : 'helloworld:do_sometask', - ('XX', 'helloworld', 'do_sometask') : 'mc:XX:helloworld:do_sometask', - } - - for t in tests: - self.assertEqual(bb.siggen.build_pnid(*t), tests[t]) - diff --git a/bitbake/lib/bb/tests/support/httpserver.py b/bitbake/lib/bb/tests/support/httpserver.py deleted file mode 100644 index 03327e923b..0000000000 --- a/bitbake/lib/bb/tests/support/httpserver.py +++ /dev/null @@ -1,65 +0,0 @@ -# -# SPDX-License-Identifier: MIT -# - -import http.server -from bb import multiprocessing -import os -import traceback -import signal -import logging -from socketserver import ThreadingMixIn - -class HTTPServer(ThreadingMixIn, http.server.HTTPServer): - - def server_start(self, root_dir, logger): - os.chdir(root_dir) - self.serve_forever() - -class HTTPRequestHandler(http.server.SimpleHTTPRequestHandler): - - def log_message(self, format_str, *args): - pass - -class HTTPService(object): - - def __init__(self, root_dir, host='', port=0, logger=None): - self.root_dir = root_dir - self.host = host - self.port = port - if not logger: - logger = logging.getLogger() - self.logger = logger - - def start(self): - print(self.root_dir) - if not os.path.exists(self.root_dir): - self.logger.info("Not starting HTTPService for directory %s which doesn't exist" % (self.root_dir)) - return - - self.server = HTTPServer((self.host, self.port), HTTPRequestHandler) - if self.port == 0: - self.port = self.server.server_port - self.process = multiprocessing.Process(target=self.server.server_start, args=[self.root_dir, self.logger]) - - # The signal handler from testimage.bbclass can cause deadlocks here - # if the HTTPServer is terminated before it can restore the standard - #signal behaviour - orig = signal.getsignal(signal.SIGTERM) - signal.signal(signal.SIGTERM, signal.SIG_DFL) - self.process.start() - signal.signal(signal.SIGTERM, orig) - - if self.logger: - self.logger.info("Started HTTPService on %s:%s" % (self.host, self.port)) 
- - - def stop(self): - if hasattr(self, "server"): - self.server.server_close() - if hasattr(self, "process"): - self.process.terminate() - self.process.join() - if self.logger: - self.logger.info("Stopped HTTPService on %s:%s" % (self.host, self.port)) - diff --git a/bitbake/lib/bb/tests/utils.py b/bitbake/lib/bb/tests/utils.py deleted file mode 100644 index 52b7bf85bf..0000000000 --- a/bitbake/lib/bb/tests/utils.py +++ /dev/null @@ -1,705 +0,0 @@ -# -# BitBake Tests for utils.py -# -# Copyright (C) 2012 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import unittest -import bb -import os -import tempfile -import re - -class VerCmpString(unittest.TestCase): - - def test_vercmpstring(self): - result = bb.utils.vercmp_string('1', '2') - self.assertTrue(result < 0) - result = bb.utils.vercmp_string('2', '1') - self.assertTrue(result > 0) - result = bb.utils.vercmp_string('1', '1.0') - self.assertTrue(result < 0) - result = bb.utils.vercmp_string('1', '1.1') - self.assertTrue(result < 0) - result = bb.utils.vercmp_string('1.1', '1_p2') - self.assertTrue(result < 0) - result = bb.utils.vercmp_string('1.0', '1.0+1.1-beta1') - self.assertTrue(result < 0) - result = bb.utils.vercmp_string('1.1', '1.0+1.1-beta1') - self.assertTrue(result > 0) - result = bb.utils.vercmp_string('1a', '1a1') - self.assertTrue(result < 0) - result = bb.utils.vercmp_string('1a1', '1a') - self.assertTrue(result > 0) - result = bb.utils.vercmp_string('1.', '1.1') - self.assertTrue(result < 0) - result = bb.utils.vercmp_string('1.1', '1.') - self.assertTrue(result > 0) - - def test_explode_dep_versions(self): - correctresult = {"foo" : ["= 1.10"]} - result = bb.utils.explode_dep_versions2("foo (= 1.10)") - self.assertEqual(result, correctresult) - result = bb.utils.explode_dep_versions2("foo (=1.10)") - self.assertEqual(result, correctresult) - result = bb.utils.explode_dep_versions2("foo ( = 1.10)") - self.assertEqual(result, correctresult) - result = bb.utils.explode_dep_versions2("foo ( =1.10)") - self.assertEqual(result, correctresult) - result = bb.utils.explode_dep_versions2("foo ( = 1.10 )") - self.assertEqual(result, correctresult) - result = bb.utils.explode_dep_versions2("foo ( =1.10 )") - self.assertEqual(result, correctresult) - - def test_vercmp_string_op(self): - compareops = [('1', '1', '=', True), - ('1', '1', '==', True), - ('1', '1', '!=', False), - ('1', '1', '>', False), - ('1', '1', '<', False), - ('1', '1', '>=', True), - ('1', '1', '<=', True), - ('1', '0', '=', False), - ('1', '0', '==', False), - ('1', '0', '!=', True), - ('1', '0', '>', True), - ('1', '0', '<', False), - ('1', '0', '>>', True), - ('1', '0', '<<', False), - ('1', '0', '>=', True), - ('1', '0', '<=', False), - ('0', '1', '=', False), - ('0', '1', '==', False), - ('0', '1', '!=', True), - ('0', '1', '>', False), - ('0', '1', '<', True), - ('0', '1', '>>', False), - ('0', '1', '<<', True), - ('0', '1', '>=', False), - ('0', '1', '<=', True)] - - for arg1, arg2, op, correctresult in compareops: - result = bb.utils.vercmp_string_op(arg1, arg2, op) - self.assertEqual(result, correctresult, 'vercmp_string_op("%s", "%s", "%s") != %s' % (arg1, arg2, op, correctresult)) - - # Check that clearly invalid operator raises an exception - self.assertRaises(bb.utils.VersionStringException, bb.utils.vercmp_string_op, '0', '0', '$') - - -class Path(unittest.TestCase): - def test_unsafe_delete_path(self): - checkitems = [('/', True), - ('//', True), - ('///', True), - (os.getcwd().count(os.sep) * ('..' 
+ os.sep), True), - (os.environ.get('HOME', '/home/test'), True), - ('/home/someone', True), - ('/home/other/', True), - ('/home/other/subdir', False), - ('', False)] - for arg1, correctresult in checkitems: - result = bb.utils._check_unsafe_delete_path(arg1) - self.assertEqual(result, correctresult, '_check_unsafe_delete_path("%s") != %s' % (arg1, correctresult)) - -class Checksum(unittest.TestCase): - filler = b"Shiver me timbers square-rigged spike Gold Road galleon bilge water boatswain wherry jack pirate. Mizzenmast rum lad Privateer jack salmagundi hang the jib piracy Pieces of Eight Corsair. Parrel marooned black spot yawl provost quarterdeck cable no prey, no pay spirits lateen sail." - - def test_md5(self): - import hashlib - with tempfile.NamedTemporaryFile() as f: - f.write(self.filler) - f.flush() - checksum = bb.utils.md5_file(f.name) - self.assertEqual(checksum, "bd572cd5de30a785f4efcb6eaf5089e3") - - def test_sha1(self): - import hashlib - with tempfile.NamedTemporaryFile() as f: - f.write(self.filler) - f.flush() - checksum = bb.utils.sha1_file(f.name) - self.assertEqual(checksum, "249eb8fd654732ea836d5e702d7aa567898eca71") - - def test_sha256(self): - import hashlib - with tempfile.NamedTemporaryFile() as f: - f.write(self.filler) - f.flush() - checksum = bb.utils.sha256_file(f.name) - self.assertEqual(checksum, "fcfbae8bf6b721dbb9d2dc6a9334a58f2031a9a9b302999243f99da4d7f12d0f") - - def test_goh1(self): - import hashlib - with tempfile.NamedTemporaryFile() as f: - f.write(self.filler) - f.flush() - checksum = bb.utils.goh1_file(f.name) - self.assertEqual(checksum, "81191f04d4abf413e5badd234814e4202d9efa73e6f9437e9ddd6b8165b569ef") - -class EditMetadataFile(unittest.TestCase): - _origfile = """ -# A comment -HELLO = "oldvalue" - -THIS = "that" - -# Another comment -NOCHANGE = "samevalue" -OTHER = 'anothervalue' - -MULTILINE = "a1 \\ - a2 \\ - a3" - -MULTILINE2 := " \\ - b1 \\ - b2 \\ - b3 \\ - " - - -MULTILINE3 = " \\ - c1 \\ - c2 \\ - c3 \\ -" - -do_functionname() { - command1 ${VAL1} ${VAL2} - command2 ${VAL3} ${VAL4} -} -""" - def _testeditfile(self, varvalues, compareto, dummyvars=None): - if dummyvars is None: - dummyvars = [] - with tempfile.NamedTemporaryFile('w', delete=False) as tf: - tf.write(self._origfile) - tf.close() - try: - varcalls = [] - def handle_file(varname, origvalue, op, newlines): - self.assertIn(varname, varvalues, 'Callback called for variable %s not in the list!' % varname) - self.assertNotIn(varname, dummyvars, 'Callback called for variable %s in dummy list!' 
% varname)
-                varcalls.append(varname)
-                return varvalues[varname]
-
-            bb.utils.edit_metadata_file(tf.name, varvalues.keys(), handle_file)
-            with open(tf.name) as f:
-                modfile = f.readlines()
-            # Ensure the output matches the expected output
-            self.assertEqual(compareto.splitlines(True), modfile)
-            # Ensure the callback function was called for every variable we asked for
-            # (plus allow testing behaviour when a requested variable is not present)
-            self.assertEqual(sorted(varvalues.keys()), sorted(varcalls + dummyvars))
-        finally:
-            os.remove(tf.name)
-
-
-    def test_edit_metadata_file_nochange(self):
-        # Test file doesn't get modified with nothing to do
-        self._testeditfile({}, self._origfile)
-        # Test file doesn't get modified with only dummy variables
-        self._testeditfile({'DUMMY1': ('should_not_set', None, 0, True),
-                            'DUMMY2': ('should_not_set_again', None, 0, True)}, self._origfile, dummyvars=['DUMMY1', 'DUMMY2'])
-        # Test file doesn't get modified with some of the same values
-        self._testeditfile({'THIS': ('that', None, 0, True),
-                            'OTHER': ('anothervalue', None, 0, True),
-                            'MULTILINE3': (' c1 c2 c3 ', None, 4, False)}, self._origfile)
-
-    def test_edit_metadata_file_1(self):
-
-        newfile1 = """
-# A comment
-HELLO = "newvalue"
-
-THIS = "that"
-
-# Another comment
-NOCHANGE = "samevalue"
-OTHER = 'anothervalue'
-
-MULTILINE = "a1 \\
-    a2 \\
-    a3"
-
-MULTILINE2 := " \\
-    b1 \\
-    b2 \\
-    b3 \\
-    "
-
-
-MULTILINE3 = " \\
-    c1 \\
-    c2 \\
-    c3 \\
-"
-
-do_functionname() {
-    command1 ${VAL1} ${VAL2}
-    command2 ${VAL3} ${VAL4}
-}
-"""
-        self._testeditfile({'HELLO': ('newvalue', None, 4, True)}, newfile1)
-
-
-    def test_edit_metadata_file_2(self):
-
-        newfile2 = """
-# A comment
-HELLO = "oldvalue"
-
-THIS = "that"
-
-# Another comment
-NOCHANGE = "samevalue"
-OTHER = 'anothervalue'
-
-MULTILINE = " \\
-    d1 \\
-    d2 \\
-    d3 \\
-    "
-
-MULTILINE2 := " \\
-    b1 \\
-    b2 \\
-    b3 \\
-    "
-
-
-MULTILINE3 = "nowsingle"
-
-do_functionname() {
-    command1 ${VAL1} ${VAL2}
-    command2 ${VAL3} ${VAL4}
-}
-"""
-        self._testeditfile({'MULTILINE': (['d1','d2','d3'], None, 4, False),
-                            'MULTILINE3': ('nowsingle', None, 4, True),
-                            'NOTPRESENT': (['a', 'b'], None, 4, False)}, newfile2, dummyvars=['NOTPRESENT'])
-
-
-    def test_edit_metadata_file_3(self):
-
-        newfile3 = """
-# A comment
-HELLO = "oldvalue"
-
-# Another comment
-NOCHANGE = "samevalue"
-OTHER = "yetanothervalue"
-
-MULTILINE = "e1 \\
-    e2 \\
-    e3 \\
-    "
-
-MULTILINE2 := "f1 \\
-\tf2 \\
-\t"
-
-
-MULTILINE3 = " \\
-    c1 \\
-    c2 \\
-    c3 \\
-"
-
-do_functionname() {
-    othercommand_one a b c
-    othercommand_two d e f
-}
-"""
-
-        self._testeditfile({'do_functionname()': (['othercommand_one a b c', 'othercommand_two d e f'], None, 4, False),
-                            'MULTILINE2': (['f1', 'f2'], None, '\t', True),
-                            'MULTILINE': (['e1', 'e2', 'e3'], None, -1, True),
-                            'THIS': (None, None, 0, False),
-                            'OTHER': ('yetanothervalue', None, 0, True)}, newfile3)
-
-
-    def test_edit_metadata_file_4(self):
-
-        newfile4 = """
-# A comment
-HELLO = "oldvalue"
-
-THIS = "that"
-
-# Another comment
-OTHER = 'anothervalue'
-
-MULTILINE = "a1 \\
-    a2 \\
-    a3"
-
-MULTILINE2 := " \\
-    b1 \\
-    b2 \\
-    b3 \\
-    "
-
-
-"""
-
-        self._testeditfile({'NOCHANGE': (None, None, 0, False),
-                            'MULTILINE3': (None, None, 0, False),
-                            'THIS': ('that', None, 0, False),
-                            'do_functionname()': (None, None, 0, False)}, newfile4)
-
-
-    def test_edit_metadata(self):
-        newfile5 = """
-# A comment
-HELLO = "hithere"
-
-# A new comment
-THIS += "that"
-
-# Another comment
-NOCHANGE = "samevalue"
-OTHER = 'anothervalue'
-
-MULTILINE = "a1 \\ - a2 \\ - a3" - -MULTILINE2 := " \\ - b1 \\ - b2 \\ - b3 \\ - " - - -MULTILINE3 = " \\ - c1 \\ - c2 \\ - c3 \\ -" - -NEWVAR = "value" - -do_functionname() { - command1 ${VAL1} ${VAL2} - command2 ${VAL3} ${VAL4} -} -""" - - - def handle_var(varname, origvalue, op, newlines): - if varname == 'THIS': - newlines.append('# A new comment\n') - elif varname == 'do_functionname()': - newlines.append('NEWVAR = "value"\n') - newlines.append('\n') - valueitem = varvalues.get(varname, None) - if valueitem: - return valueitem - else: - return (origvalue, op, 0, True) - - varvalues = {'HELLO': ('hithere', None, 0, True), 'THIS': ('that', '+=', 0, True)} - varlist = ['HELLO', 'THIS', 'do_functionname()'] - (updated, newlines) = bb.utils.edit_metadata(self._origfile.splitlines(True), varlist, handle_var) - self.assertTrue(updated, 'List should be updated but isn\'t') - self.assertEqual(newlines, newfile5.splitlines(True)) - - # Make sure the orig value matches what we expect it to be - def test_edit_metadata_origvalue(self): - origfile = """ -MULTILINE = " stuff \\ - morestuff" -""" - expected_value = "stuff morestuff" - global value_in_callback - value_in_callback = "" - - def handle_var(varname, origvalue, op, newlines): - global value_in_callback - value_in_callback = origvalue - return (origvalue, op, -1, False) - - bb.utils.edit_metadata(origfile.splitlines(True), - ['MULTILINE'], - handle_var) - - testvalue = re.sub(r'\s+', ' ', value_in_callback.strip()) - self.assertEqual(expected_value, testvalue) - -class EditBbLayersConf(unittest.TestCase): - - def _test_bblayers_edit(self, before, after, add, remove, notadded, notremoved): - with tempfile.NamedTemporaryFile('w', delete=False) as tf: - tf.write(before) - tf.close() - try: - actual_notadded, actual_notremoved = bb.utils.edit_bblayers_conf(tf.name, add, remove) - with open(tf.name) as f: - actual_after = f.readlines() - self.assertEqual(after.splitlines(True), actual_after) - self.assertEqual(notadded, actual_notadded) - self.assertEqual(notremoved, actual_notremoved) - finally: - os.remove(tf.name) - - - def test_bblayers_remove(self): - before = r""" -# A comment - -BBPATH = "${TOPDIR}" -BBFILES ?= "" -BBLAYERS = " \ - /home/user/path/layer1 \ - /home/user/path/layer2 \ - /home/user/path/subpath/layer3 \ - /home/user/path/layer4 \ - " -""" - after = r""" -# A comment - -BBPATH = "${TOPDIR}" -BBFILES ?= "" -BBLAYERS = " \ - /home/user/path/layer1 \ - /home/user/path/subpath/layer3 \ - /home/user/path/layer4 \ - " -""" - self._test_bblayers_edit(before, after, - None, - '/home/user/path/layer2', - [], - []) - - - def test_bblayers_add(self): - before = r""" -# A comment - -BBPATH = "${TOPDIR}" -BBFILES ?= "" -BBLAYERS = " \ - /home/user/path/layer1 \ - /home/user/path/layer2 \ - /home/user/path/subpath/layer3 \ - /home/user/path/layer4 \ - " -""" - after = r""" -# A comment - -BBPATH = "${TOPDIR}" -BBFILES ?= "" -BBLAYERS = " \ - /home/user/path/layer1 \ - /home/user/path/layer2 \ - /home/user/path/subpath/layer3 \ - /home/user/path/layer4 \ - /other/path/to/layer5 \ - " -""" - self._test_bblayers_edit(before, after, - '/other/path/to/layer5/', - None, - [], - []) - - - def test_bblayers_add_remove(self): - before = r""" -# A comment - -BBPATH = "${TOPDIR}" -BBFILES ?= "" -BBLAYERS = " \ - /home/user/path/layer1 \ - /home/user/path/layer2 \ - /home/user/path/subpath/layer3 \ - /home/user/path/layer4 \ - " -""" - after = r""" -# A comment - -BBPATH = "${TOPDIR}" -BBFILES ?= "" -BBLAYERS = " \ - /home/user/path/layer1 \ - 
/home/user/path/layer2 \ - /home/user/path/layer4 \ - /other/path/to/layer5 \ - " -""" - self._test_bblayers_edit(before, after, - ['/other/path/to/layer5', '/home/user/path/layer2/'], '/home/user/path/subpath/layer3/', - ['/home/user/path/layer2'], - []) - - - def test_bblayers_add_remove_home(self): - before = r""" -# A comment - -BBPATH = "${TOPDIR}" -BBFILES ?= "" -BBLAYERS = " \ - ~/path/layer1 \ - ~/path/layer2 \ - ~/otherpath/layer3 \ - ~/path/layer4 \ - " -""" - after = r""" -# A comment - -BBPATH = "${TOPDIR}" -BBFILES ?= "" -BBLAYERS = " \ - ~/path/layer2 \ - ~/path/layer4 \ - ~/path2/layer5 \ - " -""" - self._test_bblayers_edit(before, after, - [os.environ['HOME'] + '/path/layer4', '~/path2/layer5'], - [os.environ['HOME'] + '/otherpath/layer3', '~/path/layer1', '~/path/notinlist'], - [os.environ['HOME'] + '/path/layer4'], - ['~/path/notinlist']) - - - def test_bblayers_add_remove_plusequals(self): - before = r""" -# A comment - -BBPATH = "${TOPDIR}" -BBFILES ?= "" -BBLAYERS += " \ - /home/user/path/layer1 \ - /home/user/path/layer2 \ - " -""" - after = r""" -# A comment - -BBPATH = "${TOPDIR}" -BBFILES ?= "" -BBLAYERS += " \ - /home/user/path/layer2 \ - /home/user/path/layer3 \ - " -""" - self._test_bblayers_edit(before, after, - '/home/user/path/layer3', - '/home/user/path/layer1', - [], - []) - - - def test_bblayers_add_remove_plusequals2(self): - before = r""" -# A comment - -BBPATH = "${TOPDIR}" -BBFILES ?= "" -BBLAYERS += " \ - /home/user/path/layer1 \ - /home/user/path/layer2 \ - /home/user/path/layer3 \ - " -BBLAYERS += "/home/user/path/layer4" -BBLAYERS += "/home/user/path/layer5" -""" - after = r""" -# A comment - -BBPATH = "${TOPDIR}" -BBFILES ?= "" -BBLAYERS += " \ - /home/user/path/layer2 \ - /home/user/path/layer3 \ - " -BBLAYERS += "/home/user/path/layer5" -BBLAYERS += "/home/user/otherpath/layer6" -""" - self._test_bblayers_edit(before, after, - ['/home/user/otherpath/layer6', '/home/user/path/layer3'], ['/home/user/path/layer1', '/home/user/path/layer4', '/home/user/path/layer7'], - ['/home/user/path/layer3'], - ['/home/user/path/layer7']) - - -class GetReferencedVars(unittest.TestCase): - def setUp(self): - self.d = bb.data.init() - - def check_referenced(self, expression, expected_layers): - vars = bb.utils.get_referenced_vars(expression, self.d) - - # Do the easy check first - is every variable accounted for? - expected_vars = set.union(set(), *expected_layers) - got_vars = set(vars) - self.assertSetEqual(got_vars, expected_vars) - - # Now test the order of the layers - start = 0 - for i, expected_layer in enumerate(expected_layers): - got_layer = set(vars[start:len(expected_layer)+start]) - start += len(expected_layer) - self.assertSetEqual(got_layer, expected_layer) - - def test_no_vars(self): - self.check_referenced("", []) - self.check_referenced(" ", []) - self.check_referenced(" no vars here! 
", []) - - def test_single_layer(self): - self.check_referenced("${VAR}", [{"VAR"}]) - self.check_referenced("${VAR} ${VAR}", [{"VAR"}]) - - def test_two_layer(self): - self.d.setVar("VAR", "${B}") - self.check_referenced("${VAR}", [{"VAR"}, {"B"}]) - self.check_referenced("${@d.getVar('VAR')}", [{"VAR"}, {"B"}]) - - def test_more_complicated(self): - self.d["SRC_URI"] = "${QT_GIT}/${QT_MODULE}.git;name=${QT_MODULE};${QT_MODULE_BRANCH_PARAM};protocol=${QT_GIT_PROTOCOL}" - self.d["QT_GIT"] = "git://code.qt.io/${QT_GIT_PROJECT}" - self.d["QT_MODULE_BRANCH_PARAM"] = "branch=${QT_MODULE_BRANCH}" - self.d["QT_MODULE"] = "${BPN}" - self.d["BPN"] = "something to do with ${PN} and ${SPECIAL_PKGSUFFIX}" - - layers = [{"SRC_URI"}, {"QT_GIT", "QT_MODULE", "QT_MODULE_BRANCH_PARAM", "QT_GIT_PROTOCOL"}, {"QT_GIT_PROJECT", "QT_MODULE_BRANCH", "BPN"}, {"PN", "SPECIAL_PKGSUFFIX"}] - self.check_referenced("${SRC_URI}", layers) - - -class EnvironmentTests(unittest.TestCase): - def test_environment(self): - os.environ["A"] = "this is A" - self.assertIn("A", os.environ) - self.assertEqual(os.environ["A"], "this is A") - self.assertNotIn("B", os.environ) - - with bb.utils.environment(B="this is B"): - self.assertIn("A", os.environ) - self.assertEqual(os.environ["A"], "this is A") - self.assertIn("B", os.environ) - self.assertEqual(os.environ["B"], "this is B") - - self.assertIn("A", os.environ) - self.assertEqual(os.environ["A"], "this is A") - self.assertNotIn("B", os.environ) - -class FilemodeTests(unittest.TestCase): - def test_filemode_convert(self): - self.assertEqual(0o775, bb.utils.to_filemode("0o775")) - self.assertEqual(0o775, bb.utils.to_filemode(0o775)) - self.assertEqual(0o775, bb.utils.to_filemode("775")) - with self.assertRaises(ValueError): - bb.utils.to_filemode("xyz") - with self.assertRaises(ValueError): - bb.utils.to_filemode("999") - diff --git a/bitbake/lib/bb/tinfoil.py b/bitbake/lib/bb/tinfoil.py deleted file mode 100644 index e7fbcbca0a..0000000000 --- a/bitbake/lib/bb/tinfoil.py +++ /dev/null @@ -1,1061 +0,0 @@ -# tinfoil: a simple wrapper around cooker for bitbake-based command-line utilities -# -# Copyright (C) 2012-2017 Intel Corporation -# Copyright (C) 2011 Mentor Graphics Corporation -# Copyright (C) 2006-2012 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import logging -import os -import sys -import time -import atexit -import re -from collections import OrderedDict, defaultdict -from functools import partial, wraps -from contextlib import contextmanager - -import bb.cache -import bb.cooker -import bb.providers -import bb.taskdata -import bb.utils -import bb.command -import bb.remotedata -from bb.main import setup_bitbake, BitBakeConfigParameters -import bb.fetch2 - -def wait_for(f): - """ - Wrap a function that makes an asynchronous tinfoil call using - self.run_command() and wait for events to say that the call has been - successful, or an error has occurred. 
- """ - @wraps(f) - def wrapper(self, *args, handle_events=True, extra_events=None, event_callback=None, **kwargs): - if handle_events: - # A reasonable set of default events matching up with those we handle below - eventmask = [ - 'bb.event.BuildStarted', - 'bb.event.BuildCompleted', - 'logging.LogRecord', - 'bb.event.NoProvider', - 'bb.command.CommandCompleted', - 'bb.command.CommandFailed', - 'bb.build.TaskStarted', - 'bb.build.TaskFailed', - 'bb.build.TaskSucceeded', - 'bb.build.TaskFailedSilent', - 'bb.build.TaskProgress', - 'bb.runqueue.runQueueTaskStarted', - 'bb.runqueue.sceneQueueTaskStarted', - 'bb.event.ProcessStarted', - 'bb.event.ProcessProgress', - 'bb.event.ProcessFinished', - ] - if extra_events: - eventmask.extend(extra_events) - ret = self.set_event_mask(eventmask) - - includelogs = self.config_data.getVar('BBINCLUDELOGS') - loglines = self.config_data.getVar('BBINCLUDELOGS_LINES') - - # Call actual function - ret = f(self, *args, **kwargs) - - if handle_events: - lastevent = time.time() - result = False - # Borrowed from knotty, instead somewhat hackily we use the helper - # as the object to store "shutdown" on - helper = bb.ui.uihelper.BBUIHelper() - helper.shutdown = 0 - parseprogress = None - termfilter = bb.ui.knotty.TerminalFilter(helper, helper, self.logger.handlers, quiet=self.quiet) - try: - while True: - try: - event = self.wait_event(0.25) - if event: - lastevent = time.time() - if event_callback and event_callback(event): - continue - if helper.eventHandler(event): - if isinstance(event, bb.build.TaskFailedSilent): - self.logger.warning("Logfile for failed setscene task is %s" % event.logfile) - elif isinstance(event, bb.build.TaskFailed): - bb.ui.knotty.print_event_log(event, includelogs, loglines, termfilter) - continue - if isinstance(event, bb.event.ProcessStarted): - if self.quiet > 1: - continue - parseprogress = bb.ui.knotty.new_progress(event.processname, event.total) - parseprogress.start(False) - continue - if isinstance(event, bb.event.ProcessProgress): - if self.quiet > 1: - continue - if parseprogress: - parseprogress.update(event.progress) - else: - bb.warn("Got ProcessProgress event for something that never started?") - continue - if isinstance(event, bb.event.ProcessFinished): - if self.quiet > 1: - continue - if parseprogress: - parseprogress.finish() - parseprogress = None - continue - if isinstance(event, bb.command.CommandCompleted): - result = True - break - if isinstance(event, (bb.command.CommandFailed, bb.command.CommandExit)): - self.logger.error(str(event)) - result = False - break - if isinstance(event, logging.LogRecord): - if event.taskpid == 0 or event.levelno > logging.INFO: - self.logger.handle(event) - continue - if isinstance(event, bb.event.NoProvider): - self.logger.error(str(event)) - result = False - break - elif helper.shutdown > 1: - break - termfilter.updateFooter() - if time.time() > (lastevent + (3*60)): - if not self.run_command('ping', handle_events=False): - print("\nUnable to ping server and no events, closing down...\n") - return False - except KeyboardInterrupt: - termfilter.clearFooter() - if helper.shutdown == 1: - print("\nSecond Keyboard Interrupt, stopping...\n") - ret = self.run_command("stateForceShutdown") - if ret and ret[2]: - self.logger.error("Unable to cleanly stop: %s" % ret[2]) - elif helper.shutdown == 0: - print("\nKeyboard Interrupt, closing down...\n") - interrupted = True - ret = self.run_command("stateShutdown") - if ret and ret[2]: - self.logger.error("Unable to cleanly shutdown: %s" % 
ret[2])
-                        helper.shutdown = helper.shutdown + 1
-                        termfilter.clearFooter()
-            finally:
-                termfilter.finish()
-            if helper.failed_tasks:
-                result = False
-            return result
-        else:
-            return ret
-
-    return wrapper
-
-
-# We need this in order to shut down the connection to the bitbake server,
-# otherwise the process will never properly exit
-_server_connections = []
-def _terminate_connections():
-    for connection in _server_connections:
-        connection.terminate()
-atexit.register(_terminate_connections)
-
-class TinfoilUIException(Exception):
-    """Exception raised when the UI returns non-zero from its main function"""
-    def __init__(self, returncode):
-        self.returncode = returncode
-    def __repr__(self):
-        return 'UI module main returned %d' % self.returncode
-
-class TinfoilCommandFailed(Exception):
-    """Exception raised when run_command fails"""
-
-class TinfoilDataStoreConnectorVarHistory:
-    def __init__(self, tinfoil, dsindex):
-        self.tinfoil = tinfoil
-        self.dsindex = dsindex
-
-    def remoteCommand(self, cmd, *args, **kwargs):
-        return self.tinfoil.run_command('dataStoreConnectorVarHistCmd', self.dsindex, cmd, args, kwargs)
-
-    def emit(self, var, oval, val, o, d):
-        ret = self.tinfoil.run_command('dataStoreConnectorVarHistCmdEmit', self.dsindex, var, oval, val, d.dsindex)
-        o.write(ret)
-
-    def __getattr__(self, name):
-        if not hasattr(bb.data_smart.VariableHistory, name):
-            raise AttributeError("VariableHistory has no such method %s" % name)
-
-        newfunc = partial(self.remoteCommand, name)
-        setattr(self, name, newfunc)
-        return newfunc
-
-class TinfoilDataStoreConnectorIncHistory:
-    def __init__(self, tinfoil, dsindex):
-        self.tinfoil = tinfoil
-        self.dsindex = dsindex
-
-    def remoteCommand(self, cmd, *args, **kwargs):
-        return self.tinfoil.run_command('dataStoreConnectorIncHistCmd', self.dsindex, cmd, args, kwargs)
-
-    def __getattr__(self, name):
-        if not hasattr(bb.data_smart.IncludeHistory, name):
-            raise AttributeError("IncludeHistory has no such method %s" % name)
-
-        newfunc = partial(self.remoteCommand, name)
-        setattr(self, name, newfunc)
-        return newfunc
-
-class TinfoilDataStoreConnector:
-    """
-    Connector object used to enable access to datastore objects via tinfoil.
-    Method calls are transmitted to the remote datastore for processing; if a
-    datastore is returned, we return a connector object for the new store.
-    """
-
-    def __init__(self, tinfoil, dsindex):
-        self.tinfoil = tinfoil
-        self.dsindex = dsindex
-        self.varhistory = TinfoilDataStoreConnectorVarHistory(tinfoil, dsindex)
-        self.inchistory = TinfoilDataStoreConnectorIncHistory(tinfoil, dsindex)
-
-    def remoteCommand(self, cmd, *args, **kwargs):
-        ret = self.tinfoil.run_command('dataStoreConnectorCmd', self.dsindex, cmd, args, kwargs)
-        if isinstance(ret, bb.command.DataStoreConnectionHandle):
-            return TinfoilDataStoreConnector(self.tinfoil, ret.dsindex)
-        return ret
-
-    def __getattr__(self, name):
-        if not hasattr(bb.data._dict_type, name):
-            raise AttributeError("Data store has no such method %s" % name)
-
-        newfunc = partial(self.remoteCommand, name)
-        setattr(self, name, newfunc)
-        return newfunc
-
-    def __iter__(self):
-        keys = self.tinfoil.run_command('dataStoreConnectorCmd', self.dsindex, "keys", [], {})
-        for k in keys:
-            yield k
-
-class TinfoilCookerAdapter:
-    """
-    Provide an adapter for existing code that expects to access a cooker object
-    via Tinfoil; now that Tinfoil is on the client side, it no longer has
-    direct access.
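-
-    For example (a sketch, given a prepared Tinfoil instance), recipe data can
-    still be read as if a cooker were available locally:
-
-        pkg_pn = tinfoil.cooker.recipecaches[''].pkg_pn
-
-    Attribute accesses like this are forwarded to the server via run_command()
-    on first use and the results are cached locally.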
- """ - - class TinfoilCookerCollectionAdapter: - """ cooker.collection adapter """ - def __init__(self, tinfoil, mc=''): - self.tinfoil = tinfoil - self.mc = mc - def get_file_appends(self, fn): - return self.tinfoil.get_file_appends(fn, self.mc) - def __getattr__(self, name): - if name == 'overlayed': - return self.tinfoil.get_overlayed_recipes(self.mc) - elif name == 'bbappends': - return self.tinfoil.run_command('getAllAppends', self.mc) - else: - raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name)) - - class TinfoilRecipeCacheAdapter: - """ cooker.recipecache adapter """ - def __init__(self, tinfoil, mc=''): - self.tinfoil = tinfoil - self.mc = mc - self._cache = {} - - def get_pkg_pn_fn(self): - pkg_pn = defaultdict(list, self.tinfoil.run_command('getRecipes', self.mc) or []) - pkg_fn = {} - for pn, fnlist in pkg_pn.items(): - for fn in fnlist: - pkg_fn[fn] = pn - self._cache['pkg_pn'] = pkg_pn - self._cache['pkg_fn'] = pkg_fn - - def __getattr__(self, name): - # Grab these only when they are requested since they aren't always used - if name in self._cache: - return self._cache[name] - elif name == 'pkg_pn': - self.get_pkg_pn_fn() - return self._cache[name] - elif name == 'pkg_fn': - self.get_pkg_pn_fn() - return self._cache[name] - elif name == 'deps': - attrvalue = defaultdict(list, self.tinfoil.run_command('getRecipeDepends', self.mc) or []) - elif name == 'rundeps': - attrvalue = defaultdict(lambda: defaultdict(list), self.tinfoil.run_command('getRuntimeDepends', self.mc) or []) - elif name == 'runrecs': - attrvalue = defaultdict(lambda: defaultdict(list), self.tinfoil.run_command('getRuntimeRecommends', self.mc) or []) - elif name == 'pkg_pepvpr': - attrvalue = self.tinfoil.run_command('getRecipeVersions', self.mc) or {} - elif name == 'inherits': - attrvalue = self.tinfoil.run_command('getRecipeInherits', self.mc) or {} - elif name == 'bbfile_priority': - attrvalue = self.tinfoil.run_command('getBbFilePriority', self.mc) or {} - elif name == 'pkg_dp': - attrvalue = self.tinfoil.run_command('getDefaultPreference', self.mc) or {} - elif name == 'fn_provides': - attrvalue = self.tinfoil.run_command('getRecipeProvides', self.mc) or {} - elif name == 'packages': - attrvalue = self.tinfoil.run_command('getRecipePackages', self.mc) or {} - elif name == 'packages_dynamic': - attrvalue = self.tinfoil.run_command('getRecipePackagesDynamic', self.mc) or {} - elif name == 'rproviders': - attrvalue = self.tinfoil.run_command('getRProviders', self.mc) or {} - else: - raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name)) - - self._cache[name] = attrvalue - return attrvalue - - class TinfoilSkiplistByMcAdapter: - def __init__(self, tinfoil): - self.tinfoil = tinfoil - - def __getitem__(self, mc): - return self.tinfoil.get_skipped_recipes(mc) - - def __init__(self, tinfoil): - self.tinfoil = tinfoil - self.multiconfigs = [''] + (tinfoil.config_data.getVar('BBMULTICONFIG') or '').split() - self.collections = {} - self.recipecaches = {} - self.skiplist_by_mc = self.TinfoilSkiplistByMcAdapter(tinfoil) - for mc in self.multiconfigs: - self.collections[mc] = self.TinfoilCookerCollectionAdapter(tinfoil, mc) - self.recipecaches[mc] = self.TinfoilRecipeCacheAdapter(tinfoil, mc) - self._cache = {} - def __getattr__(self, name): - # Grab these only when they are requested since they aren't always used - if name in self._cache: - return self._cache[name] - elif name == 'bbfile_config_priorities': - ret = 
self.tinfoil.run_command('getLayerPriorities') - bbfile_config_priorities = [] - for collection, pattern, regex, pri in ret: - bbfile_config_priorities.append((collection, pattern, re.compile(regex), pri)) - - attrvalue = bbfile_config_priorities - else: - raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name)) - - self._cache[name] = attrvalue - return attrvalue - - def findBestProvider(self, pn): - return self.tinfoil.find_best_provider(pn) - - -class TinfoilRecipeInfo: - """ - Provides a convenient representation of the cached information for a single recipe. - Some attributes are set on construction, others are read on-demand (which internally - may result in a remote procedure call to the bitbake server the first time). - Note that only information which is cached is available through this object - if - you need other variable values you will need to parse the recipe using - Tinfoil.parse_recipe(). - """ - def __init__(self, recipecache, d, pn, fn, fns): - self._recipecache = recipecache - self._d = d - self.pn = pn - self.fn = fn - self.fns = fns - self.inherit_files = recipecache.inherits[fn] - self.depends = recipecache.deps[fn] - (self.pe, self.pv, self.pr) = recipecache.pkg_pepvpr[fn] - self._cached_packages = None - self._cached_rprovides = None - self._cached_packages_dynamic = None - - def __getattr__(self, name): - if name == 'alternates': - return [x for x in self.fns if x != self.fn] - elif name == 'rdepends': - return self._recipecache.rundeps[self.fn] - elif name == 'rrecommends': - return self._recipecache.runrecs[self.fn] - elif name == 'provides': - return self._recipecache.fn_provides[self.fn] - elif name == 'packages': - if self._cached_packages is None: - self._cached_packages = [] - for pkg, fns in self._recipecache.packages.items(): - if self.fn in fns: - self._cached_packages.append(pkg) - return self._cached_packages - elif name == 'packages_dynamic': - if self._cached_packages_dynamic is None: - self._cached_packages_dynamic = [] - for pkg, fns in self._recipecache.packages_dynamic.items(): - if self.fn in fns: - self._cached_packages_dynamic.append(pkg) - return self._cached_packages_dynamic - elif name == 'rprovides': - if self._cached_rprovides is None: - self._cached_rprovides = [] - for pkg, fns in self._recipecache.rproviders.items(): - if self.fn in fns: - self._cached_rprovides.append(pkg) - return self._cached_rprovides - else: - raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name)) - def inherits(self, only_recipe=False): - """ - Get the inherited classes for a recipe. Returns the class names only. - Parameters: - only_recipe: True to return only the classes inherited by the recipe - itself, False to return all classes inherited within - the context for the recipe (which includes globally - inherited classes). - """ - if only_recipe: - global_inherit = [x for x in (self._d.getVar('BBINCLUDED') or '').split() if x.endswith('.bbclass')] - else: - global_inherit = [] - for clsfile in self.inherit_files: - if only_recipe and clsfile in global_inherit: - continue - clsname = os.path.splitext(os.path.basename(clsfile))[0] - yield clsname - def __str__(self): - return '%s' % self.pn - - -class Tinfoil: - """ - Tinfoil - an API for scripts and utilities to query - BitBake internals and perform build operations. - """ - - def __init__(self, output=sys.stdout, tracking=False, setup_logging=True): - """ - Create a new tinfoil object. 
- Parameters: - output: specifies where console output should be sent. Defaults - to sys.stdout. - tracking: True to enable variable history tracking, False to - disable it (default). Enabling this has a minor - performance impact so typically it isn't enabled - unless you need to query variable history. - setup_logging: True to setup a logger so that things like - bb.warn() will work immediately and timeout warnings - are visible; False to let BitBake do this itself. - """ - self.logger = logging.getLogger('BitBake') - self.config_data = None - self.cooker = None - self.tracking = tracking - self.ui_module = None - self.server_connection = None - self.recipes_parsed = False - self.quiet = 0 - self.oldhandlers = self.logger.handlers[:] - self.localhandlers = [] - if setup_logging: - # This is the *client-side* logger, nothing to do with - # logging messages from the server - bb.msg.logger_create('BitBake', output) - for handler in self.logger.handlers: - if handler not in self.oldhandlers: - self.localhandlers.append(handler) - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - self.shutdown() - - def prepare(self, config_only=False, config_params=None, quiet=0, extra_features=None): - """ - Prepares the underlying BitBake system to be used via tinfoil. - This function must be called prior to calling any of the other - functions in the API. - NOTE: if you call prepare() you must absolutely call shutdown() - before your code terminates. You can use a "with" block to ensure - this happens e.g. - - with bb.tinfoil.Tinfoil() as tinfoil: - tinfoil.prepare() - ... - - Parameters: - config_only: True to read only the configuration and not load - the cache / parse recipes. This is useful if you just - want to query the value of a variable at the global - level or you want to do anything else that doesn't - involve knowing anything about the recipes in the - current configuration. False loads the cache / parses - recipes. - config_params: optionally specify your own configuration - parameters. If not specified an instance of - TinfoilConfigParameters will be created internally. - quiet: quiet level controlling console output - equivalent - to bitbake's -q/--quiet option. Default of 0 gives - the same output level as normal bitbake execution. - extra_features: extra features to be added to the feature - set requested from the server. See - CookerFeatures._feature_list for possible - features. 
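-
-        Example (a minimal sketch reading global configuration only):
-
-            with bb.tinfoil.Tinfoil() as tinfoil:
-                tinfoil.prepare(config_only=True)
-                machine = tinfoil.config_data.getVar('MACHINE')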
- """ - self.quiet = quiet - - if self.tracking: - extrafeatures = [bb.cooker.CookerFeatures.BASEDATASTORE_TRACKING] - else: - extrafeatures = [] - - if extra_features: - extrafeatures += extra_features - - if not config_params: - config_params = TinfoilConfigParameters(config_only=config_only, quiet=quiet) - - if not config_only: - # Disable local loggers because the UI module is going to set up its own - for handler in self.localhandlers: - self.logger.handlers.remove(handler) - self.localhandlers = [] - - self.server_connection, ui_module = setup_bitbake(config_params, extrafeatures) - - self.ui_module = ui_module - - # Ensure the path to bitbake's bin directory is in PATH so that things like - # bitbake-worker can be run (usually this is the case, but it doesn't have to be) - path = os.getenv('PATH').split(':') - bitbakebinpath = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'bin')) - for entry in path: - if entry.endswith(os.sep): - entry = entry[:-1] - if os.path.abspath(entry) == bitbakebinpath: - break - else: - path.insert(0, bitbakebinpath) - os.environ['PATH'] = ':'.join(path) - - if self.server_connection: - _server_connections.append(self.server_connection) - if config_only: - config_params.updateToServer(self.server_connection.connection, os.environ.copy()) - self.run_command('parseConfiguration') - else: - self.run_actions(config_params) - self.recipes_parsed = True - - self.config_data = TinfoilDataStoreConnector(self, 0) - self.cooker = TinfoilCookerAdapter(self) - self.cooker_data = self.cooker.recipecaches[''] - else: - raise Exception('Failed to start bitbake server') - - def run_actions(self, config_params): - """ - Run the actions specified in config_params through the UI. - """ - ret = self.ui_module.main(self.server_connection.connection, self.server_connection.events, config_params) - if ret: - raise TinfoilUIException(ret) - - def parseRecipes(self): - """ - Legacy function - use parse_recipes() instead. - """ - self.parse_recipes() - - def parse_recipes(self): - """ - Load information on all recipes. Normally you should specify - config_only=False when calling prepare() instead of using this - function; this function is designed for situations where you need - to initialise Tinfoil and use it with config_only=True first and - then conditionally call this function to parse recipes later. - """ - config_params = TinfoilConfigParameters(config_only=False, quiet=self.quiet) - self.run_actions(config_params) - self.recipes_parsed = True - - def modified_files(self): - """ - Notify the server it needs to revalidate it's caches since the client has modified files - """ - self.run_command("revalidateCaches") - - def run_command(self, command, *params, handle_events=True): - """ - Run a command on the server (as implemented in bb.command). - Note that there are two types of command - synchronous and - asynchronous; in order to receive the results of asynchronous - commands you will need to set an appropriate event mask - using set_event_mask() and listen for the result using - wait_event() - with the correct event mask you'll at least get - bb.command.CommandCompleted and possibly other events before - that depending on the command. 
- """ - if not self.server_connection: - raise Exception('Not connected to server (did you call .prepare()?)') - - commandline = [command] - if params: - commandline.extend(params) - try: - result = self.server_connection.connection.runCommand(commandline) - finally: - while handle_events: - event = self.wait_event() - if not event: - break - if isinstance(event, logging.LogRecord): - if event.taskpid == 0 or event.levelno > logging.INFO: - self.logger.handle(event) - if result[1]: - raise TinfoilCommandFailed(result[1]) - return result[0] - - def set_event_mask(self, eventlist): - """Set the event mask which will be applied within wait_event()""" - if not self.server_connection: - raise Exception('Not connected to server (did you call .prepare()?)') - llevel, debug_domains = bb.msg.constructLogOptions() - ret = self.run_command('setEventMask', self.server_connection.connection.getEventHandle(), llevel, debug_domains, eventlist) - if not ret: - raise Exception('setEventMask failed') - - def wait_event(self, timeout=0): - """ - Wait for an event from the server for the specified time. - A timeout of 0 means don't wait if there are no events in the queue. - Returns the next event in the queue or None if the timeout was - reached. Note that in order to receive any events you will - first need to set the internal event mask using set_event_mask() - (otherwise whatever event mask the UI set up will be in effect). - """ - if not self.server_connection: - raise Exception('Not connected to server (did you call .prepare()?)') - return self.server_connection.events.waitEvent(timeout) - - def get_overlayed_recipes(self, mc=''): - """ - Find recipes which are overlayed (i.e. where recipes exist in multiple layers) - """ - return defaultdict(list, self.run_command('getOverlayedRecipes', mc)) - - def get_skipped_recipes(self, mc=''): - """ - Find recipes which were skipped (i.e. SkipRecipe was raised - during parsing). - """ - return OrderedDict(self.run_command('getSkippedRecipes', mc)) - - def get_all_providers(self, mc=''): - return defaultdict(list, self.run_command('allProviders', mc)) - - def find_providers(self, mc=''): - return self.run_command('findProviders', mc) - - def find_best_provider(self, pn): - return self.run_command('findBestProvider', pn) - - def get_runtime_providers(self, rdep): - return self.run_command('getRuntimeProviders', rdep) - - # TODO: teach this method about mc - def get_recipe_file(self, pn): - """ - Get the file name for the specified recipe/target. Raises - bb.providers.NoProvider if there is no match or the recipe was - skipped. - """ - best = self.find_best_provider(pn) - if not best or (len(best) > 3 and not best[3]): - # TODO: pass down mc - skiplist = self.get_skipped_recipes() - taskdata = bb.taskdata.TaskData(None, skiplist=skiplist) - skipreasons = taskdata.get_reasons(pn) - if skipreasons: - raise bb.providers.NoProvider('%s is unavailable:\n %s' % (pn, ' \n'.join(skipreasons))) - else: - raise bb.providers.NoProvider('Unable to find any recipe file matching "%s"' % pn) - return best[3] - - def get_file_appends(self, fn, mc=''): - """ - Find the bbappends for a recipe file - """ - return self.run_command('getFileAppends', fn, mc) - - def all_recipes(self, mc='', sort=True): - """ - Enable iterating over all recipes in the current configuration. - Returns an iterator over TinfoilRecipeInfo objects created on demand. - Parameters: - mc: The multiconfig, default of '' uses the main configuration. 
-            sort: True to sort recipes alphabetically (default), False otherwise
-        """
-        recipecache = self.cooker.recipecaches[mc]
-        if sort:
-            recipes = sorted(recipecache.pkg_pn.items())
-        else:
-            recipes = recipecache.pkg_pn.items()
-        for pn, fns in recipes:
-            prov = self.find_best_provider(pn)
-            recipe = TinfoilRecipeInfo(recipecache,
-                                       self.config_data,
-                                       pn=pn,
-                                       fn=prov[3],
-                                       fns=fns)
-            yield recipe
-
-    def all_recipe_files(self, mc='', variants=True, preferred_only=False):
-        """
-        Enable iterating over all recipe files in the current configuration.
-        Returns an iterator over file paths.
-        Parameters:
-            mc: The multiconfig, default of '' uses the main configuration.
-            variants: True to include variants of recipes created through
-                      BBCLASSEXTEND (default) or False to exclude them
-            preferred_only: True to include only the preferred recipe where
-                            multiple exist providing the same PN, False to list
-                            all recipes
-        """
-        recipecache = self.cooker.recipecaches[mc]
-        if preferred_only:
-            files = []
-            for pn in recipecache.pkg_pn.keys():
-                prov = self.find_best_provider(pn)
-                files.append(prov[3])
-        else:
-            files = recipecache.pkg_fn.keys()
-        for fn in sorted(files):
-            if not variants and fn.startswith('virtual:'):
-                continue
-            yield fn
-
-
-    def get_recipe_info(self, pn, mc=''):
-        """
-        Get information on a specific recipe in the current configuration by name (PN).
-        Returns a TinfoilRecipeInfo object created on demand.
-        Parameters:
-            pn: recipe name (PN) to look up
-            mc: The multiconfig, default of '' uses the main configuration.
-        """
-        recipecache = self.cooker.recipecaches[mc]
-        prov = self.find_best_provider(pn)
-        fn = prov[3]
-        if fn:
-            actual_pn = recipecache.pkg_fn[fn]
-            recipe = TinfoilRecipeInfo(recipecache,
-                                       self.config_data,
-                                       pn=actual_pn,
-                                       fn=fn,
-                                       fns=recipecache.pkg_pn[actual_pn])
-            return recipe
-        else:
-            return None
-
-    def parse_recipe(self, pn):
-        """
-        Parse the specified recipe and return a datastore object
-        representing the environment for the recipe.
-        """
-        fn = self.get_recipe_file(pn)
-        return self.parse_recipe_file(fn)
-
-    @contextmanager
-    def _data_tracked_if_enabled(self):
-        """
-        A context manager to enable data tracking for a code segment if data
-        tracking was enabled for this tinfoil instance.
-        """
-        if self.tracking:
-            # Enable history tracking just for the operation
-            self.run_command('enableDataTracking')
-
-        # The wrapped operation runs here, with data tracking if enabled
-        yield
-
-        if self.tracking:
-            self.run_command('disableDataTracking')
-
-    def finalizeData(self):
-        """
-        Run anonymous functions and expand keys
-        """
-        with self._data_tracked_if_enabled():
-            return self._reconvert_type(self.run_command('finalizeData'), 'DataStoreConnectionHandle')
-
-    def parse_recipe_file(self, fn, appends=True, appendlist=None, config_data=None):
-        """
-        Parse the specified recipe file (with or without bbappends)
-        and return a datastore object representing the environment
-        for the recipe.
-        Parameters:
-            fn: recipe file to parse - can be a file path or virtual
-                specification
-            appends: True to apply bbappends, False otherwise
-            appendlist: optional list of bbappend files to apply, if you
-                        want to filter them
-            config_data: optional datastore to use as the base configuration
-                         when parsing (a copy is made); if not specified, the
-                         server's configuration data is used
-        """
-        with self._data_tracked_if_enabled():
-            if appends and appendlist == []:
-                appends = False
-            if config_data:
-                config_data = bb.data.createCopy(config_data)
-                dscon = self.run_command('parseRecipeFile', fn, appends, appendlist, config_data.dsindex)
-            else:
-                dscon = self.run_command('parseRecipeFile', fn, appends, appendlist)
-            if dscon:
-                return self._reconvert_type(dscon, 'DataStoreConnectionHandle')
-            else:
-                return None
-
-    def build_file(self, buildfile, task, internal=True):
-        """
-        Runs the specified task for just a single recipe (i.e. no dependencies).
-        This is equivalent to bitbake -b, except with the default internal=True
-        no warning about dependencies will be produced, normal info messages
-        from the runqueue will be silenced and BuildInit, BuildStarted and
-        BuildCompleted events will not be fired.
-        """
-        return self.run_command('buildFile', buildfile, task, internal)
-
-    @wait_for
-    def build_file_sync(self, *args):
-        self.build_file(*args)
-
-    def build_targets(self, targets, task=None, handle_events=True, extra_events=None, event_callback=None):
-        """
-        Builds the specified targets. This is equivalent to a normal invocation
-        of bitbake. Has built-in event handling which is enabled by default and
-        can be extended if needed.
-        Parameters:
-            targets:
-                One or more targets to build. Can be a list or a
-                space-separated string.
-            task:
-                The task to run; if None then the value of BB_DEFAULT_TASK
-                will be used. Default None.
-            handle_events:
-                True to handle events in a similar way to normal bitbake
-                invocation with knotty; False to return immediately (on the
-                assumption that the caller will handle the events instead).
-                Default True.
-            extra_events:
-                An optional list of events to add to the event mask (if
-                handle_events=True). If you add events here you also need
-                to specify a callback function in event_callback that will
-                handle the additional events. Default None.
-            event_callback:
-                An optional function taking a single parameter which
-                will be called first upon receiving any event (if
-                handle_events=True) so that the caller can override or
-                extend the event handling. Default None.
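-
-        Example (a sketch; assumes the target exists in the configured layers):
-
-            with bb.tinfoil.Tinfoil() as tinfoil:
-                tinfoil.prepare()
-                tinfoil.build_targets('core-image-minimal')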
- """ - if isinstance(targets, str): - targets = targets.split() - if not task: - task = self.config_data.getVar('BB_DEFAULT_TASK') - - if handle_events: - # A reasonable set of default events matching up with those we handle below - eventmask = [ - 'bb.event.BuildStarted', - 'bb.event.BuildCompleted', - 'logging.LogRecord', - 'bb.event.NoProvider', - 'bb.command.CommandCompleted', - 'bb.command.CommandFailed', - 'bb.build.TaskStarted', - 'bb.build.TaskFailed', - 'bb.build.TaskSucceeded', - 'bb.build.TaskFailedSilent', - 'bb.build.TaskProgress', - 'bb.runqueue.runQueueTaskStarted', - 'bb.runqueue.sceneQueueTaskStarted', - 'bb.event.ProcessStarted', - 'bb.event.ProcessProgress', - 'bb.event.ProcessFinished', - ] - if extra_events: - eventmask.extend(extra_events) - ret = self.set_event_mask(eventmask) - - includelogs = self.config_data.getVar('BBINCLUDELOGS') - loglines = self.config_data.getVar('BBINCLUDELOGS_LINES') - - ret = self.run_command('buildTargets', targets, task) - if handle_events: - lastevent = time.time() - result = False - # Borrowed from knotty, instead somewhat hackily we use the helper - # as the object to store "shutdown" on - helper = bb.ui.uihelper.BBUIHelper() - helper.shutdown = 0 - parseprogress = None - termfilter = bb.ui.knotty.TerminalFilter(helper, helper, self.logger.handlers, quiet=self.quiet) - try: - while True: - try: - event = self.wait_event(0.25) - if event: - lastevent = time.time() - if event_callback and event_callback(event): - continue - if helper.eventHandler(event): - if isinstance(event, bb.build.TaskFailedSilent): - self.logger.warning("Logfile for failed setscene task is %s" % event.logfile) - elif isinstance(event, bb.build.TaskFailed): - bb.ui.knotty.print_event_log(event, includelogs, loglines, termfilter) - continue - if isinstance(event, bb.event.ProcessStarted): - if self.quiet > 1: - continue - parseprogress = bb.ui.knotty.new_progress(event.processname, event.total) - parseprogress.start(False) - continue - if isinstance(event, bb.event.ProcessProgress): - if self.quiet > 1: - continue - if parseprogress: - parseprogress.update(event.progress) - else: - bb.warn("Got ProcessProgress event for something that never started?") - continue - if isinstance(event, bb.event.ProcessFinished): - if self.quiet > 1: - continue - if parseprogress: - parseprogress.finish() - parseprogress = None - continue - if isinstance(event, bb.command.CommandCompleted): - result = True - break - if isinstance(event, (bb.command.CommandFailed, bb.command.CommandExit)): - self.logger.error(str(event)) - result = False - break - if isinstance(event, logging.LogRecord): - if event.taskpid == 0 or event.levelno > logging.INFO: - self.logger.handle(event) - continue - if isinstance(event, bb.event.NoProvider): - self.logger.error(str(event)) - result = False - break - elif helper.shutdown > 1: - break - termfilter.updateFooter() - if time.time() > (lastevent + (3*60)): - if not self.run_command('ping', handle_events=False): - print("\nUnable to ping server and no events, closing down...\n") - return False - except KeyboardInterrupt: - termfilter.clearFooter() - if helper.shutdown == 1: - print("\nSecond Keyboard Interrupt, stopping...\n") - ret = self.run_command("stateForceShutdown") - if ret and ret[2]: - self.logger.error("Unable to cleanly stop: %s" % ret[2]) - elif helper.shutdown == 0: - print("\nKeyboard Interrupt, closing down...\n") - interrupted = True - ret = self.run_command("stateShutdown") - if ret and ret[2]: - self.logger.error("Unable to cleanly 
shutdown: %s" % ret[2]) - helper.shutdown = helper.shutdown + 1 - termfilter.clearFooter() - finally: - termfilter.finish() - if helper.failed_tasks: - result = False - return result - else: - return ret - - def shutdown(self): - """ - Shut down tinfoil. Disconnects from the server and gracefully - releases any associated resources. You must call this function if - prepare() has been called, or use a with... block when you create - the tinfoil object which will ensure that it gets called. - """ - try: - if self.server_connection: - try: - self.run_command('clientComplete') - finally: - _server_connections.remove(self.server_connection) - bb.event.ui_queue = [] - self.server_connection.terminate() - self.server_connection = None - - finally: - # Restore logging handlers to how it looked when we started - if self.oldhandlers: - for handler in self.logger.handlers: - if handler not in self.oldhandlers: - self.logger.handlers.remove(handler) - - def _reconvert_type(self, obj, origtypename): - """ - Convert an object back to the right type, in the case - that marshalling has changed it (especially with xmlrpc) - """ - supported_types = { - 'set': set, - 'DataStoreConnectionHandle': bb.command.DataStoreConnectionHandle, - } - - origtype = supported_types.get(origtypename, None) - if origtype is None: - raise Exception('Unsupported type "%s"' % origtypename) - if type(obj) == origtype: - newobj = obj - elif isinstance(obj, dict): - # New style class - newobj = origtype() - for k,v in obj.items(): - setattr(newobj, k, v) - else: - # Assume we can coerce the type - newobj = origtype(obj) - - if isinstance(newobj, bb.command.DataStoreConnectionHandle): - newobj = TinfoilDataStoreConnector(self, newobj.dsindex) - - return newobj - - -class TinfoilConfigParameters(BitBakeConfigParameters): - - def __init__(self, config_only, **options): - self.initial_options = options - # Apply some sane defaults - if not 'parse_only' in options: - self.initial_options['parse_only'] = not config_only - #if not 'status_only' in options: - # self.initial_options['status_only'] = config_only - if not 'ui' in options: - self.initial_options['ui'] = 'knotty' - if not 'argv' in options: - self.initial_options['argv'] = [] - - super(TinfoilConfigParameters, self).__init__() - - def parseCommandLine(self, argv=None): - # We don't want any parameters parsed from the command line - opts = super(TinfoilConfigParameters, self).parseCommandLine([]) - for key, val in self.initial_options.items(): - setattr(opts[0], key, val) - return opts diff --git a/bitbake/lib/bb/ui/__init__.py b/bitbake/lib/bb/ui/__init__.py deleted file mode 100644 index 4b7ac36ca6..0000000000 --- a/bitbake/lib/bb/ui/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# -# BitBake UI Implementation -# -# Copyright (C) 2006-2007 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# diff --git a/bitbake/lib/bb/ui/buildinfohelper.py b/bitbake/lib/bb/ui/buildinfohelper.py deleted file mode 100644 index 4ee45d67a2..0000000000 --- a/bitbake/lib/bb/ui/buildinfohelper.py +++ /dev/null @@ -1,1996 +0,0 @@ -# -# BitBake ToasterUI Implementation -# -# Copyright (C) 2013 Intel Corporation -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import sys -import bb -import re -import os - -import django -from django.utils import timezone - -import toaster -# Add toaster module to the search path to help django.setup() find the right -# modules -sys.path.insert(0, os.path.dirname(toaster.__file__)) - -#Set the DJANGO_SETTINGS_MODULE if it's not already set 
-os.environ["DJANGO_SETTINGS_MODULE"] =\ - os.environ.get("DJANGO_SETTINGS_MODULE", - "toaster.toastermain.settings") -# Setup django framework (needs to be done before importing modules) -django.setup() - -from orm.models import Build, Task, Recipe, Layer_Version, Layer, Target, LogMessage, HelpText -from orm.models import Target_Image_File, TargetKernelFile, TargetSDKFile -from orm.models import Variable, VariableHistory -from orm.models import Package, Package_File, Target_Installed_Package, Target_File -from orm.models import Task_Dependency, Package_Dependency -from orm.models import Recipe_Dependency, Provides -from orm.models import Project, CustomImagePackage -from orm.models import signal_runbuilds - -from bldcontrol.models import BuildEnvironment, BuildRequest -from bldcontrol.models import BRLayer -from bldcontrol import bbcontroller - -from bb.msg import BBLogFormatter as formatter -from django.db import models -from pprint import pformat -import logging -from datetime import datetime, timedelta - -from django.db import transaction - - -# pylint: disable=invalid-name -# the logger name is standard throughout BitBake -logger = logging.getLogger("ToasterLogger") - -class NotExisting(Exception): - pass - -class ORMWrapper(object): - """ This class creates the dictionaries needed to store information in the database - following the format defined by the Django models. It is also used to save this - information in the database. - """ - - def __init__(self): - self.layer_version_objects = [] - self.layer_version_built = [] - self.task_objects = {} - self.recipe_objects = {} - - @staticmethod - def _build_key(**kwargs): - key = "0" - for k in sorted(kwargs.keys()): - if isinstance(kwargs[k], models.Model): - key += "-%d" % kwargs[k].id - else: - key += "-%s" % str(kwargs[k]) - return key - - - def _cached_get_or_create(self, clazz, **kwargs): - """ This is a memory-cached get_or_create. We assume that the objects will not be created in the - database through any other means. - """ - - assert issubclass(clazz, models.Model), "_cached_get_or_create needs to get the class as first argument" - - key = ORMWrapper._build_key(**kwargs) - dictname = "objects_%s" % clazz.__name__ - if not dictname in vars(self).keys(): - vars(self)[dictname] = {} - - created = False - if not key in vars(self)[dictname].keys(): - vars(self)[dictname][key], created = \ - clazz.objects.get_or_create(**kwargs) - - return (vars(self)[dictname][key], created) - - - def _cached_get(self, clazz, **kwargs): - """ This is a memory-cached get. We assume that the objects will not change in the database between gets. - """ - assert issubclass(clazz, models.Model), "_cached_get needs to get the class as first argument" - - key = ORMWrapper._build_key(**kwargs) - dictname = "objects_%s" % clazz.__name__ - - if not dictname in vars(self).keys(): - vars(self)[dictname] = {} - - if not key in vars(self)[dictname].keys(): - vars(self)[dictname][key] = clazz.objects.get(**kwargs) - - return vars(self)[dictname][key] - - def get_similar_target_with_image_files(self, target): - """ - Get a Target object "similar" to target; i.e. with the same target - name ('core-image-minimal' etc.) and machine. 
- """ - return target.get_similar_target_with_image_files() - - def get_similar_target_with_sdk_files(self, target): - return target.get_similar_target_with_sdk_files() - - def clone_image_artifacts(self, target_from, target_to): - target_to.clone_image_artifacts_from(target_from) - - def clone_sdk_artifacts(self, target_from, target_to): - target_to.clone_sdk_artifacts_from(target_from) - - def _timestamp_to_datetime(self, secs): - """ - Convert timestamp in seconds to Python datetime - """ - return timezone.make_aware(datetime(1970, 1, 1) + timedelta(seconds=secs)) - - # pylint: disable=no-self-use - # we disable detection of no self use in functions because the methods actually work on the object - # even if they don't touch self anywhere - - # pylint: disable=bad-continuation - # we do not follow the python conventions for continuation indentation due to long lines here - - def get_or_create_build_object(self, brbe): - prj = None - buildrequest = None - if brbe is not None: - # Toaster-triggered build - logger.debug("buildinfohelper: brbe is %s" % brbe) - br, _ = brbe.split(":") - buildrequest = BuildRequest.objects.get(pk=br) - prj = buildrequest.project - else: - # CLI build - prj = Project.objects.get_or_create_default_project() - logger.debug("buildinfohelper: project is not specified, defaulting to %s" % prj) - - if buildrequest is not None: - # reuse existing Build object - build = buildrequest.build - build.project = prj - build.save() - else: - # create new Build object - now = timezone.now() - build = Build.objects.create( - project=prj, - started_on=now, - completed_on=now, - build_name='') - - logger.debug("buildinfohelper: build is created %s" % build) - - if buildrequest is not None: - buildrequest.build = build - buildrequest.save() - - return build - - def update_build(self, build, data_dict): - for key in data_dict: - setattr(build, key, data_dict[key]) - build.save() - - @staticmethod - def get_or_create_targets(target_info): - """ - NB get_or_create() is used here because for Toaster-triggered builds, - we already created the targets when the build was triggered. 
- """ - result = [] - for target in target_info['targets']: - task = '' - if ':' in target: - target, task = target.split(':', 1) - if task.startswith('do_'): - task = task[3:] - if task == 'build': - task = '' - - obj, _ = Target.objects.get_or_create(build=target_info['build'], - target=target, - task=task) - result.append(obj) - return result - - def update_build_stats_and_outcome(self, build, errors, warnings, taskfailures): - assert isinstance(build,Build) - assert isinstance(errors, int) - assert isinstance(warnings, int) - - if build.outcome == Build.CANCELLED: - return - try: - if build.buildrequest.state == BuildRequest.REQ_CANCELLING: - return - except AttributeError: - # We may not have a buildrequest if this is a command line build - pass - - outcome = Build.SUCCEEDED - if errors or taskfailures: - outcome = Build.FAILED - - build.completed_on = timezone.now() - build.outcome = outcome - build.save() - - # We force a sync point here to force the outcome status commit, - # which resolves a race condition with the build completion takedown - transaction.set_autocommit(True) - transaction.set_autocommit(False) - - signal_runbuilds() - - def update_target_set_license_manifest(self, target, license_manifest_path): - target.license_manifest_path = license_manifest_path - target.save() - - def update_target_set_package_manifest(self, target, package_manifest_path): - target.package_manifest_path = package_manifest_path - target.save() - - def update_task_object(self, build, task_name, recipe_name, task_stats): - """ - Find the task for build which matches the recipe and task name - to be stored - """ - task_to_update = Task.objects.get( - build = build, - task_name = task_name, - recipe__name = recipe_name - ) - - if 'started' in task_stats and 'ended' in task_stats: - task_to_update.started = self._timestamp_to_datetime(task_stats['started']) - task_to_update.ended = self._timestamp_to_datetime(task_stats['ended']) - task_to_update.elapsed_time = (task_stats['ended'] - task_stats['started']) - task_to_update.cpu_time_user = task_stats.get('cpu_time_user') - task_to_update.cpu_time_system = task_stats.get('cpu_time_system') - if 'disk_io_read' in task_stats and 'disk_io_write' in task_stats: - task_to_update.disk_io_read = task_stats['disk_io_read'] - task_to_update.disk_io_write = task_stats['disk_io_write'] - task_to_update.disk_io = task_stats['disk_io_read'] + task_stats['disk_io_write'] - - task_to_update.save() - - def get_update_task_object(self, task_information, must_exist = False): - assert 'build' in task_information - assert 'recipe' in task_information - assert 'task_name' in task_information - - # we use must_exist info for database look-up optimization - task_object, created = self._cached_get_or_create(Task, - build=task_information['build'], - recipe=task_information['recipe'], - task_name=task_information['task_name'] - ) - if created and must_exist: - task_information['debug'] = "build id %d, recipe id %d" % (task_information['build'].pk, task_information['recipe'].pk) - raise NotExisting("Task object created when expected to exist", task_information) - - object_changed = False - for v in vars(task_object): - if v in task_information.keys(): - if vars(task_object)[v] != task_information[v]: - vars(task_object)[v] = task_information[v] - object_changed = True - - # update setscene-related information if the task has a setscene - if task_object.outcome == Task.OUTCOME_COVERED and 1 == task_object.get_related_setscene().count(): - task_object.outcome = 
Task.OUTCOME_CACHED
-                object_changed = True
-
-                outcome_task_setscene = Task.objects.get(task_executed=True, build = task_object.build,
-                                    recipe = task_object.recipe, task_name=task_object.task_name+"_setscene").outcome
-                if outcome_task_setscene == Task.OUTCOME_SUCCESS:
-                    task_object.sstate_result = Task.SSTATE_RESTORED
-                    object_changed = True
-                elif outcome_task_setscene == Task.OUTCOME_FAILED:
-                    task_object.sstate_result = Task.SSTATE_FAILED
-                    object_changed = True
-
-        if object_changed:
-            task_object.save()
-        return task_object
-
-
-    def get_update_recipe_object(self, recipe_information, must_exist = False):
-        assert 'layer_version' in recipe_information
-        assert 'file_path' in recipe_information
-        assert 'pathflags' in recipe_information
-
-        assert not recipe_information['file_path'].startswith("/")  # we should have layer-relative paths at all times
-
-        def update_recipe_obj(recipe_object):
-            object_changed = False
-            for v in vars(recipe_object):
-                if v in recipe_information.keys():
-                    object_changed = True
-                    vars(recipe_object)[v] = recipe_information[v]
-
-            if object_changed:
-                recipe_object.save()
-
-        recipe, created = self._cached_get_or_create(Recipe, layer_version=recipe_information['layer_version'],
-                                        file_path=recipe_information['file_path'], pathflags = recipe_information['pathflags'])
-
-        update_recipe_obj(recipe)
-
-        built_recipe = None
-        # Create a copy of the recipe for historical purposes and update it
-        for built_layer in self.layer_version_built:
-            if built_layer.layer == recipe_information['layer_version'].layer:
-                built_recipe, c = self._cached_get_or_create(Recipe,
-                        layer_version=built_layer,
-                        file_path=recipe_information['file_path'],
-                        pathflags = recipe_information['pathflags'])
-                update_recipe_obj(built_recipe)
-                break
-
-        # If we're in analysis mode or if this is a custom recipe
-        # then we are wholly responsible for the data,
-        # and therefore we return the 'real' recipe rather than the build
-        # history copy of the recipe. 
- if recipe_information['layer_version'].build is not None and \ - recipe_information['layer_version'].build.project == \ - Project.objects.get_or_create_default_project(): - return recipe - - if built_recipe is None: - return recipe - - return built_recipe - - def get_update_layer_version_object(self, build_obj, layer_obj, layer_version_information): - if isinstance(layer_obj, Layer_Version): - # We already found our layer version for this build so just - # update it with the new build information - logger.debug("We found our layer from toaster") - layer_obj.local_path = layer_version_information['local_path'] - layer_obj.save() - self.layer_version_objects.append(layer_obj) - - # create a new copy of this layer version as a snapshot for - # historical purposes - layer_copy, c = Layer_Version.objects.get_or_create( - build=build_obj, - layer=layer_obj.layer, - release=layer_obj.release, - branch=layer_version_information['branch'], - commit=layer_version_information['commit'], - local_path=layer_version_information['local_path'], - ) - - logger.debug("Created new layer version %s for build history", - layer_copy.layer.name) - - self.layer_version_built.append(layer_copy) - - return layer_obj - - assert isinstance(build_obj, Build) - assert isinstance(layer_obj, Layer) - assert 'branch' in layer_version_information - assert 'commit' in layer_version_information - assert 'priority' in layer_version_information - assert 'local_path' in layer_version_information - - # If we're doing a command line build then associate this new layer with the - # project to avoid it 'contaminating' toaster data - project = None - if build_obj.project == Project.objects.get_or_create_default_project(): - project = build_obj.project - - layer_version_object, _ = Layer_Version.objects.get_or_create( - build = build_obj, - layer = layer_obj, - branch = layer_version_information['branch'], - commit = layer_version_information['commit'], - priority = layer_version_information['priority'], - local_path = layer_version_information['local_path'], - project=project) - - self.layer_version_objects.append(layer_version_object) - - return layer_version_object - - def get_update_layer_object(self, layer_information, brbe): - assert 'name' in layer_information - assert 'layer_index_url' in layer_information - - # From command line builds we have no brbe as the request is directly - # from bitbake - if brbe is None: - # If we don't have git commit sha then we're using a non-git - # layer so set the layer_source_dir to identify it as such - if not layer_information['version']['commit']: - local_source_dir = layer_information["local_path"] - else: - local_source_dir = None - - layer_object, _ = \ - Layer.objects.get_or_create( - name=layer_information['name'], - local_source_dir=local_source_dir, - layer_index_url=layer_information['layer_index_url']) - - return layer_object - else: - br_id, be_id = brbe.split(":") - - # Find the layer version by matching the layer event information - # against the metadata we have in Toaster - - try: - br_layer = BRLayer.objects.get(req=br_id, - name=layer_information['name']) - return br_layer.layer_version - except (BRLayer.MultipleObjectsReturned, BRLayer.DoesNotExist): - # There are multiple of the same layer name or the name - # hasn't been determined by the toaster.bbclass layer - # so let's filter by the local_path - bc = bbcontroller.getBuildEnvironmentController(pk=be_id) - for br_layer in BRLayer.objects.filter(req=br_id): - if br_layer.giturl and \ - 
layer_information['local_path'].endswith( - bc.getGitCloneDirectory(br_layer.giturl, - br_layer.commit)): - return br_layer.layer_version - - if br_layer.local_source_dir == \ - layer_information['local_path']: - return br_layer.layer_version - - # We've reached the end of our search and couldn't find the layer - # we can continue but some data may be missing - raise NotExisting("Unidentified layer %s" % - pformat(layer_information)) - - def save_target_file_information(self, build_obj, target_obj, filedata): - assert isinstance(build_obj, Build) - assert isinstance(target_obj, Target) - dirs = filedata['dirs'] - files = filedata['files'] - syms = filedata['syms'] - - # always create the root directory as a special case; - # note that this is never displayed, so the owner, group, - # size, permission are irrelevant - tf_obj = Target_File.objects.create(target = target_obj, - path = '/', - size = 0, - owner = '', - group = '', - permission = '', - inodetype = Target_File.ITYPE_DIRECTORY) - tf_obj.save() - - # insert directories, ordered by name depth - for d in sorted(dirs, key=lambda x:len(x[-1].split("/"))): - (user, group, size) = d[1:4] - permission = d[0][1:] - path = d[4].lstrip(".") - - # we already created the root directory, so ignore any - # entry for it - if not path: - continue - - parent_path = "/".join(path.split("/")[:len(path.split("/")) - 1]) - if not parent_path: - parent_path = "/" - parent_obj = self._cached_get(Target_File, target = target_obj, path = parent_path, inodetype = Target_File.ITYPE_DIRECTORY) - Target_File.objects.create( - target = target_obj, - path = path, - size = size, - inodetype = Target_File.ITYPE_DIRECTORY, - permission = permission, - owner = user, - group = group, - directory = parent_obj) - - - # we insert files - for d in files: - (user, group, size) = d[1:4] - permission = d[0][1:] - path = d[4].lstrip(".") - parent_path = "/".join(path.split("/")[:len(path.split("/")) - 1]) - inodetype = Target_File.ITYPE_REGULAR - if d[0].startswith('b'): - inodetype = Target_File.ITYPE_BLOCK - if d[0].startswith('c'): - inodetype = Target_File.ITYPE_CHARACTER - if d[0].startswith('p'): - inodetype = Target_File.ITYPE_FIFO - - tf_obj = Target_File.objects.create( - target = target_obj, - path = path, - size = size, - inodetype = inodetype, - permission = permission, - owner = user, - group = group) - parent_obj = self._cached_get(Target_File, target = target_obj, path = parent_path, inodetype = Target_File.ITYPE_DIRECTORY) - tf_obj.directory = parent_obj - tf_obj.save() - - # we insert symlinks - for d in syms: - (user, group, size) = d[1:4] - permission = d[0][1:] - path = d[4].lstrip(".") - filetarget_path = d[6] - - parent_path = "/".join(path.split("/")[:len(path.split("/")) - 1]) - if not filetarget_path.startswith("/"): - # we have a relative path, get a normalized absolute one - filetarget_path = parent_path + "/" + filetarget_path - fcp = filetarget_path.split("/") - fcpl = [] - for i in fcp: - if i == "..": - fcpl.pop() - else: - fcpl.append(i) - filetarget_path = "/".join(fcpl) - - try: - filetarget_obj = Target_File.objects.get(target = target_obj, path = filetarget_path) - except Target_File.DoesNotExist: - # we might have an invalid link; no way to detect this. 
just set it to None
-                filetarget_obj = None
-
-            try:
-                parent_obj = Target_File.objects.get(target = target_obj, path = parent_path, inodetype = Target_File.ITYPE_DIRECTORY)
-            except Target_File.DoesNotExist:
-                parent_obj = None
-
-            Target_File.objects.create(
-                target = target_obj,
-                path = path,
-                size = size,
-                inodetype = Target_File.ITYPE_SYMLINK,
-                permission = permission,
-                owner = user,
-                group = group,
-                directory = parent_obj,
-                sym_target = filetarget_obj)
-
-
-    def save_target_package_information(self, build_obj, target_obj, packagedict, pkgpnmap, recipes, built_package=False):
-        assert isinstance(build_obj, Build)
-        assert isinstance(target_obj, Target)
-
-        errormsg = []
-        for p in packagedict:
-            # Search name switches round the installed name vs package name;
-            # by default installed name == package name
-            searchname = p
-            if p not in pkgpnmap:
-                logger.warning("Image packages list contains %s, but it is"
-                               " missing from the all packages list where the"
-                               " metadata comes from. Skipping...", p)
-                continue
-
-            if 'OPKGN' in pkgpnmap[p].keys():
-                searchname = pkgpnmap[p]['OPKGN']
-
-            built_recipe = recipes[pkgpnmap[p]['PN']]
-
-            if built_package:
-                packagedict[p]['object'], created = Package.objects.get_or_create( build = build_obj, name = searchname )
-                recipe = built_recipe
-            else:
-                packagedict[p]['object'], created = \
-                        CustomImagePackage.objects.get_or_create(name=searchname)
-                # Clear the Package_Dependency objects as we're going to update
-                # the CustomImagePackage with the latest dependency information
-                packagedict[p]['object'].package_dependencies_target.all().delete()
-                packagedict[p]['object'].package_dependencies_source.all().delete()
-                try:
-                    recipe = self._cached_get(
-                        Recipe,
-                        name=built_recipe.name,
-                        layer_version__build=None,
-                        layer_version__release=
-                        built_recipe.layer_version.release,
-                        file_path=built_recipe.file_path,
-                        version=built_recipe.version
-                    )
-                except (Recipe.DoesNotExist,
-                        Recipe.MultipleObjectsReturned) as e:
-                    logger.info("We did not find one recipe for the "
-                                "configuration data package %s %s" % (p, e))
-                    continue
-
-            if created or packagedict[p]['object'].size == -1:    # save the data any way we can, not just if it was not created here; bug [YOCTO #6887]
-                # fill in everything we can from the runtime-reverse package data
-                try:
-                    packagedict[p]['object'].recipe = recipe
-                    packagedict[p]['object'].version = pkgpnmap[p]['PV']
-                    packagedict[p]['object'].installed_name = p
-                    packagedict[p]['object'].revision = pkgpnmap[p]['PR']
-                    packagedict[p]['object'].license = pkgpnmap[p]['LICENSE']
-                    packagedict[p]['object'].section = pkgpnmap[p]['SECTION']
-                    packagedict[p]['object'].summary = pkgpnmap[p]['SUMMARY']
-                    packagedict[p]['object'].description = pkgpnmap[p]['DESCRIPTION']
-                    packagedict[p]['object'].size = int(pkgpnmap[p]['PKGSIZE'])
-
-                    # no files recorded for this package, so save files info
-                    packagefile_objects = []
-                    for targetpath in pkgpnmap[p]['FILES_INFO']:
-                        targetfilesize = pkgpnmap[p]['FILES_INFO'][targetpath]
-                        packagefile_objects.append(Package_File( package = packagedict[p]['object'],
-                                                                 path = targetpath,
-                                                                 size = targetfilesize))
-                    if packagefile_objects:
-                        Package_File.objects.bulk_create(packagefile_objects)
-                except KeyError as e:
-                    errormsg.append(" stpi: Key error, package %s key %s \n" % (p, e))
-
-            # save disk installed size
-            packagedict[p]['object'].installed_size = packagedict[p]['size']
-            packagedict[p]['object'].save()
-
-            if built_package:
-                Target_Installed_Package.objects.create(target = target_obj, package = packagedict[p]['object'])
-
-        packagedeps_objs = []
-        pattern_so = re.compile(r'.*\.so(\.\d*)?$')
-        pattern_lib = re.compile(r'.*\-suffix(\d*)?$')
-        pattern_ko = re.compile(r'^kernel-module-.*')
-        for p in packagedict:
-            for (px,deptype) in packagedict[p]['depends']:
-                if deptype == 'depends':
-                    tdeptype = Package_Dependency.TYPE_TRDEPENDS
-                elif deptype == 'recommends':
-                    tdeptype = Package_Dependency.TYPE_TRECOMMENDS
-                else:
-                    # unknown dependency type; skip it rather than reference
-                    # an unbound tdeptype below
-                    continue
-
-                try:
-                    # Skip known non-package objects like libraries and kernel modules
-                    if pattern_so.match(px) or pattern_lib.match(px):
-                        logger.info("Toaster does not add library file dependencies to packages (%s,%s)", p, px)
-                        continue
-                    if pattern_ko.match(px):
-                        logger.info("Toaster does not add kernel module dependencies to packages (%s,%s)", p, px)
-                        continue
-                    packagedeps_objs.append(Package_Dependency(
-                        package = packagedict[p]['object'],
-                        depends_on = packagedict[px]['object'],
-                        dep_type = tdeptype,
-                        target = target_obj))
-                except KeyError as e:
-                    logger.warning("Could not add dependency to the package %s "
-                                   "because %s is an unknown package", p, px)
-
-        if packagedeps_objs:
-            Package_Dependency.objects.bulk_create(packagedeps_objs)
-        else:
-            logger.info("No package dependencies created")
-
-        if errormsg:
-            logger.warning("buildinfohelper: target_package_info could not identify recipes: \n%s", "".join(errormsg))
-
-    def save_target_image_file_information(self, target_obj, file_name, file_size):
-        Target_Image_File.objects.create(target=target_obj,
-            file_name=file_name, file_size=file_size)
-
-    def save_target_kernel_file(self, target_obj, file_name, file_size):
-        """
-        Save kernel file (bzImage, modules*) information for a Target target_obj.
-        """
-        TargetKernelFile.objects.create(target=target_obj,
-            file_name=file_name, file_size=file_size)
-
-    def save_target_sdk_file(self, target_obj, file_name, file_size):
-        """
-        Save SDK artifacts to the database, associating them with a
-        Target object.
-        """
-        TargetSDKFile.objects.create(target=target_obj, file_name=file_name,
-                                     file_size=file_size)
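-    # Design note (not part of the original file): the accumulate-then-
-    # bulk_create pattern used throughout this class builds a list of unsaved
-    # model instances and inserts them with one query instead of one per row.
-    # A minimal sketch, with pkg and files_info as hypothetical inputs:
-    #
-    #   objs = [Package_File(package=pkg, path=p, size=s)
-    #           for p, s in files_info.items()]
-    #   if objs:
-    #       Package_File.objects.bulk_create(objs)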
'OPKGN'="lib64-*"|"lib32-*") - return None - - # create and save the object - pname = package_info['PKG'] - built_recipe = recipes[package_info['PN']] - if 'OPKGN' in package_info.keys(): - pname = package_info['OPKGN'] - - if built_package: - bp_object, _ = Package.objects.get_or_create( build = build_obj, - name = pname ) - recipe = built_recipe - else: - bp_object, created = \ - CustomImagePackage.objects.get_or_create(name=pname) - try: - recipe = self._cached_get(Recipe, - name=built_recipe.name, - layer_version__build=None, - file_path=built_recipe.file_path, - version=built_recipe.version) - - except (Recipe.DoesNotExist, Recipe.MultipleObjectsReturned): - logger.debug("We did not find one recipe for the configuration" - "data package %s" % pname) - return - - bp_object.installed_name = package_info['PKG'] - bp_object.recipe = recipe - bp_object.version = package_info['PKGV'] - bp_object.revision = package_info['PKGR'] - bp_object.summary = package_info['SUMMARY'] - bp_object.description = package_info['DESCRIPTION'] - bp_object.size = int(package_info['PKGSIZE']) - bp_object.section = package_info['SECTION'] - bp_object.license = package_info['LICENSE'] - bp_object.save() - - # save any attached file information - packagefile_objects = [] - for path in package_info['FILES_INFO']: - packagefile_objects.append(Package_File( package = bp_object, - path = path, - size = package_info['FILES_INFO'][path] )) - if packagefile_objects: - Package_File.objects.bulk_create(packagefile_objects) - - def _po_byname(p): - if built_package: - pkg, created = Package.objects.get_or_create(build=build_obj, - name=p) - else: - pkg, created = CustomImagePackage.objects.get_or_create(name=p) - - if created: - pkg.size = -1 - pkg.save() - return pkg - - packagedeps_objs = [] - # save soft dependency information - if 'RDEPENDS' in package_info and package_info['RDEPENDS']: - for p in bb.utils.explode_deps(package_info['RDEPENDS']): - packagedeps_objs.append(Package_Dependency( package = bp_object, - depends_on = _po_byname(p), dep_type = Package_Dependency.TYPE_RDEPENDS)) - if 'RPROVIDES' in package_info and package_info['RPROVIDES']: - for p in bb.utils.explode_deps(package_info['RPROVIDES']): - packagedeps_objs.append(Package_Dependency( package = bp_object, - depends_on = _po_byname(p), dep_type = Package_Dependency.TYPE_RPROVIDES)) - if 'RRECOMMENDS' in package_info and package_info['RRECOMMENDS']: - for p in bb.utils.explode_deps(package_info['RRECOMMENDS']): - packagedeps_objs.append(Package_Dependency( package = bp_object, - depends_on = _po_byname(p), dep_type = Package_Dependency.TYPE_RRECOMMENDS)) - if 'RSUGGESTS' in package_info and package_info['RSUGGESTS']: - for p in bb.utils.explode_deps(package_info['RSUGGESTS']): - packagedeps_objs.append(Package_Dependency( package = bp_object, - depends_on = _po_byname(p), dep_type = Package_Dependency.TYPE_RSUGGESTS)) - if 'RREPLACES' in package_info and package_info['RREPLACES']: - for p in bb.utils.explode_deps(package_info['RREPLACES']): - packagedeps_objs.append(Package_Dependency( package = bp_object, - depends_on = _po_byname(p), dep_type = Package_Dependency.TYPE_RREPLACES)) - if 'RCONFLICTS' in package_info and package_info['RCONFLICTS']: - for p in bb.utils.explode_deps(package_info['RCONFLICTS']): - packagedeps_objs.append(Package_Dependency( package = bp_object, - depends_on = _po_byname(p), dep_type = Package_Dependency.TYPE_RCONFLICTS)) - - if packagedeps_objs: - Package_Dependency.objects.bulk_create(packagedeps_objs) - - return bp_object 
-
-    def save_build_variables(self, build_obj, vardump):
-        assert isinstance(build_obj, Build)
-
-        for k in vardump:
-            desc = vardump[k]['doc']
-            if desc is None:
-                var_words = [word for word in k.split('_')]
-                root_var = "_".join([word for word in var_words if word.isupper()])
-                if root_var and root_var != k and root_var in vardump:
-                    desc = vardump[root_var]['doc']
-            if desc is None:
-                desc = ''
-            if desc:
-                HelpText.objects.get_or_create(build=build_obj,
-                                               area=HelpText.VARIABLE,
-                                               key=k, text=desc)
-            if not bool(vardump[k]['func']):
-                value = vardump[k]['v']
-                if value is None:
-                    value = ''
-                variable_obj = Variable.objects.create( build = build_obj,
-                                                        variable_name = k,
-                                                        variable_value = value,
-                                                        description = desc)
-
-                varhist_objects = []
-                for vh in vardump[k]['history']:
-                    if not 'documentation.conf' in vh['file']:
-                        varhist_objects.append(VariableHistory( variable = variable_obj,
-                                                                file_name = vh['file'],
-                                                                line_number = vh['line'],
-                                                                operation = vh['op']))
-                if varhist_objects:
-                    VariableHistory.objects.bulk_create(varhist_objects)
-
-
-class MockEvent(object):
-    """ This object is used to create events, so that the normal
-        event-processing methods can be used on data that does not arrive
-        via an actual event
-    """
-    def __init__(self):
-        self.msg = None
-        self.levelno = None
-        self.taskname = None
-        self.taskhash = None
-        self.pathname = None
-        self.lineno = None
-
-    def getMessage(self):
-        """
-        Simulate LogRecord message return
-        """
-        return self.msg
-
-
-class BuildInfoHelper(object):
-    """ This class gathers the build information from the server and sends it
-        towards the ORM wrapper for storing in the database. It is
-        instantiated once per build and keeps in memory all data that needs
-        matching before writing it to the database
-    """
-
-    # tasks which produce image files; note we include '', as we set
-    # the task for a target to '' (i.e. 
'build') if no target is - # explicitly defined - IMAGE_GENERATING_TASKS = ['', 'build', 'image', 'populate_sdk_ext'] - - # pylint: disable=protected-access - # the code will look into the protected variables of the event; no easy way around this - # pylint: disable=bad-continuation - # we do not follow the python conventions for continuation indentation due to long lines here - - def __init__(self, server, has_build_history = False, brbe = None): - self.internal_state = {} - self.internal_state['taskdata'] = {} - self.internal_state['targets'] = [] - self.task_order = 0 - self.autocommit_step = 1 - self.server = server - self.orm_wrapper = ORMWrapper() - self.has_build_history = has_build_history - self.tmp_dir = self.server.runCommand(["getVariable", "TMPDIR"])[0] - - # this is set for Toaster-triggered builds by localhostbecontroller - # via toasterui - self.brbe = brbe - - self.project = None - - logger.debug("buildinfohelper: Build info helper inited %s" % vars(self)) - - - ################### - ## methods to convert event/external info into objects that the ORM layer uses - - def _ensure_build(self): - """ - Ensure the current build object exists and is up to date with - data on the bitbake server - """ - if not 'build' in self.internal_state or not self.internal_state['build']: - # create the Build object - self.internal_state['build'] = \ - self.orm_wrapper.get_or_create_build_object(self.brbe) - - build = self.internal_state['build'] - - # update missing fields on the Build object with found data - build_info = {} - - # set to True if at least one field is going to be set - changed = False - - if not build.build_name: - build_name = self.server.runCommand(["getVariable", "BUILDNAME"])[0] - - # only reset the build name if the one on the server is actually - # a valid value for the build_name field - if build_name is not None: - build_info['build_name'] = build_name - changed = True - - if not build.machine: - build_info['machine'] = self.server.runCommand(["getVariable", "MACHINE"])[0] - changed = True - - if not build.distro: - build_info['distro'] = self.server.runCommand(["getVariable", "DISTRO"])[0] - changed = True - - if not build.distro_version: - build_info['distro_version'] = self.server.runCommand(["getVariable", "DISTRO_VERSION"])[0] - changed = True - - if not build.bitbake_version: - build_info['bitbake_version'] = self.server.runCommand(["getVariable", "BB_VERSION"])[0] - changed = True - - if changed: - self.orm_wrapper.update_build(self.internal_state['build'], build_info) - - def _get_task_information(self, event, recipe): - assert 'taskname' in vars(event) - self._ensure_build() - - task_information = {} - task_information['build'] = self.internal_state['build'] - task_information['outcome'] = Task.OUTCOME_NA - task_information['recipe'] = recipe - task_information['task_name'] = event.taskname - try: - # some tasks don't come with a hash. and that's ok - task_information['sstate_checksum'] = event.taskhash - except AttributeError: - pass - return task_information - - def _get_layer_version_for_dependency(self, pathRE): - """ Returns the layer in the toaster db that has a full regex - match to the pathRE. pathRE - the layer path passed as a regex in the - event. It is created in cooker.py as a collection for the layer - priorities. 
- """ - self._ensure_build() - - def _sort_longest_path(layer_version): - assert isinstance(layer_version, Layer_Version) - return len(layer_version.local_path) - - # Our paths don't append a trailing slash - if pathRE.endswith("/"): - pathRE = pathRE[:-1] - - p = re.compile(pathRE) - path=re.sub(r'[$^]',r'',pathRE) - # Heuristics: we always match recipe to the deepest layer path in - # the discovered layers - for lvo in sorted(self.orm_wrapper.layer_version_objects, - reverse=True, key=_sort_longest_path): - if p.fullmatch(os.path.abspath(lvo.local_path)): - return lvo - if lvo.layer.local_source_dir: - if p.fullmatch(os.path.abspath(lvo.layer.local_source_dir)): - return lvo - if 0 == path.find(lvo.local_path): - # sub-layer path inside existing layer - return lvo - - # if we get here, we didn't read layers correctly; - # dump whatever information we have on the error log - logger.warning("Could not match layer dependency for path %s : %s", - pathRE, - self.orm_wrapper.layer_version_objects) - return None - - def _get_layer_version_for_path(self, path): - self._ensure_build() - - def _slkey_interactive(layer_version): - assert isinstance(layer_version, Layer_Version) - return len(layer_version.local_path) - - # Heuristics: we always match recipe to the deepest layer path in the discovered layers - for lvo in sorted(self.orm_wrapper.layer_version_objects, reverse=True, key=_slkey_interactive): - # we can match to the recipe file path - if path.startswith(lvo.local_path): - return lvo - if lvo.layer.local_source_dir and \ - path.startswith(lvo.layer.local_source_dir): - return lvo - - #if we get here, we didn't read layers correctly; dump whatever information we have on the error log - logger.warning("Could not match layer version for recipe path %s : %s", path, self.orm_wrapper.layer_version_objects) - - #mockup the new layer - unknown_layer, _ = Layer.objects.get_or_create(name="Unidentified layer", layer_index_url="") - unknown_layer_version_obj, _ = Layer_Version.objects.get_or_create(layer = unknown_layer, build = self.internal_state['build']) - - # append it so we don't run into this error again and again - self.orm_wrapper.layer_version_objects.append(unknown_layer_version_obj) - - return unknown_layer_version_obj - - def _get_recipe_information_from_taskfile(self, taskfile): - localfilepath = taskfile.split(":")[-1] - filepath_flags = ":".join(sorted(taskfile.split(":")[:-1])) - layer_version_obj = self._get_layer_version_for_path(localfilepath) - - - - recipe_info = {} - recipe_info['layer_version'] = layer_version_obj - recipe_info['file_path'] = localfilepath - recipe_info['pathflags'] = filepath_flags - - if recipe_info['file_path'].startswith(recipe_info['layer_version'].local_path): - recipe_info['file_path'] = recipe_info['file_path'][len(recipe_info['layer_version'].local_path):].lstrip("/") - else: - raise RuntimeError("Recipe file path %s is not under layer version at %s" % (recipe_info['file_path'], recipe_info['layer_version'].local_path)) - - return recipe_info - - - ################################ - ## external available methods to store information - @staticmethod - def _get_data_from_event(event): - evdata = None - if '_localdata' in vars(event): - evdata = event._localdata - elif 'data' in vars(event): - evdata = event.data - else: - raise Exception("Event with neither _localdata or data properties") - return evdata - - def store_layer_info(self, event): - layerinfos = BuildInfoHelper._get_data_from_event(event) - self.internal_state['lvs'] = {} - for layer in 
layerinfos: - try: - self.internal_state['lvs'][self.orm_wrapper.get_update_layer_object(layerinfos[layer], self.brbe)] = layerinfos[layer]['version'] - self.internal_state['lvs'][self.orm_wrapper.get_update_layer_object(layerinfos[layer], self.brbe)]['local_path'] = layerinfos[layer]['local_path'] - except NotExisting as nee: - logger.warning("buildinfohelper: cannot identify layer exception:%s ", nee) - - def store_started_build(self): - self._ensure_build() - - def save_build_log_file_path(self, build_log_path): - self._ensure_build() - - if not self.internal_state['build'].cooker_log_path: - data_dict = {'cooker_log_path': build_log_path} - self.orm_wrapper.update_build(self.internal_state['build'], data_dict) - - def save_build_targets(self, event): - self._ensure_build() - - # create target information - assert '_pkgs' in vars(event) - target_information = {} - target_information['targets'] = event._pkgs - target_information['build'] = self.internal_state['build'] - - self.internal_state['targets'] = self.orm_wrapper.get_or_create_targets(target_information) - - def save_build_layers_and_variables(self): - self._ensure_build() - - build_obj = self.internal_state['build'] - - # save layer version information for this build - if not 'lvs' in self.internal_state: - logger.error("Layer version information not found; Check if the bitbake server was configured to inherit toaster.bbclass.") - else: - for layer_obj in self.internal_state['lvs']: - self.orm_wrapper.get_update_layer_version_object(build_obj, layer_obj, self.internal_state['lvs'][layer_obj]) - - del self.internal_state['lvs'] - - # Save build configuration - data = self.server.runCommand(["getAllKeysWithFlags", ["doc", "func"]])[0] - - # convert the paths from absolute to relative to either the build directory or layer checkouts - path_prefixes = [] - - if self.brbe is not None: - _, be_id = self.brbe.split(":") - be = BuildEnvironment.objects.get(pk = be_id) - path_prefixes.append(be.builddir) - - for layer in sorted(self.orm_wrapper.layer_version_objects, key = lambda x:len(x.local_path), reverse=True): - path_prefixes.append(layer.local_path) - - # we strip the prefixes - for k in data: - if not bool(data[k]['func']): - for vh in data[k]['history']: - if not 'documentation.conf' in vh['file']: - abs_file_name = vh['file'] - for pp in path_prefixes: - if abs_file_name.startswith(pp + "/"): - # preserve layer name in relative path - vh['file']=abs_file_name[pp.rfind("/")+1:] - break - - # save the variables - self.orm_wrapper.save_build_variables(build_obj, data) - - return self.brbe - - def set_recipes_to_parse(self, num_recipes): - """ - Set the number of recipes which need to be parsed for this build. - This is set the first time ParseStarted is received by toasterui. - """ - self._ensure_build() - self.internal_state['build'].recipes_to_parse = num_recipes - self.internal_state['build'].save() - - def set_recipes_parsed(self, num_recipes): - """ - Set the number of recipes parsed so far for this build; this is updated - each time a ParseProgress or ParseCompleted event is received by - toasterui. 
- """ - self._ensure_build() - if num_recipes <= self.internal_state['build'].recipes_to_parse: - self.internal_state['build'].recipes_parsed = num_recipes - self.internal_state['build'].save() - - def update_target_image_file(self, event): - evdata = BuildInfoHelper._get_data_from_event(event) - - for t in self.internal_state['targets']: - if t.is_image: - output_files = list(evdata.keys()) - for output in output_files: - if t.target in output and 'rootfs' in output and not output.endswith(".manifest"): - self.orm_wrapper.save_target_image_file_information(t, output, evdata[output]) - - def update_artifact_image_file(self, event): - self._ensure_build() - evdata = BuildInfoHelper._get_data_from_event(event) - for artifact_path in evdata.keys(): - self.orm_wrapper.save_artifact_information( - self.internal_state['build'], artifact_path, - evdata[artifact_path]) - - def update_build_information(self, event, errors, warnings, taskfailures): - self._ensure_build() - self.orm_wrapper.update_build_stats_and_outcome( - self.internal_state['build'], errors, warnings, taskfailures) - - def store_started_task(self, event): - assert isinstance(event, (bb.runqueue.sceneQueueTaskStarted, bb.runqueue.runQueueTaskStarted, bb.runqueue.runQueueTaskSkipped)) - assert 'taskfile' in vars(event) - localfilepath = event.taskfile.split(":")[-1] - assert localfilepath.startswith("/") - - identifier = event.taskfile + ":" + event.taskname - - recipe_information = self._get_recipe_information_from_taskfile(event.taskfile) - recipe = self.orm_wrapper.get_update_recipe_object(recipe_information, True) - - task_information = self._get_task_information(event, recipe) - task_information['outcome'] = Task.OUTCOME_NA - - if isinstance(event, bb.runqueue.runQueueTaskSkipped): - assert 'reason' in vars(event) - task_information['task_executed'] = False - if event.reason == "covered": - task_information['outcome'] = Task.OUTCOME_COVERED - if event.reason == "existing": - task_information['outcome'] = Task.OUTCOME_PREBUILT - else: - task_information['task_executed'] = True - if 'noexec' in vars(event) and event.noexec: - task_information['task_executed'] = False - task_information['outcome'] = Task.OUTCOME_EMPTY - task_information['script_type'] = Task.CODING_NA - - # do not assign order numbers to scene tasks - if not isinstance(event, bb.runqueue.sceneQueueTaskStarted): - self.task_order += 1 - task_information['order'] = self.task_order - - self.orm_wrapper.get_update_task_object(task_information) - - self.internal_state['taskdata'][identifier] = { - 'outcome': task_information['outcome'], - } - - - def store_tasks_stats(self, event): - self._ensure_build() - task_data = BuildInfoHelper._get_data_from_event(event) - - for (task_file, task_name, task_stats, recipe_name) in task_data: - build = self.internal_state['build'] - self.orm_wrapper.update_task_object(build, task_name, recipe_name, task_stats) - - def update_and_store_task(self, event): - assert 'taskfile' in vars(event) - localfilepath = event.taskfile.split(":")[-1] - assert localfilepath.startswith("/") - - identifier = event.taskfile + ":" + event.taskname - if not identifier in self.internal_state['taskdata']: - if isinstance(event, bb.build.TaskBase): - # we do a bit of guessing - candidates = [x for x in self.internal_state['taskdata'].keys() if x.endswith(identifier)] - if len(candidates) == 1: - identifier = candidates[0] - elif len(candidates) > 1 and hasattr(event,'_package'): - if 'native-' in event._package: - identifier = 'native:' + identifier - if 
'nativesdk-' in event._package: - identifier = 'nativesdk:' + identifier - candidates = [x for x in self.internal_state['taskdata'].keys() if x.endswith(identifier)] - if len(candidates) == 1: - identifier = candidates[0] - - assert identifier in self.internal_state['taskdata'] - identifierlist = identifier.split(":") - realtaskfile = ":".join(identifierlist[0:len(identifierlist)-1]) - recipe_information = self._get_recipe_information_from_taskfile(realtaskfile) - recipe = self.orm_wrapper.get_update_recipe_object(recipe_information, True) - task_information = self._get_task_information(event,recipe) - - task_information['outcome'] = self.internal_state['taskdata'][identifier]['outcome'] - - if 'logfile' in vars(event): - task_information['logfile'] = event.logfile - - if '_message' in vars(event): - task_information['message'] = event._message - - if 'taskflags' in vars(event): - # with TaskStarted, we get even more information - if 'python' in event.taskflags.keys() and event.taskflags['python'] == '1': - task_information['script_type'] = Task.CODING_PYTHON - else: - task_information['script_type'] = Task.CODING_SHELL - - if task_information['outcome'] == Task.OUTCOME_NA: - if isinstance(event, (bb.runqueue.runQueueTaskCompleted, bb.runqueue.sceneQueueTaskCompleted)): - task_information['outcome'] = Task.OUTCOME_SUCCESS - del self.internal_state['taskdata'][identifier] - - if isinstance(event, (bb.runqueue.runQueueTaskFailed, bb.runqueue.sceneQueueTaskFailed)): - task_information['outcome'] = Task.OUTCOME_FAILED - del self.internal_state['taskdata'][identifier] - - # we force a sync point here, to get the progress bar to show - if self.autocommit_step % 3 == 0: - transaction.set_autocommit(True) - transaction.set_autocommit(False) - self.autocommit_step += 1 - - self.orm_wrapper.get_update_task_object(task_information, True) # must exist - - - def store_missed_state_tasks(self, event): - for (fn, taskname, taskhash, sstatefile) in BuildInfoHelper._get_data_from_event(event)['missed']: - - # identifier = fn + taskname + "_setscene" - recipe_information = self._get_recipe_information_from_taskfile(fn) - recipe = self.orm_wrapper.get_update_recipe_object(recipe_information) - mevent = MockEvent() - mevent.taskname = taskname - mevent.taskhash = taskhash - task_information = self._get_task_information(mevent,recipe) - - task_information['start_time'] = timezone.now() - task_information['outcome'] = Task.OUTCOME_NA - task_information['sstate_checksum'] = taskhash - task_information['sstate_result'] = Task.SSTATE_MISS - task_information['path_to_sstate_obj'] = sstatefile - - self.orm_wrapper.get_update_task_object(task_information) - - for (fn, taskname, taskhash, sstatefile) in BuildInfoHelper._get_data_from_event(event)['found']: - - # identifier = fn + taskname + "_setscene" - recipe_information = self._get_recipe_information_from_taskfile(fn) - recipe = self.orm_wrapper.get_update_recipe_object(recipe_information) - mevent = MockEvent() - mevent.taskname = taskname - mevent.taskhash = taskhash - task_information = self._get_task_information(mevent,recipe) - - task_information['path_to_sstate_obj'] = sstatefile - - self.orm_wrapper.get_update_task_object(task_information) - - - def store_target_package_data(self, event): - self._ensure_build() - - # for all image targets - for target in self.internal_state['targets']: - if target.is_image: - pkgdata = BuildInfoHelper._get_data_from_event(event)['pkgdata'] - imgdata = BuildInfoHelper._get_data_from_event(event)['imgdata'].get(target.target, 
{})
-                filedata = BuildInfoHelper._get_data_from_event(event)['filedata'].get(target.target, {})
-
-                try:
-                    self.orm_wrapper.save_target_package_information(self.internal_state['build'], target, imgdata, pkgdata, self.internal_state['recipes'], built_package=True)
-                    self.orm_wrapper.save_target_package_information(self.internal_state['build'], target, imgdata.copy(), pkgdata, self.internal_state['recipes'], built_package=False)
-                except KeyError as e:
-                    logger.warning("KeyError in save_target_package_information %s", e)
-
-                # only try to find files in the image if the task for this
-                # target is one which produces image files; otherwise, the old
-                # list of files in the files-in-image.txt file will be
-                # appended to the target even if it didn't produce any images
-                if target.task in BuildInfoHelper.IMAGE_GENERATING_TASKS:
-                    try:
-                        self.orm_wrapper.save_target_file_information(self.internal_state['build'], target, filedata)
-                    except KeyError as e:
-                        logger.warning("KeyError in save_target_file_information %s", e)
-
-    def cancel_cli_build(self):
-        """
-        If a build is currently underway, set its state to CANCELLED;
-        note that this only gets called for command line builds which are
-        interrupted, so it doesn't touch any BuildRequest objects
-        """
-        self._ensure_build()
-        self.internal_state['build'].outcome = Build.CANCELLED
-        self.internal_state['build'].save()
-        signal_runbuilds()
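-    # Illustrative sketch (not part of the original file) of the _depgraph
-    # layout that store_dependency_information() below relies on; the keys
-    # mirror the asserts and lookups in that method, while the names and
-    # values here are invented:
-    #
-    #   event._depgraph = {
-    #       'layer-priorities': [(collection, path_regex, match, priority), ...],
-    #       'pn': {'example-recipe': {'filename': '/layer/example.bb',
-    #                                 'version': '1.0', 'inherits': [...]}},
-    #       'depends': {'example-recipe': ['other-recipe']},
-    #       'providermap': {'virtual/foo': ('provider-recipe',)},
-    #       'tdepends': {'example-recipe.do_compile':
-    #                        ['other-recipe.do_populate_sysroot']},
-    #   }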
RuntimeError("Recipe file path %s is not under layer version at %s" % (recipe_info['file_path'], recipe_info['layer_version'].local_path)) - - recipe = self.orm_wrapper.get_update_recipe_object(recipe_info) - recipe.is_image = False - if 'inherits' in event._depgraph['pn'][pn].keys(): - for cls in event._depgraph['pn'][pn]['inherits']: - if cls.endswith('/image.bbclass'): - recipe.is_image = True - recipe_info['is_image'] = True - # Save the is_image state to the relevant recipe objects - self.orm_wrapper.get_update_recipe_object(recipe_info) - break - if recipe.is_image: - for t in self.internal_state['targets']: - if pn == t.target: - t.is_image = True - t.save() - self.internal_state['recipes'][pn] = recipe - - # we'll not get recipes for key w/ values listed in ASSUME_PROVIDED - - assume_provided = self.server.runCommand(["getVariable", "ASSUME_PROVIDED"])[0].split() - - # save recipe dependency - # buildtime - recipedeps_objects = [] - for recipe in event._depgraph['depends']: - target = self.internal_state['recipes'][recipe] - for dep in event._depgraph['depends'][recipe]: - if dep in assume_provided: - continue - via = None - if 'providermap' in event._depgraph and dep in event._depgraph['providermap']: - deprecipe = event._depgraph['providermap'][dep][0] - dependency = self.internal_state['recipes'][deprecipe] - via = Provides.objects.get_or_create(name=dep, - recipe=dependency)[0] - elif dep in self.internal_state['recipes']: - dependency = self.internal_state['recipes'][dep] - else: - errormsg.append(" stpd: KeyError saving recipe dependency for %s, %s \n" % (recipe, dep)) - continue - recipe_dep = Recipe_Dependency(recipe=target, - depends_on=dependency, - via=via, - dep_type=Recipe_Dependency.TYPE_DEPENDS) - recipedeps_objects.append(recipe_dep) - - Recipe_Dependency.objects.bulk_create(recipedeps_objects) - - # save all task information - def _save_a_task(taskdesc): - spec = re.split(r'\.', taskdesc) - pn = ".".join(spec[0:-1]) - taskname = spec[-1] - e = event - e.taskname = pn - recipe = self.internal_state['recipes'][pn] - task_info = self._get_task_information(e, recipe) - task_info['task_name'] = taskname - task_obj = self.orm_wrapper.get_update_task_object(task_info) - return task_obj - - # create tasks - tasks = {} - for taskdesc in event._depgraph['tdepends']: - tasks[taskdesc] = _save_a_task(taskdesc) - - # create dependencies between tasks - taskdeps_objects = [] - for taskdesc in event._depgraph['tdepends']: - target = tasks[taskdesc] - for taskdep in event._depgraph['tdepends'][taskdesc]: - if taskdep not in tasks: - # Fetch tasks info is not collected previously - dep = _save_a_task(taskdep) - else: - dep = tasks[taskdep] - taskdeps_objects.append(Task_Dependency( task = target, depends_on = dep )) - Task_Dependency.objects.bulk_create(taskdeps_objects) - - if errormsg: - logger.warning("buildinfohelper: dependency info not identify recipes: \n%s", "".join(errormsg)) - - - def store_build_package_information(self, event): - self._ensure_build() - - package_info = BuildInfoHelper._get_data_from_event(event) - self.orm_wrapper.save_build_package_information( - self.internal_state['build'], - package_info, - self.internal_state['recipes'], - built_package=True) - - self.orm_wrapper.save_build_package_information( - self.internal_state['build'], - package_info, - self.internal_state['recipes'], - built_package=False) - - def _store_build_done(self, errorcode): - logger.info("Build exited with errorcode %d", errorcode) - - if not self.brbe: - return - - br_id, be_id = 
self.brbe.split(":") - - br = BuildRequest.objects.get(pk = br_id) - - # if we're 'done' because we got cancelled update the build outcome - if br.state == BuildRequest.REQ_CANCELLING: - logger.info("Build cancelled") - br.build.outcome = Build.CANCELLED - br.build.save() - self.internal_state['build'] = br.build - errorcode = 0 - - if errorcode == 0: - # request archival of the project artifacts - br.state = BuildRequest.REQ_COMPLETED - else: - br.state = BuildRequest.REQ_FAILED - br.save() - - be = BuildEnvironment.objects.get(pk = be_id) - be.lock = BuildEnvironment.LOCK_FREE - be.save() - signal_runbuilds() - - def store_log_error(self, text): - mockevent = MockEvent() - mockevent.levelno = formatter.ERROR - mockevent.msg = text - mockevent.pathname = '-- None' - mockevent.lineno = LogMessage.ERROR - self.store_log_event(mockevent) - - def store_log_exception(self, text, backtrace = ""): - mockevent = MockEvent() - mockevent.levelno = -1 - mockevent.msg = text - mockevent.pathname = backtrace - mockevent.lineno = -1 - self.store_log_event(mockevent) - - def store_log_event(self, event,cli_backlog=True): - self._ensure_build() - - if event.levelno < formatter.WARNING: - return - - # early return for CLI builds - if cli_backlog and self.brbe is None: - if not 'backlog' in self.internal_state: - self.internal_state['backlog'] = [] - self.internal_state['backlog'].append(event) - return - - if 'backlog' in self.internal_state: - # if we have a backlog of events, do our best to save them here - if self.internal_state['backlog']: - tempevent = self.internal_state['backlog'].pop() - logger.debug("buildinfohelper: Saving stored event %s " - % tempevent) - self.store_log_event(tempevent,cli_backlog) - else: - logger.info("buildinfohelper: All events saved") - del self.internal_state['backlog'] - - log_information = {} - log_information['build'] = self.internal_state['build'] - if event.levelno == formatter.CRITICAL: - log_information['level'] = LogMessage.CRITICAL - elif event.levelno == formatter.ERROR: - log_information['level'] = LogMessage.ERROR - elif event.levelno == formatter.WARNING: - log_information['level'] = LogMessage.WARNING - elif event.levelno == -2: # toaster self-logging - log_information['level'] = -2 - else: - log_information['level'] = LogMessage.INFO - - log_information['message'] = event.getMessage() - log_information['pathname'] = event.pathname - log_information['lineno'] = event.lineno - logger.info("Logging error 2: %s", log_information) - - self.orm_wrapper.create_logmessage(log_information) - - def _get_filenames_from_image_license(self, image_license_manifest_path): - """ - Find the FILES line in the image_license.manifest file, - which has the basenames of the bzImage and modules files - in this format: - FILES: bzImage--4.4.11+git0+3a5f494784_53e84104c5-r0-qemux86-20160603165040.bin modules--4.4.11+git0+3a5f494784_53e84104c5-r0-qemux86-20160603165040.tgz - """ - files = [] - with open(image_license_manifest_path) as image_license: - for line in image_license: - if line.startswith('FILES'): - files_str = line.split(':')[1].strip() - files_str = re.sub(r' {2,}', ' ', files_str) - - # ignore lines like "FILES:" with no filenames - if files_str: - files += files_str.split(' ') - return files - - def _endswith(self, str_to_test, endings): - """ - Returns True if str ends with one of the strings in the list - endings, False otherwise - """ - endswith = False - for ending in endings: - if str_to_test.endswith(ending): - endswith = True - break - return endswith - - def 
scan_task_artifacts(self, event): - """ - The 'TaskArtifacts' event passes the manifest file content for the - tasks 'do_deploy', 'do_image_complete', 'do_populate_sdk', and - 'do_populate_sdk_ext'. The first two will be implemented later. - """ - task_vars = BuildInfoHelper._get_data_from_event(event) - task_name = task_vars['task'][task_vars['task'].find(':')+1:] - task_artifacts = task_vars['artifacts'] - - if task_name in ['do_populate_sdk', 'do_populate_sdk_ext']: - targets = [target for target in self.internal_state['targets'] \ - if target.task == task_name[3:]] - if not targets: - logger.warning("scan_task_artifacts: SDK targets not found: %s\n", task_name) - return - for artifact_path in task_artifacts: - if not os.path.isfile(artifact_path): - logger.warning("scan_task_artifacts: artifact file not found: %s\n", artifact_path) - continue - for target in targets: - # don't record the file if it's already been added - # to this target - matching_files = TargetSDKFile.objects.filter( - target=target, file_name=artifact_path) - if matching_files.count() == 0: - artifact_size = os.stat(artifact_path).st_size - self.orm_wrapper.save_target_sdk_file( - target, artifact_path, artifact_size) - - def _get_image_files(self, deploy_dir_image, image_name, image_file_extensions): - """ - Find files in deploy_dir_image whose basename starts with the - string image_name and ends with one of the strings in - image_file_extensions. - - Returns a list of file dictionaries like - - [ - { - 'path': '/path/to/image/file', - 'size': - } - ] - """ - image_files = [] - - for dirpath, _, filenames in os.walk(deploy_dir_image): - for filename in filenames: - if filename.startswith(image_name) and \ - self._endswith(filename, image_file_extensions): - image_file_path = os.path.join(dirpath, filename) - image_file_size = os.stat(image_file_path).st_size - - image_files.append({ - 'path': image_file_path, - 'size': image_file_size - }) - - return image_files - - def scan_image_artifacts(self): - """ - Scan for built image artifacts in DEPLOY_DIR_IMAGE and associate them - with a Target object in self.internal_state['targets']. - - We have two situations to handle: - - 1. This is the first time a target + machine has been built, so - add files from the DEPLOY_DIR_IMAGE to the target. - - OR - - 2. There are no new files for the target (they were already produced by - a previous build), so copy them from the most recent previous build with - the same target, task and machine. 
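The two situations above reduce to a per-target scan-or-clone decision. A minimal sketch of that flow, reusing the ORM wrapper calls that appear below but with a hypothetical scan_deploy_dir() helper standing in for the manifest-driven scan:

    # Sketch only: scan_deploy_dir() is a hypothetical stand-in for the
    # license-manifest walk that scan_image_artifacts() performs below.
    def scan_deploy_dir(deploy_dir_image, target):
        return []  # would return [(path, size), ...] for files matching target

    def attach_image_artifacts(orm_wrapper, target, deploy_dir_image):
        files = scan_deploy_dir(deploy_dir_image, target)  # situation 1
        if files:
            for path, size in files:
                orm_wrapper.save_target_image_file_information(target, path, size)
            return
        # situation 2: nothing new was produced, so reuse the artifacts of
        # the most recent Target with the same target, task and machine
        similar = orm_wrapper.get_similar_target_with_image_files(target)
        if similar:
            orm_wrapper.clone_image_artifacts(similar, target)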
- """ - deploy_dir_image = \ - self.server.runCommand(['getVariable', 'DEPLOY_DIR_IMAGE'])[0] - - # if there's no DEPLOY_DIR_IMAGE, there aren't going to be - # any image artifacts, so we can return immediately - if not deploy_dir_image: - return - - buildname = self.server.runCommand(['getVariable', 'BUILDNAME'])[0] - machine = self.server.runCommand(['getVariable', 'MACHINE'])[0] - - # location of the manifest files for this build; - # note that this file is only produced if an image is produced - license_directory = \ - self.server.runCommand(['getVariable', 'LICENSE_DIRECTORY'])[0] - - # file name extensions for image files - image_file_extensions_unique = {} - image_fstypes = self.server.runCommand( - ['getVariable', 'IMAGE_FSTYPES'])[0] - if image_fstypes is not None: - image_types_str = image_fstypes.strip() - image_file_extensions = re.sub(r' {2,}', ' ', image_types_str) - image_file_extensions_unique = set(image_file_extensions.split(' ')) - - targets = self.internal_state['targets'] - - # filter out anything which isn't an image target - image_targets = [target for target in targets if target.is_image] - - if len(image_targets) > 0: - #if there are image targets retrieve image_name - image_name = self.server.runCommand(['getVariable', 'IMAGE_NAME'])[0] - if not image_name: - #When build target is an image and image_name is not found as an environment variable - logger.info("IMAGE_NAME not found, extracting from bitbake command") - cmd = self.server.runCommand(['getVariable','BB_CMDLINE'])[0] - #filter out tokens that are command line options - cmd = [token for token in cmd if not token.startswith('-')] - image_name = cmd[1].split(':', 1)[0] # remove everything after : in image name - logger.info("IMAGE_NAME found as : %s " % image_name) - - for image_target in image_targets: - # this is set to True if we find at least one file relating to - # this target; if this remains False after the scan, we copy the - # files from the most-recent Target with the same target + machine - # onto this Target instead - has_files = False - - # we construct this because by the time we reach - # BuildCompleted, this has reset to - # 'defaultpkgname--'; - # we need to change it to - # -- - real_image_name = re.sub(r'^defaultpkgname', image_target.target, - image_name) - - image_license_manifest_path = os.path.join( - license_directory, - real_image_name, - 'image_license.manifest') - - image_package_manifest_path = os.path.join( - license_directory, - real_image_name, - 'image_license.manifest') - - # if image_license.manifest exists, we can read the names of - # bzImage, modules etc. 
files for this build from it, then look for - # them in the DEPLOY_DIR_IMAGE; note that this file is only produced - # if an image file was produced - if os.path.isfile(image_license_manifest_path): - has_files = True - - basenames = self._get_filenames_from_image_license( - image_license_manifest_path) - - for basename in basenames: - artifact_path = os.path.join(deploy_dir_image, basename) - if not os.path.exists(artifact_path): - logger.warning("artifact %s doesn't exist, skipping" % artifact_path) - continue - artifact_size = os.stat(artifact_path).st_size - - # note that the artifact will only be saved against this - # build if it hasn't been already - self.orm_wrapper.save_target_kernel_file(image_target, - artifact_path, artifact_size) - - # store the license manifest path on the target - # (this file is also created any time an image file is created) - license_manifest_path = os.path.join(license_directory, - real_image_name, 'license.manifest') - - self.orm_wrapper.update_target_set_license_manifest( - image_target, license_manifest_path) - - # store the package manifest path on the target (this file - # is created any time an image file is created) - package_manifest_path = os.path.join(deploy_dir_image, - real_image_name + '.rootfs.manifest') - - if os.path.exists(package_manifest_path): - self.orm_wrapper.update_target_set_package_manifest( - image_target, package_manifest_path) - - # scan the directory for image files relating to this build - # (via real_image_name); note that we don't have to set - # has_files = True, as searching for the license manifest file - # will already have set it to true if at least one image file was - # produced; note that the real_image_name includes BUILDNAME, which - # in turn includes a timestamp; so if no files were produced for - # this timestamp (i.e. the build reused existing image files already - # in the directory), no files will be recorded against this target - image_files = self._get_image_files(deploy_dir_image, - real_image_name, image_file_extensions_unique) - - for image_file in image_files: - self.orm_wrapper.save_target_image_file_information( - image_target, image_file['path'], image_file['size']) - - if not has_files: - # copy image files and build artifacts from the - # most-recently-built Target with the - # same target + machine as this Target; also copy the license - # manifest path, as that is not treated as an artifact and needs - # to be set separately - similar_target = \ - self.orm_wrapper.get_similar_target_with_image_files( - image_target) - - if similar_target: - logger.info('image artifacts for target %s cloned from ' \ - 'target %s' % (image_target.pk, similar_target.pk)) - self.orm_wrapper.clone_image_artifacts(similar_target, - image_target) - - def _get_sdk_targets(self): - """ - Return targets which could generate SDK artifacts, i.e. - "do_populate_sdk" and "do_populate_sdk_ext". - """ - return [target for target in self.internal_state['targets'] \ - if target.task in ['populate_sdk', 'populate_sdk_ext']] - - def scan_sdk_artifacts(self, event): - """ - Note that we have to intercept an SDKArtifactInfo event from - toaster.bbclass (via toasterui) to get hold of the SDK variables we - need to be able to scan for files accurately: this is because - variables like TOOLCHAIN_OUTPUTNAME have reset to None by the time - BuildCompleted is fired by bitbake, so we have to get those values - while the build is still in progress. 
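Because those variables are gone by the time BuildCompleted fires, the values have to be captured from the event payload while the build is in flight; a getVariable query afterwards would return None. A minimal sketch of the pattern (the handler wiring is hypothetical; only _get_data_from_event mirrors the helper used below):

    # Sketch only: stash build-time variable values when the event arrives,
    # since querying the server after BuildCompleted would come back empty.
    captured_vars = {}

    def on_sdk_artifact_event(event):
        sdk_vars = BuildInfoHelper._get_data_from_event(event)
        captured_vars['TOOLCHAIN_OUTPUTNAME'] = sdk_vars.get('TOOLCHAIN_OUTPUTNAME')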
- - For populate_sdk_ext, this runs twice, with two different - TOOLCHAIN_OUTPUTNAME settings, each of which will capture some of the - files in the SDK output directory. - """ - sdk_vars = BuildInfoHelper._get_data_from_event(event) - toolchain_outputname = sdk_vars['TOOLCHAIN_OUTPUTNAME'] - - # targets which might have created SDK artifacts - sdk_targets = self._get_sdk_targets() - - # location of SDK artifacts - tmpdir = self.server.runCommand(['getVariable', 'TMPDIR'])[0] - sdk_dir = os.path.join(tmpdir, 'deploy', 'sdk') - - # all files in the SDK directory - artifacts = [] - for dir_path, _, filenames in os.walk(sdk_dir): - for filename in filenames: - full_path = os.path.join(dir_path, filename) - if not os.path.islink(full_path): - artifacts.append(full_path) - - for sdk_target in sdk_targets: - # find files in the SDK directory which haven't already been - # recorded against a Target and whose basename matches - # TOOLCHAIN_OUTPUTNAME - for artifact_path in artifacts: - basename = os.path.basename(artifact_path) - - toolchain_match = basename.startswith(toolchain_outputname) - - # files which match the name of the target which produced them; - # for example, - # poky-glibc-x86_64-core-image-sato-i586-toolchain-ext-2.1+snapshot.sh - target_match = re.search(sdk_target.target, basename) - - # targets which produce "*-nativesdk-*" files - is_ext_sdk_target = sdk_target.task in \ - ['do_populate_sdk_ext', 'populate_sdk_ext'] - - # SDK files which don't match the target name, i.e. - # x86_64-nativesdk-libc.* - # poky-glibc-x86_64-buildtools-tarball-i586-buildtools-nativesdk-standalone-2.1+snapshot* - is_ext_sdk_file = re.search('-nativesdk-', basename) - - file_from_target = (toolchain_match and target_match) or \ - (is_ext_sdk_target and is_ext_sdk_file) - - if file_from_target: - # don't record the file if it's already been added to this - # target - matching_files = TargetSDKFile.objects.filter( - target=sdk_target, file_name=artifact_path) - - if matching_files.count() == 0: - artifact_size = os.stat(artifact_path).st_size - - self.orm_wrapper.save_target_sdk_file( - sdk_target, artifact_path, artifact_size) - - def clone_required_sdk_artifacts(self): - """ - If an SDK target doesn't have any SDK artifacts, this means that - the postfuncs of populate_sdk or populate_sdk_ext didn't fire, which - in turn means that the targets of this build didn't generate any new - artifacts. - - In this case, clone SDK artifacts for targets in the current build - from existing targets for this build. 
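One note on the emptiness test used in the loop below: targetsdkfile_set.all().count() == 0 issues a COUNT query per target. In Django-style ORMs the usual idiom for such a guard is exists(), which can stop at the first matching row; a sketch of the equivalent check, assuming the same related manager:

    # Sketch: equivalent guard with exists(), which lets the database
    # short-circuit instead of counting every TargetSDKFile row.
    if not sdk_target.targetsdkfile_set.exists():
        similar_target = orm_wrapper.get_similar_target_with_sdk_files(sdk_target)
        if similar_target:
            orm_wrapper.clone_sdk_artifacts(similar_target, sdk_target)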
- """ - sdk_targets = self._get_sdk_targets() - for sdk_target in sdk_targets: - # only clone for SDK targets which have no TargetSDKFiles yet - if sdk_target.targetsdkfile_set.all().count() == 0: - similar_target = \ - self.orm_wrapper.get_similar_target_with_sdk_files( - sdk_target) - if similar_target: - logger.info('SDK artifacts for target %s cloned from ' \ - 'target %s' % (sdk_target.pk, similar_target.pk)) - self.orm_wrapper.clone_sdk_artifacts(similar_target, - sdk_target) - - def close(self, errorcode): - self._store_build_done(errorcode) - - if 'backlog' in self.internal_state: - # we save missed events in the database for the current build - tempevent = self.internal_state['backlog'].pop() - # Do not skip command line build events - self.store_log_event(tempevent,False) - - - # unset the brbe; this is to prevent subsequent command-line builds - # being incorrectly attached to the previous Toaster-triggered build; - # see https://bugzilla.yoctoproject.org/show_bug.cgi?id=9021 - self.brbe = None - - # unset the internal Build object to prevent it being reused for the - # next build - self.internal_state['build'] = None diff --git a/bitbake/lib/bb/ui/eventreplay.py b/bitbake/lib/bb/ui/eventreplay.py deleted file mode 100644 index d62ecbfa56..0000000000 --- a/bitbake/lib/bb/ui/eventreplay.py +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env python3 -# -# SPDX-License-Identifier: GPL-2.0-only -# -# This file re-uses code spread throughout other Bitbake source files. -# As such, all other copyrights belong to their own right holders. -# - - -import os -import sys -import json -import pickle -import codecs - - -class EventPlayer: - """Emulate a connection to a bitbake server.""" - - def __init__(self, eventfile, variables): - self.eventfile = eventfile - self.variables = variables - self.eventmask = [] - - def waitEvent(self, _timeout): - """Read event from the file.""" - line = self.eventfile.readline().strip() - if not line: - return - try: - decodedline = json.loads(line) - if 'allvariables' in decodedline: - self.variables = decodedline['allvariables'] - return - if not 'vars' in decodedline: - raise ValueError - event_str = decodedline['vars'].encode('utf-8') - event = pickle.loads(codecs.decode(event_str, 'base64')) - event_name = "%s.%s" % (event.__module__, event.__class__.__name__) - if event_name not in self.eventmask: - return - return event - except ValueError as err: - print("Failed loading ", line) - raise err - - def runCommand(self, command_line): - """Emulate running a command on the server.""" - name = command_line[0] - - if name == "getVariable": - var_name = command_line[1] - variable = self.variables.get(var_name) - if variable: - return variable['v'], None - return None, "Missing variable %s" % var_name - - elif name == "getAllKeysWithFlags": - dump = {} - flaglist = command_line[1] - for key, val in self.variables.items(): - try: - if not key.startswith("__"): - dump[key] = { - 'v': val['v'], - 'history' : val['history'], - } - for flag in flaglist: - dump[key][flag] = val[flag] - except Exception as err: - print(err) - return (dump, None) - - elif name == 'setEventMask': - self.eventmask = command_line[-1] - return True, None - - else: - raise Exception("Command %s not implemented" % command_line[0]) - - def getEventHandle(self): - """ - This method is called by toasterui. - The return value is passed to self.runCommand but not used there. 
- """ - pass diff --git a/bitbake/lib/bb/ui/icons/images/images_display.png b/bitbake/lib/bb/ui/icons/images/images_display.png deleted file mode 100644 index a7f87101af..0000000000 Binary files a/bitbake/lib/bb/ui/icons/images/images_display.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/images/images_hover.png b/bitbake/lib/bb/ui/icons/images/images_hover.png deleted file mode 100644 index 2d9cd99b8e..0000000000 Binary files a/bitbake/lib/bb/ui/icons/images/images_hover.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/indicators/add-hover.png b/bitbake/lib/bb/ui/icons/indicators/add-hover.png deleted file mode 100644 index 526df770d1..0000000000 Binary files a/bitbake/lib/bb/ui/icons/indicators/add-hover.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/indicators/add.png b/bitbake/lib/bb/ui/icons/indicators/add.png deleted file mode 100644 index 31e7090d61..0000000000 Binary files a/bitbake/lib/bb/ui/icons/indicators/add.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/indicators/alert.png b/bitbake/lib/bb/ui/icons/indicators/alert.png deleted file mode 100644 index d1c6f55a2f..0000000000 Binary files a/bitbake/lib/bb/ui/icons/indicators/alert.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/indicators/confirmation.png b/bitbake/lib/bb/ui/icons/indicators/confirmation.png deleted file mode 100644 index 3a5402d1e3..0000000000 Binary files a/bitbake/lib/bb/ui/icons/indicators/confirmation.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/indicators/denied.png b/bitbake/lib/bb/ui/icons/indicators/denied.png deleted file mode 100644 index ee35c7defa..0000000000 Binary files a/bitbake/lib/bb/ui/icons/indicators/denied.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/indicators/error.png b/bitbake/lib/bb/ui/icons/indicators/error.png deleted file mode 100644 index d06a8c151a..0000000000 Binary files a/bitbake/lib/bb/ui/icons/indicators/error.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/indicators/info.png b/bitbake/lib/bb/ui/icons/indicators/info.png deleted file mode 100644 index ee8e8d8462..0000000000 Binary files a/bitbake/lib/bb/ui/icons/indicators/info.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/indicators/issues.png b/bitbake/lib/bb/ui/icons/indicators/issues.png deleted file mode 100644 index b0c7461334..0000000000 Binary files a/bitbake/lib/bb/ui/icons/indicators/issues.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/indicators/refresh.png b/bitbake/lib/bb/ui/icons/indicators/refresh.png deleted file mode 100644 index eb6c419db8..0000000000 Binary files a/bitbake/lib/bb/ui/icons/indicators/refresh.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/indicators/remove-hover.png b/bitbake/lib/bb/ui/icons/indicators/remove-hover.png deleted file mode 100644 index aa57c69982..0000000000 Binary files a/bitbake/lib/bb/ui/icons/indicators/remove-hover.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/indicators/remove.png b/bitbake/lib/bb/ui/icons/indicators/remove.png deleted file mode 100644 index 05c3c293d4..0000000000 Binary files a/bitbake/lib/bb/ui/icons/indicators/remove.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/indicators/tick.png b/bitbake/lib/bb/ui/icons/indicators/tick.png deleted file mode 100644 index beaad361c3..0000000000 Binary files a/bitbake/lib/bb/ui/icons/indicators/tick.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/info/info_display.png 
b/bitbake/lib/bb/ui/icons/info/info_display.png deleted file mode 100644 index 5afbba29f5..0000000000 Binary files a/bitbake/lib/bb/ui/icons/info/info_display.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/info/info_hover.png b/bitbake/lib/bb/ui/icons/info/info_hover.png deleted file mode 100644 index f9d294dfae..0000000000 Binary files a/bitbake/lib/bb/ui/icons/info/info_hover.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/layers/layers_display.png b/bitbake/lib/bb/ui/icons/layers/layers_display.png deleted file mode 100644 index b7f9053a9e..0000000000 Binary files a/bitbake/lib/bb/ui/icons/layers/layers_display.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/layers/layers_hover.png b/bitbake/lib/bb/ui/icons/layers/layers_hover.png deleted file mode 100644 index 0bf3ce0dbc..0000000000 Binary files a/bitbake/lib/bb/ui/icons/layers/layers_hover.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/packages/packages_display.png b/bitbake/lib/bb/ui/icons/packages/packages_display.png deleted file mode 100644 index f5d0a5064d..0000000000 Binary files a/bitbake/lib/bb/ui/icons/packages/packages_display.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/packages/packages_hover.png b/bitbake/lib/bb/ui/icons/packages/packages_hover.png deleted file mode 100644 index c081165f34..0000000000 Binary files a/bitbake/lib/bb/ui/icons/packages/packages_hover.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/recipe/recipe_display.png b/bitbake/lib/bb/ui/icons/recipe/recipe_display.png deleted file mode 100644 index e9809bc7d9..0000000000 Binary files a/bitbake/lib/bb/ui/icons/recipe/recipe_display.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/recipe/recipe_hover.png b/bitbake/lib/bb/ui/icons/recipe/recipe_hover.png deleted file mode 100644 index 7e48da9af0..0000000000 Binary files a/bitbake/lib/bb/ui/icons/recipe/recipe_hover.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/settings/settings_display.png b/bitbake/lib/bb/ui/icons/settings/settings_display.png deleted file mode 100644 index 88c464db04..0000000000 Binary files a/bitbake/lib/bb/ui/icons/settings/settings_display.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/settings/settings_hover.png b/bitbake/lib/bb/ui/icons/settings/settings_hover.png deleted file mode 100644 index d92a0bf2c3..0000000000 Binary files a/bitbake/lib/bb/ui/icons/settings/settings_hover.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/templates/templates_display.png b/bitbake/lib/bb/ui/icons/templates/templates_display.png deleted file mode 100644 index 153c7afb62..0000000000 Binary files a/bitbake/lib/bb/ui/icons/templates/templates_display.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/icons/templates/templates_hover.png b/bitbake/lib/bb/ui/icons/templates/templates_hover.png deleted file mode 100644 index afb7165fe5..0000000000 Binary files a/bitbake/lib/bb/ui/icons/templates/templates_hover.png and /dev/null differ diff --git a/bitbake/lib/bb/ui/knotty.py b/bitbake/lib/bb/ui/knotty.py deleted file mode 100644 index 00258c80ff..0000000000 --- a/bitbake/lib/bb/ui/knotty.py +++ /dev/null @@ -1,1030 +0,0 @@ -# -# BitBake (No)TTY UI Implementation -# -# Handling output to TTYs or files (no TTY) -# -# Copyright (C) 2006-2012 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -from __future__ import division - -import io -import os -import sys -import logging -import progressbar -import signal -import bb.msg -import time -import 
fcntl -import struct -import copy -import atexit -from itertools import groupby - -from bb.ui import uihelper -import bb.build -import bb.command -import bb.cooker -import bb.event -import bb.runqueue -import bb.utils - -featureSet = [bb.cooker.CookerFeatures.SEND_SANITYEVENTS, bb.cooker.CookerFeatures.BASEDATASTORE_TRACKING] - -logger = logging.getLogger("BitBake") -interactive = sys.stdout.isatty() - -class BBProgress(progressbar.ProgressBar): - def __init__(self, msg, maxval, widgets=None, extrapos=-1, resize_handler=None): - self.msg = msg - self.extrapos = extrapos - if not widgets: - widgets = [': ', progressbar.Percentage(), ' ', progressbar.Bar(), - ' ', progressbar.ETA()] - self.extrapos = 5 - - if resize_handler: - self._resize_default = resize_handler - else: - self._resize_default = signal.getsignal(signal.SIGWINCH) - progressbar.ProgressBar.__init__(self, maxval, [self.msg] + widgets, fd=sys.stdout) - - def _handle_resize(self, signum=None, frame=None): - progressbar.ProgressBar._handle_resize(self, signum, frame) - if self._resize_default: - self._resize_default(signum, frame) - - def finish(self): - progressbar.ProgressBar.finish(self) - if self._resize_default: - signal.signal(signal.SIGWINCH, self._resize_default) - - def setmessage(self, msg): - self.msg = msg - self.widgets[0] = msg - - def setextra(self, extra): - if self.extrapos > -1: - if extra: - extrastr = str(extra) - if extrastr[0] != ' ': - extrastr = ' ' + extrastr - else: - extrastr = '' - self.widgets[self.extrapos] = extrastr - - def _need_update(self): - # We always want the bar to print when update() is called - return True - -class NonInteractiveProgress(object): - fobj = sys.stdout - - def __init__(self, msg, maxval): - self.msg = msg - self.maxval = maxval - self.finished = False - - def start(self, update=True): - self.fobj.write("%s..." 
% self.msg) - self.fobj.flush() - return self - - def update(self, value): - pass - - def finish(self): - if self.finished: - return - self.fobj.write("done.\n") - self.fobj.flush() - self.finished = True - -def new_progress(msg, maxval): - if interactive: - return BBProgress(msg, maxval) - else: - return NonInteractiveProgress(msg, maxval) - -def pluralise(singular, plural, qty): - if qty == 1: - return singular % qty - else: - return plural % qty - - -class InteractConsoleLogFilter(logging.Filter): - def __init__(self, tf): - self.tf = tf - super().__init__() - - def filter(self, record): - if record.levelno == bb.msg.BBLogFormatter.NOTE and (record.msg.startswith("Running") or record.msg.startswith("recipe ")): - return False - self.tf.clearFooter() - return True - -class TerminalFilter(object): - - # 40 Hz (FPS) -> 0.025 secs - _DEFAULT_PRINT_INTERVAL = 0.025 - - rows = 25 - columns = 80 - - def sigwinch_handle(self, signum, frame): - self.rows, self.columns = self.getTerminalColumns() - if self._sigwinch_default: - self._sigwinch_default(signum, frame) - - def getTerminalColumns(self): - def ioctl_GWINSZ(fd): - try: - cr = struct.unpack('hh', fcntl.ioctl(fd, self.termios.TIOCGWINSZ, '1234')) - except: - return None - return cr - cr = ioctl_GWINSZ(sys.stdout.fileno()) - if not cr: - try: - fd = os.open(os.ctermid(), os.O_RDONLY) - cr = ioctl_GWINSZ(fd) - os.close(fd) - except: - pass - if not cr: - try: - cr = (os.environ['LINES'], os.environ['COLUMNS']) - except: - cr = (25, 80) - return cr - - def __init__(self, main, helper, handlers, quiet): - self.main = main - self.helper = helper - self.cuu = None - self.stdinbackup = None - self.interactive = sys.stdout.isatty() - self.footer_present = False - self.lasttime = time.time() - self.quiet = quiet - - self._footer_buf = io.StringIO() - self._footer_lines = None - - if not self.interactive: - return - - try: - import curses - except ImportError: - sys.exit("FATAL: The knotty ui could not load the required curses python module.") - - import termios - self.curses = curses - self.termios = termios - try: - fd = sys.stdin.fileno() - self.stdinbackup = termios.tcgetattr(fd) - new = copy.deepcopy(self.stdinbackup) - new[3] = new[3] & ~termios.ECHO - termios.tcsetattr(fd, termios.TCSADRAIN, new) - curses.setupterm() - if curses.tigetnum("colors") > 2 and os.environ.get('NO_COLOR', '') == '': - for h in handlers: - try: - h.formatter.enable_color() - except AttributeError: - pass - self.ed = curses.tigetstr("ed") - if self.ed: - self.cuu = curses.tigetstr("cuu") - try: - self._sigwinch_default = signal.getsignal(signal.SIGWINCH) - signal.signal(signal.SIGWINCH, self.sigwinch_handle) - except: - pass - self.rows, self.columns = self.getTerminalColumns() - except: - self.cuu = None - if not self.cuu: - self.interactive = False - bb.note("Unable to use interactive mode for this terminal, using fallback") - return - - for h in handlers: - h.addFilter(InteractConsoleLogFilter(self)) - - self.main_progress = None - - def clearFooter(self): - if self.footer_present: - sys.stdout.buffer.write(self.curses.tparm(self.cuu, self._footer_lines)) - sys.stdout.buffer.write(self.curses.tparm(self.ed)) - sys.stdout.flush() - self.footer_present = False - self._footer_lines = None - - def elapsed(self, sec): - hrs = int(sec / 3600.0) - sec -= hrs * 3600 - min = int(sec / 60.0) - sec -= min * 60 - if hrs > 0: - return "%dh%dm%ds" % (hrs, min, sec) - elif min > 0: - return "%dm%ds" % (min, sec) - else: - return "%ds" % (sec) - - def keepAlive(self, t): - if not 
self.cuu: - msgbuf = ["Bitbake still alive (no events for %ds). Active tasks:" % t] - for t in self.helper.running_tasks: - msgbuf.append(str(t)) - print("\n".join(msgbuf)) - sys.stdout.flush() - - def updateFooter(self): - if not self.cuu: - return - activetasks = self.helper.running_tasks - failedtasks = self.helper.failed_tasks - currenttime = time.time() - deltatime = currenttime - self.lasttime - - if (deltatime > 5.0): - self.helper.needUpdate = True - need_update = self.helper.needUpdate - else: - # Do not update faster than _DEFAULT_PRINT_INTERVAL, - # to avoid flooding the terminal with print() calls. - need_update = self.helper.needUpdate and (deltatime > self._DEFAULT_PRINT_INTERVAL) - - if self.footer_present and (not need_update): - # Footer update is not needed. - return - else: - # Footer update is needed; record its "lasttime" value. - self.lasttime = currenttime - - self.helper.needUpdate = False - if (not self.helper.tasknumber_total or self.helper.tasknumber_current == self.helper.tasknumber_total) and not len(activetasks): - self.clearFooter() - return - - # Clear footer buffer. - self._footer_buf.truncate(0) - self._footer_buf.seek(0) - - tasks = [] - for t in activetasks.keys(): - start_time = activetasks[t].get("starttime", None) - if start_time: - msg = "%s - %s (pid %s)" % (activetasks[t]["title"], self.elapsed(currenttime - start_time), activetasks[t]["pid"]) - else: - msg = "%s (pid %s)" % (activetasks[t]["title"], activetasks[t]["pid"]) - progress = activetasks[t].get("progress", None) - if progress is not None: - pbar = activetasks[t].get("progressbar", None) - rate = activetasks[t].get("rate", None) - if not pbar or pbar.bouncing != (progress < 0): - if progress < 0: - pbar = BBProgress("0: %s" % msg, 100, widgets=[' ', progressbar.BouncingSlider(), ''], extrapos=3, resize_handler=self.sigwinch_handle) - pbar.bouncing = True - else: - pbar = BBProgress("0: %s" % msg, 100, widgets=[' ', progressbar.Percentage(), ' ', progressbar.Bar(), ''], extrapos=5, resize_handler=self.sigwinch_handle) - pbar.bouncing = False - pbar.fd = self._footer_buf - activetasks[t]["progressbar"] = pbar - tasks.append((pbar, msg, progress, rate, start_time)) - else: - tasks.append(msg) - - if self.main.shutdown: - content = pluralise("Waiting for %s running task to finish", - "Waiting for %s running tasks to finish", len(activetasks)) - if not self.quiet: - content += ':' - print(content, file=self._footer_buf) - else: - scene_tasks = "%s of %s" % (self.helper.setscene_current, self.helper.setscene_total) - cur_tasks = "%s of %s" % (self.helper.tasknumber_current, self.helper.tasknumber_total) - - content = '' - if not self.quiet: - msg = "Setscene tasks: %s" % scene_tasks - content += msg + "\n" - print(msg, file=self._footer_buf) - - if self.quiet: - msg = "Running tasks (%s, %s)" % (scene_tasks, cur_tasks) - elif not len(activetasks): - msg = "No currently running tasks (%s)" % cur_tasks - else: - msg = "Currently %2s running tasks (%s)" % (len(activetasks), cur_tasks) - maxtask = self.helper.tasknumber_total - if not self.main_progress or self.main_progress.maxval != maxtask: - widgets = [' ', progressbar.Percentage(), ' ', progressbar.Bar()] - self.main_progress = BBProgress("Running tasks", maxtask, widgets=widgets, resize_handler=self.sigwinch_handle) - self.main_progress.fd = self._footer_buf - self.main_progress.start(False) - self.main_progress.setmessage(msg) - progress = max(0, self.helper.tasknumber_current - 1) - content += self.main_progress.update(progress) - print('', 
file=self._footer_buf) - lines = self.getlines(content) - if not self.quiet: - for tasknum, task in enumerate(tasks[:(self.rows - 1 - lines)]): - if isinstance(task, tuple): - pbar, msg, progress, rate, start_time = task - if not pbar.start_time: - pbar.start(False) - if start_time: - pbar.start_time = start_time - pbar.setmessage('%s: %s' % (tasknum, msg)) - pbar.setextra(rate) - if progress > -1: - content = pbar.update(progress) - else: - content = pbar.update(1) - print('', file=self._footer_buf) - else: - content = "%s: %s" % (tasknum, task) - print(content, file=self._footer_buf) - lines = lines + self.getlines(content) - self.lastcount = self.helper.tasknumber_current - - # Clear footer and Print buffer. - self.clearFooter() - print(self._footer_buf.getvalue(), end='') - self._footer_lines = lines - self.footer_present = True - - def getlines(self, content): - lines = 0 - for line in content.split("\n"): - lines = lines + 1 + int(len(line) / (self.columns + 1)) - return lines - - def finish(self): - self._footer_buf.close() - if self.stdinbackup: - fd = sys.stdin.fileno() - self.termios.tcsetattr(fd, self.termios.TCSADRAIN, self.stdinbackup) - -def print_event_log(event, includelogs, loglines, termfilter): - logfile = event.logfile - if logfile and os.path.exists(logfile): - termfilter.clearFooter() - bb.error("Logfile of failure stored in: %s" % logfile) - if includelogs and not event.errprinted: - msgbuf = ["Log data follows:"] - f = open(logfile, "r") - lines = [] - while True: - l = f.readline() - if l == '': - break - l = l.rstrip() - if loglines: - lines.append(' | %s' % l) - if len(lines) > int(loglines): - lines.pop(0) - else: - msgbuf.append('| %s' % l) - f.close() - if lines: - msgbuf.extend(lines) - print("\n".join(msgbuf)) - -def _log_settings_from_server(server, observe_only): - # Get values of variables which control our output - includelogs, error = server.runCommand(["getVariable", "BBINCLUDELOGS"]) - if error: - logger.error("Unable to get the value of BBINCLUDELOGS variable: %s" % error) - raise BaseException(error) - loglines, error = server.runCommand(["getVariable", "BBINCLUDELOGS_LINES"]) - if error: - logger.error("Unable to get the value of BBINCLUDELOGS_LINES variable: %s" % error) - raise BaseException(error) - if observe_only: - cmd = 'getVariable' - else: - cmd = 'getSetVariable' - consolelogfile, error = server.runCommand([cmd, "BB_CONSOLELOG"]) - if error: - logger.error("Unable to get the value of BB_CONSOLELOG variable: %s" % error) - raise BaseException(error) - logconfigfile, error = server.runCommand([cmd, "BB_LOGCONFIG"]) - if error: - logger.error("Unable to get the value of BB_LOGCONFIG variable: %s" % error) - raise BaseException(error) - return includelogs, loglines, consolelogfile, logconfigfile - -_evt_list = [ "bb.runqueue.runQueueExitWait", "bb.event.LogExecTTY", "logging.LogRecord", - "bb.build.TaskFailed", "bb.build.TaskBase", "bb.event.ParseStarted", - "bb.event.ParseProgress", "bb.event.ParseCompleted", "bb.event.CacheLoadStarted", - "bb.event.CacheLoadProgress", "bb.event.CacheLoadCompleted", "bb.command.CommandFailed", - "bb.command.CommandExit", "bb.command.CommandCompleted", "bb.cooker.CookerExit", - "bb.event.MultipleProviders", "bb.event.NoProvider", "bb.runqueue.sceneQueueTaskStarted", - "bb.runqueue.runQueueTaskStarted", "bb.runqueue.runQueueTaskFailed", "bb.runqueue.sceneQueueTaskFailed", - "bb.event.BuildBase", "bb.build.TaskStarted", "bb.build.TaskSucceeded", "bb.build.TaskFailedSilent", - "bb.build.TaskProgress", 
"bb.event.ProcessStarted", "bb.event.ProcessProgress", "bb.event.ProcessFinished"] - -def drain_events_errorhandling(eventHandler): - # We don't have logging setup, we do need to show any events we see before exiting - event = True - logger = bb.msg.logger_create('bitbake', sys.stdout) - while event: - event = eventHandler.waitEvent(0) - if isinstance(event, logging.LogRecord): - logger.handle(event) - -def main(server, eventHandler, params, tf = TerminalFilter): - - try: - if not params.observe_only: - params.updateToServer(server, os.environ.copy()) - - includelogs, loglines, consolelogfile, logconfigfile = _log_settings_from_server(server, params.observe_only) - - loglevel, _ = bb.msg.constructLogOptions() - except bb.BBHandledException: - drain_events_errorhandling(eventHandler) - return 1 - except Exception as e: - # bitbake-server comms failure - early_logger = bb.msg.logger_create('bitbake', sys.stdout) - early_logger.fatal("Attempting to set server environment: %s", e) - return 1 - - if params.options.quiet == 0: - console_loglevel = loglevel - elif params.options.quiet > 2: - console_loglevel = bb.msg.BBLogFormatter.ERROR - else: - console_loglevel = bb.msg.BBLogFormatter.WARNING - - logconfig = { - "version": 1, - "handlers": { - "BitBake.console": { - "class": "logging.StreamHandler", - "formatter": "BitBake.consoleFormatter", - "level": console_loglevel, - "stream": "ext://sys.stdout", - "filters": ["BitBake.stdoutFilter"], - ".": { - "is_console": True, - }, - }, - "BitBake.errconsole": { - "class": "logging.StreamHandler", - "formatter": "BitBake.consoleFormatter", - "level": loglevel, - "stream": "ext://sys.stderr", - "filters": ["BitBake.stderrFilter"], - ".": { - "is_console": True, - }, - }, - # This handler can be used if specific loggers should print on - # the console at a lower severity than the default. It will - # display any messages sent to it that are lower than then - # BitBake.console logging level (so as to prevent duplication of - # messages). 
Nothing is attached to this handler by default - "BitBake.verbconsole": { - "class": "logging.StreamHandler", - "formatter": "BitBake.consoleFormatter", - "level": 1, - "stream": "ext://sys.stdout", - "filters": ["BitBake.verbconsoleFilter"], - ".": { - "is_console": True, - }, - }, - }, - "formatters": { - # This format instance will get color output enabled by the - # terminal - "BitBake.consoleFormatter" : { - "()": "bb.msg.BBLogFormatter", - "format": "%(levelname)s: %(message)s" - }, - # The file log requires a separate instance so that it doesn't get - # color enabled - "BitBake.logfileFormatter": { - "()": "bb.msg.BBLogFormatter", - "format": "%(levelname)s: %(message)s" - } - }, - "filters": { - "BitBake.stdoutFilter": { - "()": "bb.msg.LogFilterLTLevel", - "level": "ERROR" - }, - "BitBake.stderrFilter": { - "()": "bb.msg.LogFilterGEQLevel", - "level": "ERROR" - }, - "BitBake.verbconsoleFilter": { - "()": "bb.msg.LogFilterLTLevel", - "level": console_loglevel - }, - }, - "loggers": { - "BitBake": { - "level": loglevel, - "handlers": ["BitBake.console", "BitBake.errconsole"], - } - }, - "disable_existing_loggers": False - } - - # Enable the console log file if enabled - if consolelogfile and not params.options.show_environment and not params.options.show_versions: - logconfig = bb.msg.mergeLoggingConfig(logconfig, { - "version": 1, - "handlers" : { - "BitBake.consolelog": { - "class": "logging.FileHandler", - "formatter": "BitBake.logfileFormatter", - "level": loglevel, - "filename": consolelogfile, - }, - # Just like verbconsole, anything sent here will go to the - # log file, unless it would go to BitBake.consolelog - "BitBake.verbconsolelog" : { - "class": "logging.FileHandler", - "formatter": "BitBake.logfileFormatter", - "level": 1, - "filename": consolelogfile, - "filters": ["BitBake.verbconsolelogFilter"], - }, - }, - "filters": { - "BitBake.verbconsolelogFilter": { - "()": "bb.msg.LogFilterLTLevel", - "level": loglevel, - }, - }, - "loggers": { - "BitBake": { - "handlers": ["BitBake.consolelog"], - }, - - # Other interesting things that we want to keep an eye on - # in the log files in case someone has an issue, but not - # necessarily show to the user on the console - "BitBake.SigGen.HashEquiv": { - "level": "VERBOSE", - "handlers": ["BitBake.verbconsolelog"], - }, - "BitBake.RunQueue.HashEquiv": { - "level": "VERBOSE", - "handlers": ["BitBake.verbconsolelog"], - } - } - }) - - consolelogdirname = os.path.dirname(consolelogfile) - # `bb.utils.mkdirhier` has this check, but it reports failure using bb.fatal, which logs - # to the very logger we are trying to set up. - if '${' in str(consolelogdirname): - print( - "FATAL: Directory name {} contains unexpanded bitbake variable. 
This may cause build failures and WORKDIR pollution.".format( - consolelogdirname)) - if '${MACHINE}' in consolelogdirname: - print("HINT: It looks like you forgot to set MACHINE in local.conf.") - - bb.utils.mkdirhier(consolelogdirname) - loglink = os.path.join(consolelogdirname, 'console-latest.log') - bb.utils.remove(loglink) - try: - os.symlink(os.path.basename(consolelogfile), loglink) - except OSError: - pass - - # Add the logging domains specified by the user on the command line - for (domainarg, iterator) in groupby(params.debug_domains): - dlevel = len(tuple(iterator)) - l = logconfig["loggers"].setdefault("BitBake.%s" % domainarg, {}) - l["level"] = logging.DEBUG - dlevel + 1 - l.setdefault("handlers", []).extend(["BitBake.verbconsole"]) - - conf = bb.msg.setLoggingConfig(logconfig, logconfigfile) - - if sys.stdin.isatty() and sys.stdout.isatty(): - log_exec_tty = True - else: - log_exec_tty = False - - should_print_hyperlinks = sys.stdout.isatty() and os.environ.get('NO_COLOR', '') == '' - - helper = uihelper.BBUIHelper() - - # Look for the specially designated handlers which need to be passed to the - # terminal handler - console_handlers = [h for h in conf.config['handlers'].values() if getattr(h, 'is_console', False)] - - bb.utils.set_process_name("KnottyUI") - - if params.options.remote_server and params.options.kill_server: - server.terminateServer() - return - - llevel, debug_domains = bb.msg.constructLogOptions() - try: - server.runCommand(["setEventMask", server.getEventHandle(), llevel, debug_domains, _evt_list]) - except (BrokenPipeError, EOFError) as e: - # bitbake-server comms failure - logger.fatal("Attempting to set event mask: %s", e) - return 1 - - # The logging_tree module is *extremely* helpful in debugging logging - # domains. Uncomment here to dump the logging tree when bitbake starts - #import logging_tree - #logging_tree.printout() - - universe = False - if not params.observe_only: - try: - params.updateFromServer(server) - except Exception as e: - logger.fatal("Fetching command line: %s", e) - return 1 - cmdline = params.parseActions() - if not cmdline: - print("Nothing to do. 
Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.") - return 1 - if 'msg' in cmdline and cmdline['msg']: - logger.error(cmdline['msg']) - return 1 - if cmdline['action'][0] == "buildTargets" and "universe" in cmdline['action'][1]: - universe = True - - try: - ret, error = server.runCommand(cmdline['action']) - except (BrokenPipeError, EOFError) as e: - # bitbake-server comms failure - logger.fatal("Command '{}' failed: %s".format(cmdline), e) - return 1 - if error: - logger.error("Command '%s' failed: %s" % (cmdline, error)) - return 1 - elif not ret: - logger.error("Command '%s' failed: returned %s" % (cmdline, ret)) - return 1 - - - parseprogress = None - cacheprogress = None - main.shutdown = 0 - interrupted = False - return_value = 0 - errors = 0 - warnings = 0 - taskfailures = {} - - printintervaldelta = 10 * 60 # 10 minutes - printinterval = printintervaldelta - pinginterval = 1 * 60 # 1 minute - lastevent = lastprint = time.time() - - termfilter = tf(main, helper, console_handlers, params.options.quiet) - atexit.register(termfilter.finish) - - # shutdown levels - # 0 - normal operation - # 1 - no new task execution, let current running tasks finish - # 2 - interrupting currently executing tasks - # 3 - we're done, exit - while main.shutdown < 3: - try: - if (lastprint + printinterval) <= time.time(): - termfilter.keepAlive(printinterval) - printinterval += printintervaldelta - event = eventHandler.waitEvent(0) - if event is None: - if (lastevent + pinginterval) <= time.time(): - ret, error = server.runCommand(["ping"]) - if error or not ret: - termfilter.clearFooter() - print("No reply after pinging server (%s, %s), exiting." % (str(error), str(ret))) - return_value = 3 - main.shutdown = 3 - lastevent = time.time() - if not parseprogress: - termfilter.updateFooter() - event = eventHandler.waitEvent(0.25) - if event is None: - continue - lastevent = time.time() - helper.eventHandler(event) - if isinstance(event, bb.runqueue.runQueueExitWait): - if not main.shutdown: - main.shutdown = 1 - continue - if isinstance(event, bb.event.LogExecTTY): - if log_exec_tty: - tries = event.retries - while tries: - print("Trying to run: %s" % event.prog) - if os.system(event.prog) == 0: - break - time.sleep(event.sleep_delay) - tries -= 1 - if tries: - continue - logger.warning(event.msg) - continue - - if isinstance(event, logging.LogRecord): - lastprint = time.time() - printinterval = printintervaldelta - if event.levelno >= bb.msg.BBLogFormatter.ERRORONCE: - errors = errors + 1 - return_value = 1 - elif event.levelno == bb.msg.BBLogFormatter.WARNING: - warnings = warnings + 1 - - if event.taskpid != 0: - # For "normal" logging conditions, don't show note logs from tasks - # but do show them if the user has changed the default log level to - # include verbose/debug messages - if event.levelno <= bb.msg.BBLogFormatter.NOTE and (event.levelno < llevel or (event.levelno == bb.msg.BBLogFormatter.NOTE and llevel != bb.msg.BBLogFormatter.VERBOSE)): - continue - - # Prefix task messages with recipe/task - if event.taskpid in helper.pidmap and event.levelno not in [bb.msg.BBLogFormatter.PLAIN, bb.msg.BBLogFormatter.WARNONCE, bb.msg.BBLogFormatter.ERRORONCE]: - taskinfo = helper.running_tasks[helper.pidmap[event.taskpid]] - event.msg = taskinfo['title'] + ': ' + event.msg - if hasattr(event, 'fn') and event.levelno not in [bb.msg.BBLogFormatter.WARNONCE, bb.msg.BBLogFormatter.ERRORONCE]: - event.msg = event.fn + ': ' + event.msg - 
logging.getLogger(event.name).handle(event) - continue - - if isinstance(event, bb.build.TaskFailedSilent): - logger.warning("Logfile for failed setscene task is %s" % event.logfile) - continue - if isinstance(event, bb.build.TaskFailed): - return_value = 1 - print_event_log(event, includelogs, loglines, termfilter) - k = "{}:{}".format(event._fn, event._task) - taskfailures[k] = event.logfile - if isinstance(event, bb.build.TaskBase): - logger.info(event._message) - continue - if isinstance(event, bb.event.ParseStarted): - if params.options.quiet > 1: - continue - if event.total == 0: - continue - termfilter.clearFooter() - parseprogress = new_progress("Parsing recipes", event.total).start() - continue - if isinstance(event, bb.event.ParseProgress): - if params.options.quiet > 1: - continue - if parseprogress: - parseprogress.update(event.current) - else: - bb.warn("Got ParseProgress event for parsing that never started?") - continue - if isinstance(event, bb.event.ParseCompleted): - if params.options.quiet > 1: - continue - if not parseprogress: - continue - parseprogress.finish() - parseprogress = None - if params.options.quiet == 0: - print(("Parsing of %d .bb files complete (%d cached, %d parsed). %d targets, %d skipped, %d masked, %d errors." - % ( event.total, event.cached, event.parsed, event.virtuals, event.skipped, event.masked, event.errors))) - continue - - if isinstance(event, bb.event.CacheLoadStarted): - if params.options.quiet > 1: - continue - cacheprogress = new_progress("Loading cache", event.total).start() - continue - if isinstance(event, bb.event.CacheLoadProgress): - if params.options.quiet > 1: - continue - cacheprogress.update(event.current) - continue - if isinstance(event, bb.event.CacheLoadCompleted): - if params.options.quiet > 1: - continue - cacheprogress.finish() - if params.options.quiet == 0: - print("Loaded %d entries from dependency cache." 
% event.num_entries) - continue - - if isinstance(event, bb.command.CommandFailed): - return_value = event.exitcode - if event.error: - errors = errors + 1 - logger.error(str(event)) - main.shutdown = 3 - continue - if isinstance(event, bb.command.CommandExit): - if not return_value: - return_value = event.exitcode - main.shutdown = 3 - continue - if isinstance(event, (bb.command.CommandCompleted, bb.cooker.CookerExit)): - main.shutdown = 3 - continue - if isinstance(event, bb.event.MultipleProviders): - logger.info(str(event)) - continue - if isinstance(event, bb.event.NoProvider): - # For universe builds, only show these as warnings, not errors - if not universe: - return_value = 1 - errors = errors + 1 - logger.error(str(event)) - else: - logger.warning(str(event)) - continue - - if isinstance(event, bb.runqueue.sceneQueueTaskStarted): - logger.info("Running setscene task %d of %d (%s)" % (event.stats.setscene_covered + event.stats.setscene_active + event.stats.setscene_notcovered + 1, event.stats.setscene_total, event.taskstring)) - continue - - if isinstance(event, bb.runqueue.runQueueTaskStarted): - if event.noexec: - tasktype = 'noexec task' - else: - tasktype = 'task' - logger.info("Running %s %d of %d (%s)", - tasktype, - event.stats.completed + event.stats.active + - event.stats.failed + 1, - event.stats.total, event.taskstring) - continue - - if isinstance(event, bb.runqueue.runQueueTaskFailed): - return_value = 1 - taskfailures.setdefault(event.taskstring) - logger.error(str(event)) - continue - - if isinstance(event, bb.runqueue.sceneQueueTaskFailed): - logger.warning(str(event)) - continue - - if isinstance(event, bb.event.DepTreeGenerated): - continue - - if isinstance(event, bb.event.ProcessStarted): - if params.options.quiet > 1: - continue - termfilter.clearFooter() - parseprogress = new_progress(event.processname, event.total) - parseprogress.start(False) - continue - if isinstance(event, bb.event.ProcessProgress): - if params.options.quiet > 1: - continue - if parseprogress: - parseprogress.update(event.progress) - else: - bb.warn("Got ProcessProgress event for something that never started?") - continue - if isinstance(event, bb.event.ProcessFinished): - if params.options.quiet > 1: - continue - if parseprogress: - parseprogress.finish() - parseprogress = None - continue - - # ignore - if isinstance(event, (bb.event.BuildBase, - bb.event.MetadataEvent, - bb.event.ConfigParsed, - bb.event.MultiConfigParsed, - bb.event.RecipeParsed, - bb.event.RecipePreFinalise, - bb.runqueue.runQueueEvent, - bb.event.OperationStarted, - bb.event.OperationCompleted, - bb.event.OperationProgress, - bb.event.DiskFull, - bb.event.HeartbeatEvent, - bb.build.TaskProgress)): - continue - - logger.error("Unknown event: %s", event) - - except (BrokenPipeError, EOFError) as e: - # bitbake-server comms failure, don't attempt further comms and exit - logger.fatal("Executing event: %s", e) - return_value = 1 - errors = errors + 1 - main.shutdown = 3 - except EnvironmentError as ioerror: - termfilter.clearFooter() - # ignore interrupted I/O (EINTR) - if ioerror.args[0] == 4: - continue - sys.stderr.write(str(ioerror)) - main.shutdown = 2 - if not params.observe_only: - try: - _, error = server.runCommand(["stateForceShutdown"]) - except (BrokenPipeError, EOFError) as e: - # bitbake-server comms failure, don't attempt further comms and exit - logger.fatal("Unable to force shutdown: %s", e) - main.shutdown = 3 - except KeyboardInterrupt: - termfilter.clearFooter() - if params.observe_only: - print("\nKeyboard 
Interrupt, exiting observer...") - main.shutdown = 2 - - def state_force_shutdown(): - print("\nSecond Keyboard Interrupt, stopping...\n") - try: - _, error = server.runCommand(["stateForceShutdown"]) - if error: - logger.error("Unable to cleanly stop: %s" % error) - except (BrokenPipeError, EOFError) as e: - # bitbake-server comms failure - logger.fatal("Unable to cleanly stop: %s", e) - - if not params.observe_only and main.shutdown == 1: - state_force_shutdown() - - if not params.observe_only and main.shutdown == 0: - print("\nKeyboard Interrupt, closing down...\n") - interrupted = True - # Catch a second KeyboardInterrupt while stateShutdown is running - try: - _, error = server.runCommand(["stateShutdown"]) - if error: - logger.error("Unable to cleanly shutdown: %s" % error) - except (BrokenPipeError, EOFError) as e: - # bitbake-server comms failure - logger.fatal("Unable to cleanly shutdown: %s", e) - except KeyboardInterrupt: - state_force_shutdown() - - main.shutdown = main.shutdown + 1 - except Exception as e: - import traceback - sys.stderr.write(traceback.format_exc()) - main.shutdown = 2 - if not params.observe_only: - try: - _, error = server.runCommand(["stateForceShutdown"]) - except (BrokenPipeError, EOFError) as e: - # bitbake-server comms failure, don't attempt further comms and exit - logger.fatal("Unable to force shutdown: %s", e) - main.shutdown = 3 - return_value = 1 - try: - termfilter.clearFooter() - summary = "" - def format_hyperlink(url, link_text): - if should_print_hyperlinks: - start = f'\033]8;;{url}\033\\' - end = '\033]8;;\033\\' - return f'{start}{link_text}{end}' - return link_text - - if taskfailures: - summary += pluralise("\nSummary: %s task failed:", - "\nSummary: %s tasks failed:", len(taskfailures)) - for (failure, log_file) in taskfailures.items(): - summary += "\n %s" % failure - if log_file: - hyperlink = format_hyperlink(f"file://{log_file}", log_file) - summary += "\n log: {}".format(hyperlink) - if warnings: - summary += pluralise("\nSummary: There was %s WARNING message.", - "\nSummary: There were %s WARNING messages.", warnings) - if return_value and errors: - summary += pluralise("\nSummary: There was %s ERROR message, returning a non-zero exit code.", - "\nSummary: There were %s ERROR messages, returning a non-zero exit code.", errors) - if summary and params.options.quiet == 0: - print(summary) - - if interrupted: - print("Execution was interrupted, returning a non-zero exit code.") - if return_value == 0: - return_value = 1 - except IOError as e: - import errno - if e.errno == errno.EPIPE: - pass - - logging.shutdown() - - return return_value
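The format_hyperlink() helper in main() above emits OSC 8 terminal hyperlinks: ESC ] 8 ; ; URL ESC \ opens a link around the following text, and the same sequence with an empty URL closes it. A standalone sketch of the same escape, with the TTY and NO_COLOR guard that knotty applies via should_print_hyperlinks:

    import os
    import sys

    def hyperlink(url, link_text):
        # OSC 8: ESC]8;;URL ESC\ starts the link, ESC]8;; ESC\ ends it.
        # Emit the escapes only on a TTY with NO_COLOR unset, as knotty does.
        if sys.stdout.isatty() and os.environ.get('NO_COLOR', '') == '':
            return f'\033]8;;{url}\033\\{link_text}\033]8;;\033\\'
        return link_text

    # Example (the path is illustrative only):
    print(hyperlink('file:///tmp/log.do_compile', 'log.do_compile'))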
    | | - | | 0: foo do_compile complete| - | Building Gtk+-2.6.10 | 1: bar do_patch complete | - | Status: 60% | ... | - | | ... | - | | ... | - |---------------------------------------------------------| - | | - |>>> which virtual/kernel | - |openzaurus-kernel | - |>>> _ | - |---------------------------------------------------------| - -""" - - - -import logging -import os, sys, itertools, time - -try: - import curses -except ImportError: - sys.exit("FATAL: The ncurses ui could not load the required curses python module.") - -import bb -import xmlrpc.client -from bb.ui import uihelper - -logger = logging.getLogger(__name__) - -parsespin = itertools.cycle( r'|/-\\' ) - -X = 0 -Y = 1 -WIDTH = 2 -HEIGHT = 3 - -MAXSTATUSLENGTH = 32 - -class NCursesUI: - """ - NCurses UI Class - """ - class Window: - """Base Window Class""" - def __init__( self, x, y, width, height, fg=curses.COLOR_BLACK, bg=curses.COLOR_WHITE ): - self.win = curses.newwin( height, width, y, x ) - self.dimensions = ( x, y, width, height ) - """ - if curses.has_colors(): - color = 1 - curses.init_pair( color, fg, bg ) - self.win.bkgdset( ord(' '), curses.color_pair(color) ) - else: - self.win.bkgdset( ord(' '), curses.A_BOLD ) - """ - self.erase() - self.setScrolling() - self.win.noutrefresh() - - def erase( self ): - self.win.erase() - - def setScrolling( self, b = True ): - self.win.scrollok( b ) - self.win.idlok( b ) - - def setBoxed( self ): - self.boxed = True - self.win.box() - self.win.noutrefresh() - - def setText( self, x, y, text, *args ): - self.win.addstr( y, x, text, *args ) - self.win.noutrefresh() - - def appendText( self, text, *args ): - self.win.addstr( text, *args ) - self.win.noutrefresh() - - def drawHline( self, y ): - self.win.hline( y, 0, curses.ACS_HLINE, self.dimensions[WIDTH] ) - self.win.noutrefresh() - - class DecoratedWindow( Window ): - """Base class for windows with a box and a title bar""" - def __init__( self, title, x, y, width, height, fg=curses.COLOR_BLACK, bg=curses.COLOR_WHITE ): - NCursesUI.Window.__init__( self, x+1, y+3, width-2, height-4, fg, bg ) - self.decoration = NCursesUI.Window( x, y, width, height, fg, bg ) - self.decoration.setBoxed() - self.decoration.win.hline( 2, 1, curses.ACS_HLINE, width-2 ) - self.setTitle( title ) - - def setTitle( self, title ): - self.decoration.setText( 1, 1, title.center( self.dimensions[WIDTH]-2 ), curses.A_BOLD ) - - #-------------------------------------------------------------------------# -# class TitleWindow( Window ): - #-------------------------------------------------------------------------# -# """Title Window""" -# def __init__( self, x, y, width, height ): -# NCursesUI.Window.__init__( self, x, y, width, height ) -# version = bb.__version__ -# title = "BitBake %s" % version -# credit = "(C) 2003-2007 Team BitBake" -# #self.win.hline( 2, 1, curses.ACS_HLINE, width-2 ) -# self.win.border() -# self.setText( 1, 1, title.center( self.dimensions[WIDTH]-2 ), curses.A_BOLD ) -# self.setText( 1, 2, credit.center( self.dimensions[WIDTH]-2 ), curses.A_BOLD ) - - #-------------------------------------------------------------------------# - class ThreadActivityWindow( DecoratedWindow ): - #-------------------------------------------------------------------------# - """Thread Activity Window""" - def __init__( self, x, y, width, height ): - NCursesUI.DecoratedWindow.__init__( self, "Thread Activity", x, y, width, height ) - - def setStatus( self, thread, text ): - line = "%02d: %s" % ( thread, text ) - width = self.dimensions[WIDTH] - if ( len(line) > 
width ): - line = line[:width-3] + "..." - else: - line = line.ljust( width ) - self.setText( 0, thread, line ) - - #-------------------------------------------------------------------------# - class MainWindow( DecoratedWindow ): - #-------------------------------------------------------------------------# - """Main Window""" - def __init__( self, x, y, width, height ): - self.StatusPosition = width - MAXSTATUSLENGTH - NCursesUI.DecoratedWindow.__init__( self, None, x, y, width, height ) - curses.nl() - - def setTitle( self, title ): - title = "BitBake %s" % bb.__version__ - self.decoration.setText( 2, 1, title, curses.A_BOLD ) - self.decoration.setText( self.StatusPosition - 8, 1, "Status:", curses.A_BOLD ) - - def setStatus(self, status): - while len(status) < MAXSTATUSLENGTH: - status = status + " " - self.decoration.setText( self.StatusPosition, 1, status, curses.A_BOLD ) - - - #-------------------------------------------------------------------------# - class ShellOutputWindow( DecoratedWindow ): - #-------------------------------------------------------------------------# - """Interactive Command Line Output""" - def __init__( self, x, y, width, height ): - NCursesUI.DecoratedWindow.__init__( self, "Command Line Window", x, y, width, height ) - - #-------------------------------------------------------------------------# - class ShellInputWindow( Window ): - #-------------------------------------------------------------------------# - """Interactive Command Line Input""" - def __init__( self, x, y, width, height ): - NCursesUI.Window.__init__( self, x, y, width, height ) - -# put that to the top again from curses.textpad import Textbox -# self.textbox = Textbox( self.win ) -# t = threading.Thread() -# t.run = self.textbox.edit -# t.start() - - #-------------------------------------------------------------------------# - def main(self, stdscr, server, eventHandler, params): - #-------------------------------------------------------------------------# - height, width = stdscr.getmaxyx() - - # for now split it like that: - # MAIN_y + THREAD_y = 2/3 screen at the top - # MAIN_x = 2/3 left, THREAD_y = 1/3 right - # CLI_y = 1/3 of screen at the bottom - # CLI_x = full - - main_left = 0 - main_top = 0 - main_height = ( height // 3 * 2 ) - main_width = ( width // 3 ) * 2 - clo_left = main_left - clo_top = main_top + main_height - clo_height = height - main_height - main_top - 1 - clo_width = width - cli_left = main_left - cli_top = clo_top + clo_height - cli_height = 1 - cli_width = width - thread_left = main_left + main_width - thread_top = main_top - thread_height = main_height - thread_width = width - main_width - - #tw = self.TitleWindow( 0, 0, width, main_top ) - mw = self.MainWindow( main_left, main_top, main_width, main_height ) - taw = self.ThreadActivityWindow( thread_left, thread_top, thread_width, thread_height ) - clo = self.ShellOutputWindow( clo_left, clo_top, clo_width, clo_height ) - cli = self.ShellInputWindow( cli_left, cli_top, cli_width, cli_height ) - cli.setText( 0, 0, "BB>" ) - - mw.setStatus("Idle") - - helper = uihelper.BBUIHelper() - shutdown = 0 - - try: - if not params.observe_only: - params.updateToServer(server, os.environ.copy()) - - params.updateFromServer(server) - cmdline = params.parseActions() - if not cmdline: - print("Nothing to do. 
Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.")
-                return 1
-            if 'msg' in cmdline and cmdline['msg']:
-                logger.error(cmdline['msg'])
-                return 1
-            cmdline = cmdline['action']
-            ret, error = server.runCommand(cmdline)
-            if error:
-                print("Error running command '%s': %s" % (cmdline, error))
-                return
-            elif not ret:
-                print("Couldn't get default commandline! %s" % ret)
-                return
-        except xmlrpc.client.Fault as x:
-            print("XMLRPC Fault getting commandline:\n %s" % x)
-            return
-
-        exitflag = False
-        while not exitflag:
-            try:
-                event = eventHandler.waitEvent(0.25)
-                if not event:
-                    continue
-
-                helper.eventHandler(event)
-                if isinstance(event, bb.build.TaskBase):
-                    mw.appendText("NOTE: %s\n" % event._message)
-                if isinstance(event, logging.LogRecord):
-                    mw.appendText(logging.getLevelName(event.levelno) + ': ' + event.getMessage() + '\n')
-
-                if isinstance(event, bb.event.CacheLoadStarted):
-                    self.parse_total = event.total
-                if isinstance(event, bb.event.CacheLoadProgress):
-                    x = event.current
-                    y = self.parse_total
-                    mw.setStatus("Loading Cache: %s [%2d %%]" % ( next(parsespin), x*100/y ) )
-                if isinstance(event, bb.event.CacheLoadCompleted):
-                    mw.setStatus("Idle")
-                    mw.appendText("Loaded %d entries from dependency cache.\n"
-                                  % ( event.num_entries))
-
-                if isinstance(event, bb.event.ParseStarted):
-                    self.parse_total = event.total
-                if isinstance(event, bb.event.ParseProgress):
-                    x = event.current
-                    y = self.parse_total
-                    mw.setStatus("Parsing Recipes: %s [%2d %%]" % ( next(parsespin), x*100/y ) )
-                if isinstance(event, bb.event.ParseCompleted):
-                    mw.setStatus("Idle")
-                    mw.appendText("Parsing finished. %d cached, %d parsed, %d skipped, %d masked.\n"
-                                  % ( event.cached, event.parsed, event.skipped, event.masked ))
-
-#                if isinstance(event, bb.build.TaskFailed):
-#                    if event.logfile:
-#                        if data.getVar("BBINCLUDELOGS", d):
-#                            bb.error("log data follows (%s)" % logfile)
-#                            number_of_lines = data.getVar("BBINCLUDELOGS_LINES", d)
-#                            if number_of_lines:
-#                                subprocess.check_call('tail -n%s %s' % (number_of_lines, logfile), shell=True)
-#                            else:
-#                                f = open(logfile, "r")
-#                                while True:
-#                                    l = f.readline()
-#                                    if l == '':
-#                                        break
-#                                    l = l.rstrip()
-#                                    print '| %s' % l
-#                                f.close()
-#                        else:
-#                            bb.error("see log in %s" % logfile)
-
-                if isinstance(event, bb.command.CommandCompleted):
-                    # stop so the user can see the result of the build, but
-                    # also allow them to now exit with a single ^C
-                    shutdown = 2
-                if isinstance(event, bb.command.CommandFailed):
-                    mw.appendText(str(event))
-                    time.sleep(2)
-                    exitflag = True
-                if isinstance(event, bb.command.CommandExit):
-                    exitflag = True
-                if isinstance(event, bb.cooker.CookerExit):
-                    exitflag = True
-
-                if isinstance(event, bb.event.LogExecTTY):
-                    mw.appendText('WARN: ' + event.msg + '\n')
-                if helper.needUpdate:
-                    activetasks, failedtasks = helper.getTasks()
-                    taw.erase()
-                    taw.setText(0, 0, "")
-                    if activetasks:
-                        taw.appendText("Active Tasks:\n")
-                        for task in activetasks.values():
-                            taw.appendText(task["title"] + '\n')
-                    if failedtasks:
-                        taw.appendText("Failed Tasks:\n")
-                        for task in failedtasks:
-                            taw.appendText(task["title"] + '\n')
-
-                curses.doupdate()
-            except EnvironmentError as ioerror:
-                # ignore interrupted io
-                if ioerror.args[0] == 4:
-                    pass
-
-            except KeyboardInterrupt:
-                if shutdown == 2:
-                    mw.appendText("Third Keyboard Interrupt, exit.\n")
-                    exitflag = True
-                if shutdown == 1:
-                    mw.appendText("Second Keyboard Interrupt, stopping...\n")
-                    _, error = server.runCommand(["stateForceShutdown"])
-                    if error:
-
print("Unable to cleanly stop: %s" % error) - if shutdown == 0: - mw.appendText("Keyboard Interrupt, closing down...\n") - _, error = server.runCommand(["stateShutdown"]) - if error: - print("Unable to cleanly shutdown: %s" % error) - shutdown = shutdown + 1 - pass - -def main(server, eventHandler, params): - if not os.isatty(sys.stdout.fileno()): - print("FATAL: Unable to run 'ncurses' UI without a TTY.") - return - ui = NCursesUI() - try: - curses.wrapper(ui.main, server, eventHandler, params) - except: - import traceback - traceback.print_exc() diff --git a/bitbake/lib/bb/ui/taskexp.py b/bitbake/lib/bb/ui/taskexp.py deleted file mode 100644 index bedfd69b09..0000000000 --- a/bitbake/lib/bb/ui/taskexp.py +++ /dev/null @@ -1,341 +0,0 @@ -# -# BitBake Graphical GTK based Dependency Explorer -# -# Copyright (C) 2007 Ross Burton -# Copyright (C) 2007 - 2008 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import sys -import traceback - -try: - import gi - gi.require_version('Gtk', '3.0') - from gi.repository import Gtk, Gdk, GObject -except ValueError: - sys.exit("FATAL: Gtk version needs to be 3.0") -except ImportError: - sys.exit("FATAL: Gtk ui could not load the required gi python module") - -import threading -from xmlrpc import client -import bb -import bb.event - -# Package Model -(COL_PKG_NAME) = (0) - -# Dependency Model -(TYPE_DEP, TYPE_RDEP) = (0, 1) -(COL_DEP_TYPE, COL_DEP_PARENT, COL_DEP_PACKAGE) = (0, 1, 2) - - -class PackageDepView(Gtk.TreeView): - def __init__(self, model, dep_type, label): - Gtk.TreeView.__init__(self) - self.current = None - self.dep_type = dep_type - self.filter_model = model.filter_new() - self.filter_model.set_visible_func(self._filter, data=None) - self.set_model(self.filter_model) - self.append_column(Gtk.TreeViewColumn(label, Gtk.CellRendererText(), text=COL_DEP_PACKAGE)) - - def _filter(self, model, iter, data): - this_type = model[iter][COL_DEP_TYPE] - package = model[iter][COL_DEP_PARENT] - if this_type != self.dep_type: return False - return package == self.current - - def set_current_package(self, package): - self.current = package - self.filter_model.refilter() - - -class PackageReverseDepView(Gtk.TreeView): - def __init__(self, model, label): - Gtk.TreeView.__init__(self) - self.current = None - self.filter_model = model.filter_new() - self.filter_model.set_visible_func(self._filter) - # The introspected API was fixed but we can't rely on a pygobject that hides this. 
- # https://gitlab.gnome.org/GNOME/pygobject/-/commit/9cdbc56fbac4db2de78dc080934b8f0a7efc892a - if hasattr(Gtk.TreeModelSort, "new_with_model"): - self.sort_model = Gtk.TreeModelSort.new_with_model(self.filter_model) - else: - self.sort_model = self.filter_model.sort_new_with_model() - self.sort_model.set_sort_column_id(COL_DEP_PARENT, Gtk.SortType.ASCENDING) - self.set_model(self.sort_model) - self.append_column(Gtk.TreeViewColumn(label, Gtk.CellRendererText(), text=COL_DEP_PARENT)) - - def _filter(self, model, iter, data): - package = model[iter][COL_DEP_PACKAGE] - return package == self.current - - def set_current_package(self, package): - self.current = package - self.filter_model.refilter() - - -class DepExplorer(Gtk.Window): - def __init__(self): - Gtk.Window.__init__(self) - self.set_title("Task Dependency Explorer") - self.set_default_size(500, 500) - self.connect("delete-event", Gtk.main_quit) - - # Create the data models - self.pkg_model = Gtk.ListStore(GObject.TYPE_STRING) - self.pkg_model.set_sort_column_id(COL_PKG_NAME, Gtk.SortType.ASCENDING) - self.depends_model = Gtk.ListStore(GObject.TYPE_INT, GObject.TYPE_STRING, GObject.TYPE_STRING) - self.depends_model.set_sort_column_id(COL_DEP_PACKAGE, Gtk.SortType.ASCENDING) - - pane = Gtk.HPaned() - pane.set_position(250) - self.add(pane) - - # The master list of packages - scrolled = Gtk.ScrolledWindow() - scrolled.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC) - scrolled.set_shadow_type(Gtk.ShadowType.IN) - - self.pkg_treeview = Gtk.TreeView(self.pkg_model) - self.pkg_treeview.get_selection().connect("changed", self.on_cursor_changed) - column = Gtk.TreeViewColumn("Package", Gtk.CellRendererText(), text=COL_PKG_NAME) - self.pkg_treeview.append_column(column) - scrolled.add(self.pkg_treeview) - - self.search_entry = Gtk.SearchEntry.new() - self.pkg_treeview.set_search_entry(self.search_entry) - - left_panel = Gtk.VPaned() - left_panel.add(self.search_entry) - left_panel.add(scrolled) - pane.add1(left_panel) - - box = Gtk.VBox(homogeneous=True, spacing=4) - - # Task Depends - scrolled = Gtk.ScrolledWindow() - scrolled.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC) - scrolled.set_shadow_type(Gtk.ShadowType.IN) - self.dep_treeview = PackageDepView(self.depends_model, TYPE_DEP, "Dependencies") - self.dep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PACKAGE) - scrolled.add(self.dep_treeview) - box.add(scrolled) - pane.add2(box) - - # Reverse Task Depends - scrolled = Gtk.ScrolledWindow() - scrolled.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC) - scrolled.set_shadow_type(Gtk.ShadowType.IN) - self.revdep_treeview = PackageReverseDepView(self.depends_model, "Dependent Tasks") - self.revdep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PARENT) - scrolled.add(self.revdep_treeview) - box.add(scrolled) - pane.add2(box) - - self.show_all() - self.search_entry.grab_focus() - - def on_package_activated(self, treeview, path, column, data_col): - model = treeview.get_model() - package = model.get_value(model.get_iter(path), data_col) - - pkg_path = [] - def finder(model, path, iter, needle): - package = model.get_value(iter, COL_PKG_NAME) - if package == needle: - pkg_path.append(path) - return True - else: - return False - self.pkg_model.foreach(finder, package) - if pkg_path: - self.pkg_treeview.get_selection().select_path(pkg_path[0]) - self.pkg_treeview.scroll_to_cell(pkg_path[0]) - - def on_cursor_changed(self, selection): - (model, it) = 
selection.get_selected() - if it is None: - current_package = None - else: - current_package = model.get_value(it, COL_PKG_NAME) - self.dep_treeview.set_current_package(current_package) - self.revdep_treeview.set_current_package(current_package) - - - def parse(self, depgraph): - for task in depgraph["tdepends"]: - self.pkg_model.insert(0, (task,)) - for depend in depgraph["tdepends"][task]: - self.depends_model.insert (0, (TYPE_DEP, task, depend)) - - -class gtkthread(threading.Thread): - quit = threading.Event() - def __init__(self, shutdown): - threading.Thread.__init__(self) - self.daemon = True - self.shutdown = shutdown - if not Gtk.init_check()[0]: - sys.stderr.write("Gtk+ init failed. Make sure DISPLAY variable is set.\n") - gtkthread.quit.set() - - def run(self): - GObject.threads_init() - Gdk.threads_init() - Gtk.main() - gtkthread.quit.set() - - -def main(server, eventHandler, params): - shutdown = 0 - - gtkgui = gtkthread(shutdown) - gtkgui.start() - - try: - params.updateToServer(server, os.environ.copy()) - params.updateFromServer(server) - cmdline = params.parseActions() - if not cmdline: - print("Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.") - return 1 - if 'msg' in cmdline and cmdline['msg']: - print(cmdline['msg']) - return 1 - cmdline = cmdline['action'] - if not cmdline or cmdline[0] != "generateDotGraph": - print("This UI requires the -g option") - return 1 - ret, error = server.runCommand(["generateDepTreeEvent", cmdline[1], cmdline[2]]) - if error: - print("Error running command '%s': %s" % (cmdline, error)) - return 1 - elif not ret: - print("Error running command '%s': returned %s" % (cmdline, ret)) - return 1 - except client.Fault as x: - print("XMLRPC Fault getting commandline:\n %s" % x) - return - except Exception as e: - print("Exception in startup:\n %s" % traceback.format_exc()) - return - - if gtkthread.quit.isSet(): - return - - Gdk.threads_enter() - dep = DepExplorer() - bardialog = Gtk.Dialog(parent=dep, - flags=Gtk.DialogFlags.MODAL|Gtk.DialogFlags.DESTROY_WITH_PARENT) - bardialog.set_default_size(400, 50) - box = bardialog.get_content_area() - pbar = Gtk.ProgressBar() - box.pack_start(pbar, True, True, 0) - bardialog.show_all() - bardialog.connect("delete-event", Gtk.main_quit) - Gdk.threads_leave() - - progress_total = 0 - while True: - try: - event = eventHandler.waitEvent(0.25) - if gtkthread.quit.isSet(): - _, error = server.runCommand(["stateForceShutdown"]) - if error: - print('Unable to cleanly stop: %s' % error) - break - - if event is None: - continue - - if isinstance(event, bb.event.CacheLoadStarted): - progress_total = event.total - Gdk.threads_enter() - bardialog.set_title("Loading Cache") - pbar.set_fraction(0.0) - Gdk.threads_leave() - - if isinstance(event, bb.event.CacheLoadProgress): - x = event.current - Gdk.threads_enter() - pbar.set_fraction(x * 1.0 / progress_total) - Gdk.threads_leave() - continue - - if isinstance(event, bb.event.CacheLoadCompleted): - continue - - if isinstance(event, bb.event.ParseStarted): - progress_total = event.total - if progress_total == 0: - continue - Gdk.threads_enter() - pbar.set_fraction(0.0) - bardialog.set_title("Processing recipes") - Gdk.threads_leave() - - if isinstance(event, bb.event.ParseProgress): - x = event.current - Gdk.threads_enter() - pbar.set_fraction(x * 1.0 / progress_total) - Gdk.threads_leave() - continue - - if isinstance(event, bb.event.ParseCompleted): - Gdk.threads_enter() - bardialog.set_title("Generating dependency 
tree") - Gdk.threads_leave() - continue - - if isinstance(event, bb.event.DepTreeGenerated): - Gdk.threads_enter() - bardialog.hide() - dep.parse(event._depgraph) - Gdk.threads_leave() - - if isinstance(event, bb.command.CommandCompleted): - continue - - if isinstance(event, bb.event.NoProvider): - print(str(event)) - - _, error = server.runCommand(["stateShutdown"]) - if error: - print('Unable to cleanly shutdown: %s' % error) - break - - if isinstance(event, bb.command.CommandFailed): - print(str(event)) - return event.exitcode - - if isinstance(event, bb.command.CommandExit): - return event.exitcode - - if isinstance(event, bb.cooker.CookerExit): - break - - continue - except EnvironmentError as ioerror: - # ignore interrupted io - if ioerror.args[0] == 4: - pass - except KeyboardInterrupt: - if shutdown == 2: - print("\nThird Keyboard Interrupt, exit.\n") - break - if shutdown == 1: - print("\nSecond Keyboard Interrupt, stopping...\n") - _, error = server.runCommand(["stateForceShutdown"]) - if error: - print('Unable to cleanly stop: %s' % error) - if shutdown == 0: - print("\nKeyboard Interrupt, closing down...\n") - _, error = server.runCommand(["stateShutdown"]) - if error: - print('Unable to cleanly shutdown: %s' % error) - shutdown = shutdown + 1 - pass diff --git a/bitbake/lib/bb/ui/taskexp_ncurses.py b/bitbake/lib/bb/ui/taskexp_ncurses.py deleted file mode 100755 index ea94a4987f..0000000000 --- a/bitbake/lib/bb/ui/taskexp_ncurses.py +++ /dev/null @@ -1,1511 +0,0 @@ -# -# BitBake Graphical ncurses-based Dependency Explorer -# * Based on the GTK implementation -# * Intended to run on any Linux host -# -# Copyright (C) 2007 Ross Burton -# Copyright (C) 2007 - 2008 Richard Purdie -# Copyright (C) 2022 - 2024 David Reyna -# -# SPDX-License-Identifier: GPL-2.0-only -# - -# -# Execution example: -# $ bitbake -g -u taskexp_ncurses zlib acl -# -# Self-test example (executes a script of GUI actions): -# $ TASK_EXP_UNIT_TEST=1 bitbake -g -u taskexp_ncurses zlib acl -# ... -# $ echo $? -# 0 -# $ TASK_EXP_UNIT_TEST=1 bitbake -g -u taskexp_ncurses zlib acl foo -# ERROR: Nothing PROVIDES 'foo'. Close matches: -# ofono -# $ echo $? -# 1 -# -# Self-test with no terminal example (only tests dependency fetch from bitbake): -# $ TASK_EXP_UNIT_TEST_NOTERM=1 bitbake -g -u taskexp_ncurses quilt -# $ echo $? -# 0 -# -# Features: -# * Ncurses is used for the presentation layer. Only the 'curses' -# library is used (none of the extension libraries), plus only -# one main screen is used (no sub-windows) -# * Uses the 'generateDepTreeEvent' bitbake event to fetch the -# dynamic dependency data based on passed recipes -# * Computes and provides reverse dependencies -# * Supports task sorting on: -# (a) Task dependency order within each recipe -# (b) Pure alphabetical order -# (c) Provisions for third sort order (bitbake order?) -# * The 'Filter' does a "*string*" wildcard filter on tasks in the -# main window, dynamically re-ordering and re-centering the content -# * A 'Print' function exports the selected task or its whole recipe -# task set to the default file "taskdep.txt" -# * Supports a progress bar for bitbake loads and file printing -# * Line art for box drawing supported, ASCII art an alernative -# * No horizontal scrolling support. 
Selected task's full name
-#   shown in bottom bar
-# * Dynamically catches terminals that are (or become) too small
-# * Exception to ensure return to normal terminal on errors
-# * Debugging support, self test option
-#
-
-import os
-import sys
-import traceback
-import curses
-import re
-import time
-
-# Bitbake server support
-import threading
-from xmlrpc import client
-import bb
-import bb.event
-
-# Dependency indexes (depends_model)
-(TYPE_DEP, TYPE_RDEP) = (0, 1)
-DEPENDS_TYPE = 0
-DEPENDS_TASK = 1
-DEPENDS_DEPS = 2
-# Task indexes (task_list)
-TASK_NAME = 0
-TASK_PRIMARY = 1
-TASK_SORT_ALPHA = 2
-TASK_SORT_DEPS = 3
-TASK_SORT_BITBAKE = 4
-# Sort options (default is SORT_DEPS)
-SORT_ALPHA = 0
-SORT_DEPS = 1
-SORT_BITBAKE_ENABLE = False # NOTE: future sort
-SORT_BITBAKE = 2
-sort_model = SORT_DEPS
-# Print options
-PRINT_MODEL_1 = 0
-PRINT_MODEL_2 = 1
-print_model = PRINT_MODEL_2
-print_file_name = "taskdep_print.log"
-print_file_backup_name = "taskdep_print_backup.log"
-is_printed = False
-is_filter = False
-
-# Standard (and backup) key mappings
-CHAR_NUL = 0 # Used as self-test nop char
-CHAR_BS_H = 8 # Alternate backspace key
-CHAR_TAB = 9
-CHAR_RETURN = 10
-CHAR_ESCAPE = 27
-CHAR_UP = ord('{') # Used as self-test ASCII char
-CHAR_DOWN = ord('}') # Used as self-test ASCII char
-
-# Color_pair IDs
-CURSES_NORMAL = 0
-CURSES_HIGHLIGHT = 1
-CURSES_WARNING = 2
-
-
-#################################################
-### Debugging support
-###
-
-verbose = False
-
-# Debug: display a message and pause, to slow-step through display update issues
-def alert(msg,screen):
    if msg:
-        screen.addstr(0, 10, '[%-4s]' % msg)
-        screen.refresh();
-        curses.napms(2000)
-    else:
-        if do_line_art:
-            for i in range(10, 24):
-                screen.addch(0, i, curses.ACS_HLINE)
-        else:
-            screen.addstr(0, 10, '-' * 14)
-        screen.refresh();
-
-# Debug: display edge conditions on frame movements
-def debug_frame(nbox_obj):
-    if verbose:
-        nbox_obj.screen.addstr(0, 50, '[I=%2d,O=%2d,S=%3s,H=%2d,M=%4d]' % (
-            nbox_obj.cursor_index,
-            nbox_obj.cursor_offset,
-            nbox_obj.scroll_offset,
-            nbox_obj.inside_height,
-            len(nbox_obj.task_list),
-            ))
-        nbox_obj.screen.refresh();
-
-#
-# Unit test (assumes that 'quilt-native' is always present)
-#
-
-unit_test = os.environ.get('TASK_EXP_UNIT_TEST')
-unit_test_cmnds=[
-    '# Default selected task in primary box',
-    'tst_selected=<PKG>.do_recipe_qa',
-    '# Default selected task in deps',
-    'tst_entry=<TAB>',
-    'tst_selected=',
-    '# Default selected task in rdeps',
-    'tst_entry=<TAB>',
-    'tst_selected=<PKG>.do_fetch',
-    "# Test 'select' back to primary box",
-    'tst_entry=<CR>',
-    '#tst_entry=', # optional injected error
-    'tst_selected=<PKG>.do_fetch',
-    '# Check filter',
-    'tst_entry=/uilt-nativ/',
-    'tst_selected=quilt-native.do_recipe_qa',
-    '# Check print',
-    'tst_entry=p',
-    'tst_printed=quilt-native.do_fetch',
-    '#tst_printed=quilt-foo.do_nothing', # optional injected error
-    '# Done!',
-    'tst_entry=q',
-]
-unit_test_idx=0
-unit_test_command_chars=''
-unit_test_results=[]
-def unit_test_action(active_package):
-    global unit_test_idx
-    global unit_test_command_chars
-    global unit_test_results
-    ret = CHAR_NUL
-    if unit_test_command_chars:
-        ch = unit_test_command_chars[0]
-        unit_test_command_chars = unit_test_command_chars[1:]
-        time.sleep(0.5)
-        ret = ord(ch)
-    else:
-        line = unit_test_cmnds[unit_test_idx]
-        unit_test_idx += 1
-        line = re.sub('#.*', '', line).strip()
-        line = line.replace('<PKG>',active_package.primary[0])
-        line = line.replace('<TAB>','\t').replace('<CR>','\n')
-        line = line.replace('<UP>','{').replace('<DOWN>','}')
-        if not line: line =
'nop=nop' - cmnd,value = line.split('=') - if cmnd == 'tst_entry': - unit_test_command_chars = value - elif cmnd == 'tst_selected': - active_selected = active_package.get_selected() - if active_selected != value: - unit_test_results.append("ERROR:SELFTEST:expected '%s' but got '%s' (NOTE:bitbake may have changed)" % (value,active_selected)) - ret = ord('Q') - else: - unit_test_results.append("Pass:SELFTEST:found '%s'" % (value)) - elif cmnd == 'tst_printed': - result = os.system('grep %s %s' % (value,print_file_name)) - if result: - unit_test_results.append("ERROR:PRINTTEST:expected '%s' in '%s'" % (value,print_file_name)) - ret = ord('Q') - else: - unit_test_results.append("Pass:PRINTTEST:found '%s'" % (value)) - # Return the action (CHAR_NUL for no action til next round) - return(ret) - -# Unit test without an interative terminal (e.g. ptest) -unit_test_noterm = os.environ.get('TASK_EXP_UNIT_TEST_NOTERM') - - -################################################# -### Window frame rendering -### -### By default, use the normal line art. Since -### these extended characters are not ASCII, one -### must use the ncursus API to render them -### The alternate ASCII line art set is optionally -### available via the 'do_line_art' flag - -# By default, render frames using line art -do_line_art = True - -# ASCII render set option -CHAR_HBAR = '-' -CHAR_VBAR = '|' -CHAR_UL_CORNER = '/' -CHAR_UR_CORNER = '\\' -CHAR_LL_CORNER = '\\' -CHAR_LR_CORNER = '/' - -# Box frame drawing with line-art -def line_art_frame(box): - x = box.base_x - y = box.base_y - w = box.width - h = box.height + 1 - - if do_line_art: - for i in range(1, w - 1): - box.screen.addch(y, x + i, curses.ACS_HLINE, box.color) - box.screen.addch(y + h - 1, x + i, curses.ACS_HLINE, box.color) - body_line = "%s" % (' ' * (w - 2)) - for i in range(1, h - 1): - box.screen.addch(y + i, x, curses.ACS_VLINE, box.color) - box.screen.addstr(y + i, x + 1, body_line, box.color) - box.screen.addch(y + i, x + w - 1, curses.ACS_VLINE, box.color) - box.screen.addch(y, x, curses.ACS_ULCORNER, box.color) - box.screen.addch(y, x + w - 1, curses.ACS_URCORNER, box.color) - box.screen.addch(y + h - 1, x, curses.ACS_LLCORNER, box.color) - box.screen.addch(y + h - 1, x + w - 1, curses.ACS_LRCORNER, box.color) - else: - top_line = "%s%s%s" % (CHAR_UL_CORNER,CHAR_HBAR * (w - 2),CHAR_UR_CORNER) - body_line = "%s%s%s" % (CHAR_VBAR,' ' * (w - 2),CHAR_VBAR) - bot_line = "%s%s%s" % (CHAR_UR_CORNER,CHAR_HBAR * (w - 2),CHAR_UL_CORNER) - tag_line = "%s%s%s" % ('[',CHAR_HBAR * (w - 2),']') - # Top bar - box.screen.addstr(y, x, top_line) - # Middle frame - for i in range(1, (h - 1)): - box.screen.addstr(y+i, x, body_line) - # Bottom bar - box.screen.addstr(y + (h - 1), x, bot_line) - -# Connect the separate boxes -def line_art_fixup(box): - if do_line_art: - box.screen.addch(box.base_y+2, box.base_x, curses.ACS_LTEE, box.color) - box.screen.addch(box.base_y+2, box.base_x+box.width-1, curses.ACS_RTEE, box.color) - - -################################################# -### Ncurses box object : box frame object to display -### and manage a sub-window's display elements -### using basic ncurses -### -### Supports: -### * Frame drawing, content (re)drawing -### * Content scrolling via ArrowUp, ArrowDn, PgUp, PgDN, -### * Highlighting for active selected item -### * Content sorting based on selected sort model -### - -class NBox(): - def __init__(self, screen, label, primary, base_x, base_y, width, height): - # Box description - self.screen = screen - self.label = label - 
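# The cursor/scroll fields initialized just below obey a single
# invariant that all of the movement methods (line_up/line_down,
# page_up/page_down, find) preserve:
#
#     cursor_index == scroll_offset + cursor_offset
#
# cursor_index is the absolute row in task_list, scroll_offset is the
# first row currently shown, and cursor_offset is the highlight's row
# within the visible frame. A minimal sketch of one "down" step under
# that invariant (an illustrative helper, not part of the class;
# line_down() below adds the redraw and debug hooks):
#
#     def move_down(box):
#         if box.cursor_index + 1 >= len(box.task_list):
#             return                       # already on the last entry
#         box.cursor_index += 1
#         box.cursor_offset += 1
#         if box.cursor_offset > box.inside_height:
#             box.cursor_offset -= 1       # pin highlight to bottom row
#             box.scroll_offset += 1       # scroll content up instead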
self.primary = primary - self.color = curses.color_pair(CURSES_NORMAL) if screen else None - # Box boundaries - self.base_x = base_x - self.base_y = base_y - self.width = width - self.height = height - # Cursor/scroll management - self.cursor_enable = False - self.cursor_index = 0 # Absolute offset - self.cursor_offset = 0 # Frame centric offset - self.scroll_offset = 0 # Frame centric offset - # Box specific content - # Format of each entry is [package_name,is_primary_recipe,alpha_sort_key,deps_sort_key] - self.task_list = [] - - @property - def inside_width(self): - return(self.width-2) - - @property - def inside_height(self): - return(self.height-2) - - # Populate the box's content, include the sort mappings and is_primary flag - def task_list_append(self,task_name,dep): - task_sort_alpha = task_name - task_sort_deps = dep.get_dep_sort(task_name) - is_primary = False - for primary in self.primary: - if task_name.startswith(primary+'.'): - is_primary = True - if SORT_BITBAKE_ENABLE: - task_sort_bitbake = dep.get_bb_sort(task_name) - self.task_list.append([task_name,is_primary,task_sort_alpha,task_sort_deps,task_sort_bitbake]) - else: - self.task_list.append([task_name,is_primary,task_sort_alpha,task_sort_deps]) - - def reset(self): - self.task_list = [] - self.cursor_index = 0 # Absolute offset - self.cursor_offset = 0 # Frame centric offset - self.scroll_offset = 0 # Frame centric offset - - # Sort the box's content based on the current sort model - def sort(self): - if SORT_ALPHA == sort_model: - self.task_list.sort(key = lambda x: x[TASK_SORT_ALPHA]) - elif SORT_DEPS == sort_model: - self.task_list.sort(key = lambda x: x[TASK_SORT_DEPS]) - elif SORT_BITBAKE == sort_model: - self.task_list.sort(key = lambda x: x[TASK_SORT_BITBAKE]) - - # The target package list (to hightlight), from the command line - def set_primary(self,primary): - self.primary = primary - - # Draw the box's outside frame - def draw_frame(self): - line_art_frame(self) - # Title - self.screen.addstr(self.base_y, - (self.base_x + (self.width//2))-((len(self.label)+2)//2), - '['+self.label+']') - self.screen.refresh() - - # Draw the box's inside text content - def redraw(self): - task_list_len = len(self.task_list) - # Middle frame - body_line = "%s" % (' ' * (self.inside_width-1) ) - for i in range(0,self.inside_height+1): - if i < (task_list_len + self.scroll_offset): - str_ctl = "%%-%ss" % (self.width-3) - # Safety assert - if (i + self.scroll_offset) >= task_list_len: - alert("REDRAW:%2d,%4d,%4d" % (i,self.scroll_offset,task_list_len),self.screen) - break - - task_obj = self.task_list[i + self.scroll_offset] - task = task_obj[TASK_NAME][:self.inside_width-1] - task_primary = task_obj[TASK_PRIMARY] - - if task_primary: - line = str_ctl % task[:self.inside_width-1] - self.screen.addstr(self.base_y+1+i, self.base_x+2, line, curses.A_BOLD) - else: - line = str_ctl % task[:self.inside_width-1] - self.screen.addstr(self.base_y+1+i, self.base_x+2, line) - else: - line = "%s" % (' ' * (self.inside_width-1) ) - self.screen.addstr(self.base_y+1+i, self.base_x+2, line) - self.screen.refresh() - - # Show the current selected task over the bottom of the frame - def show_selected(self,selected_task): - if not selected_task: - selected_task = self.get_selected() - tag_line = "%s%s%s" % ('[',CHAR_HBAR * (self.width-2),']') - self.screen.addstr(self.base_y + self.height, self.base_x, tag_line) - self.screen.addstr(self.base_y + self.height, - (self.base_x + (self.width//2))-((len(selected_task)+2)//2), - '['+selected_task+']') - 
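# The x-coordinate above centres the bracketed label on the bottom
# bar: start from the frame's midpoint and back up by half the
# rendered width, where the "+2" accounts for the '[' and ']' that
# wrap the task name. Worked example with assumed values width=40,
# base_x=0 and the 21-character name 'quilt-native.do_fetch':
#
#     (0 + 40//2) - ((21+2)//2)  ==  20 - 11  ==  9
#
# so the 23-character '[quilt-native.do_fetch]' spans columns 9-31,
# centred on the 40-column frame.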
self.screen.refresh() - - # Load box with new table of content - def update_content(self,task_list): - self.task_list = task_list - if self.cursor_enable: - cursor_update(turn_on=False) - self.cursor_index = 0 - self.cursor_offset = 0 - self.scroll_offset = 0 - self.redraw() - if self.cursor_enable: - cursor_update(turn_on=True) - - # Manage the box's highlighted task and blinking cursor character - def cursor_on(self,is_on): - self.cursor_enable = is_on - self.cursor_update(is_on) - - # High-light the current pointed package, normal for released packages - def cursor_update(self,turn_on=True): - str_ctl = "%%-%ss" % (self.inside_width-1) - try: - if len(self.task_list): - task_obj = self.task_list[self.cursor_index] - task = task_obj[TASK_NAME][:self.inside_width-1] - task_primary = task_obj[TASK_PRIMARY] - task_font = curses.A_BOLD if task_primary else 0 - else: - task = '' - task_font = 0 - except Exception as e: - alert("CURSOR_UPDATE:%s" % (e),self.screen) - return - if turn_on: - self.screen.addstr(self.base_y+1+self.cursor_offset,self.base_x+1,">", curses.color_pair(CURSES_HIGHLIGHT) | curses.A_BLINK) - self.screen.addstr(self.base_y+1+self.cursor_offset,self.base_x+2,str_ctl % task, curses.color_pair(CURSES_HIGHLIGHT) | task_font) - else: - self.screen.addstr(self.base_y+1+self.cursor_offset,self.base_x+1," ") - self.screen.addstr(self.base_y+1+self.cursor_offset,self.base_x+2,str_ctl % task, task_font) - - # Down arrow - def line_down(self): - if len(self.task_list) <= (self.cursor_index+1): - return - self.cursor_update(turn_on=False) - self.cursor_index += 1 - self.cursor_offset += 1 - if self.cursor_offset > (self.inside_height): - self.cursor_offset -= 1 - self.scroll_offset += 1 - self.redraw() - self.cursor_update(turn_on=True) - debug_frame(self) - - # Up arrow - def line_up(self): - if 0 > (self.cursor_index-1): - return - self.cursor_update(turn_on=False) - self.cursor_index -= 1 - self.cursor_offset -= 1 - if self.cursor_offset < 0: - self.cursor_offset += 1 - self.scroll_offset -= 1 - self.redraw() - self.cursor_update(turn_on=True) - debug_frame(self) - - # Page down - def page_down(self): - max_task = len(self.task_list)-1 - if max_task < self.inside_height: - return - self.cursor_update(turn_on=False) - self.cursor_index += 10 - self.cursor_index = min(self.cursor_index,max_task) - self.cursor_offset = min(self.inside_height,self.cursor_index) - self.scroll_offset = self.cursor_index - self.cursor_offset - self.redraw() - self.cursor_update(turn_on=True) - debug_frame(self) - - # Page up - def page_up(self): - max_task = len(self.task_list)-1 - if max_task < self.inside_height: - return - self.cursor_update(turn_on=False) - self.cursor_index -= 10 - self.cursor_index = max(self.cursor_index,0) - self.cursor_offset = max(0, self.inside_height - (max_task - self.cursor_index)) - self.scroll_offset = self.cursor_index - self.cursor_offset - self.redraw() - self.cursor_update(turn_on=True) - debug_frame(self) - - # Return the currently selected task name for this box - def get_selected(self): - if self.task_list: - return(self.task_list[self.cursor_index][TASK_NAME]) - else: - return('') - -################################################# -### The helper sub-windows -### - -# Show persistent help at the top of the screen -class HelpBarView(NBox): - def __init__(self, screen, label, primary, base_x, base_y, width, height): - super(HelpBarView, self).__init__(screen, label, primary, base_x, base_y, width, height) - - def show_help(self,show): - 
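# show_help() blanks the whole bar and then, when show is True,
# centres the key legend on it. Centring a string s on a bar of
# inside_width w is the usual half-difference offset:
#
#     x = base_x + (w - len(s)) // 2
#
# which is exactly the expression used below (the bar_size computed
# there is never referenced afterwards).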
self.screen.addstr(self.base_y,self.base_x, "%s" % (' ' * self.inside_width)) - if show: - help = "Help='?' Filter='/' NextBox= Select= Print='p','P' Quit='q'" - bar_size = self.inside_width - 5 - len(help) - self.screen.addstr(self.base_y,self.base_x+((self.inside_width-len(help))//2), help) - self.screen.refresh() - -# Pop up a detailed Help box -class HelpBoxView(NBox): - def __init__(self, screen, label, primary, base_x, base_y, width, height, dep): - super(HelpBoxView, self).__init__(screen, label, primary, base_x, base_y, width, height) - self.x_pos = 0 - self.y_pos = 0 - self.dep = dep - - # Instantial the pop-up help box - def show_help(self,show): - self.x_pos = self.base_x + 4 - self.y_pos = self.base_y + 2 - - def add_line(line): - if line: - self.screen.addstr(self.y_pos,self.x_pos,line) - self.y_pos += 1 - - # Gather some statisics - dep_count = 0 - rdep_count = 0 - for task_obj in self.dep.depends_model: - if TYPE_DEP == task_obj[DEPENDS_TYPE]: - dep_count += 1 - elif TYPE_RDEP == task_obj[DEPENDS_TYPE]: - rdep_count += 1 - - self.draw_frame() - line_art_fixup(self.dep) - add_line("Quit : 'q' ") - add_line("Filter task names : '/'") - add_line("Tab to next box : ") - add_line("Select a task : ") - add_line("Print task's deps : 'p'") - add_line("Print recipe's deps : 'P'") - add_line(" -> '%s'" % print_file_name) - add_line("Sort toggle : 's'") - add_line(" %s Recipe inner-depends order" % ('->' if (SORT_DEPS == sort_model) else '- ')) - add_line(" %s Alpha-numeric order" % ('->' if (SORT_ALPHA == sort_model) else '- ')) - if SORT_BITBAKE_ENABLE: - add_line(" %s Bitbake order" % ('->' if (TASK_SORT_BITBAKE == sort_model) else '- ')) - add_line("Alternate backspace : ") - add_line("") - add_line("Primary recipes = %s" % ','.join(self.primary)) - add_line("Task count = %4d" % len(self.dep.pkg_model)) - add_line("Deps count = %4d" % dep_count) - add_line("RDeps count = %4d" % rdep_count) - add_line("") - self.screen.addstr(self.y_pos,self.x_pos+7,"", curses.color_pair(CURSES_HIGHLIGHT)) - self.screen.refresh() - c = self.screen.getch() - -# Show a progress bar -class ProgressView(NBox): - def __init__(self, screen, label, primary, base_x, base_y, width, height): - super(ProgressView, self).__init__(screen, label, primary, base_x, base_y, width, height) - - def progress(self,title,current,max): - if title: - self.label = title - else: - title = self.label - if max <=0: max = 10 - bar_size = self.width - 7 - len(title) - bar_done = int( (float(current)/float(max)) * float(bar_size) ) - self.screen.addstr(self.base_y,self.base_x, " %s:[%s%s]" % (title,'*' * bar_done,' ' * (bar_size-bar_done))) - self.screen.refresh() - return(current+1) - - def clear(self): - self.screen.addstr(self.base_y,self.base_x, "%s" % (' ' * self.width)) - self.screen.refresh() - -# Implement a task filter bar -class FilterView(NBox): - SEARCH_NOP = 0 - SEARCH_GO = 1 - SEARCH_CANCEL = 2 - - def __init__(self, screen, label, primary, base_x, base_y, width, height): - super(FilterView, self).__init__(screen, label, primary, base_x, base_y, width, height) - self.do_show = False - self.filter_str = "" - - def clear(self,enable_show=True): - self.filter_str = "" - - def show(self,enable_show=True): - self.do_show = enable_show - if self.do_show: - self.screen.addstr(self.base_y,self.base_x, "[ Filter: %-25s ] '/'=cancel, format='abc' " % self.filter_str[0:25]) - else: - self.screen.addstr(self.base_y,self.base_x, "%s" % (' ' * self.width)) - self.screen.refresh() - - def show_prompt(self): - 
self.screen.addstr(self.base_y,self.base_x + 10 + len(self.filter_str), " ") - self.screen.addstr(self.base_y,self.base_x + 10 + len(self.filter_str), "") - - # Keys specific to the filter box (start/stop filter keys are in the main loop) - def input(self,c,ch): - ret = self.SEARCH_GO - if c in (curses.KEY_BACKSPACE,CHAR_BS_H): - # Backspace - if self.filter_str: - self.filter_str = self.filter_str[0:-1] - self.show() - elif ((ch >= 'a') and (ch <= 'z')) or ((ch >= 'A') and (ch <= 'Z')) or ((ch >= '0') and (ch <= '9')) or (ch in (' ','_','.','-')): - # The isalnum() acts strangly with keypad(True), so explicit bounds - self.filter_str += ch - self.show() - else: - ret = self.SEARCH_NOP - return(ret) - - -################################################# -### The primary dependency windows -### - -# The main list of package tasks -class PackageView(NBox): - def __init__(self, screen, label, primary, base_x, base_y, width, height): - super(PackageView, self).__init__(screen, label, primary, base_x, base_y, width, height) - - # Find and verticaly center a selected task (from filter or from dependent box) - # The 'task_filter_str' can be a full or a partial (filter) task name - def find(self,task_filter_str): - found = False - max = self.height-2 - if not task_filter_str: - return(found) - for i,task_obj in enumerate(self.task_list): - task = task_obj[TASK_NAME] - if task.startswith(task_filter_str): - self.cursor_on(False) - self.cursor_index = i - - # Position selected at vertical center - vcenter = self.inside_height // 2 - if self.cursor_index <= vcenter: - self.scroll_offset = 0 - self.cursor_offset = self.cursor_index - elif self.cursor_index >= (len(self.task_list) - vcenter - 1): - self.cursor_offset = self.inside_height-1 - self.scroll_offset = self.cursor_index - self.cursor_offset - else: - self.cursor_offset = vcenter - self.scroll_offset = self.cursor_index - self.cursor_offset - - self.redraw() - self.cursor_on(True) - found = True - break - return(found) - -# The view of dependent packages -class PackageDepView(NBox): - def __init__(self, screen, label, primary, base_x, base_y, width, height): - super(PackageDepView, self).__init__(screen, label, primary, base_x, base_y, width, height) - -# The view of reverse-dependent packages -class PackageReverseDepView(NBox): - def __init__(self, screen, label, primary, base_x, base_y, width, height): - super(PackageReverseDepView, self).__init__(screen, label, primary, base_x, base_y, width, height) - - -################################################# -### DepExplorer : The parent frame and object -### - -class DepExplorer(NBox): - def __init__(self,screen): - title = "Task Dependency Explorer" - super(DepExplorer, self).__init__(screen, 'Task Dependency Explorer','',0,0,80,23) - - self.screen = screen - self.pkg_model = [] - self.depends_model = [] - self.dep_sort_map = {} - self.bb_sort_map = {} - self.filter_str = '' - self.filter_prev = 'deadbeef' - - if self.screen: - self.help_bar_view = HelpBarView(screen, "Help",'',1,1,79,1) - self.help_box_view = HelpBoxView(screen, "Help",'',0,2,40,20,self) - self.progress_view = ProgressView(screen, "Progress",'',2,1,76,1) - self.filter_view = FilterView(screen, "Filter",'',2,1,76,1) - self.package_view = PackageView(screen, "Package",'alpha', 0,2,40,20) - self.dep_view = PackageDepView(screen, "Dependencies",'beta',40,2,40,10) - self.reverse_view = PackageReverseDepView(screen, "Dependent Tasks",'gamma',40,13,40,9) - self.draw_frames() - - # Draw this main window's frame and all sub-windows - 
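# draw_frames() repaints every sub-window in one pass; the SORT_DEPS
# ordering its task lists display comes from dep_sort_prep(), defined
# a little further below. That sieve is, in effect, Kahn's topological
# sort run per recipe: repeatedly collect the tasks whose intra-recipe
# dependency set has emptied, hand each the next sort key, and delete
# it from every remaining task's dependency set. A minimal sketch of
# the same idea over a plain {task: set_of_deps} dict (a hypothetical
# helper for illustration only):
#
#     def topo_keys(deps):
#         keys, order = {}, 0
#         while deps:
#             ready = [t for t, d in deps.items() if not d]
#             if not ready:
#                 break                    # dependency cycle: bail out,
#                                          # as dep_sort_prep() alerts
#             for t in ready:
#                 keys[t] = "%04d" % order # next key in dependency order
#                 order += 1
#                 del deps[t]
#                 for d in deps.values():
#                     d.discard(t)         # resolved: drop from others
#         return keys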
def draw_frames(self): - self.draw_frame() - self.package_view.draw_frame() - self.dep_view.draw_frame() - self.reverse_view.draw_frame() - if is_filter: - self.filter_view.show(True) - self.filter_view.show_prompt() - else: - self.help_bar_view.show_help(True) - self.package_view.redraw() - self.dep_view.redraw() - self.reverse_view.redraw() - self.show_selected(self.package_view.get_selected()) - line_art_fixup(self) - - # Parse the bitbake dependency event object - def parse(self, depgraph): - for task in depgraph["tdepends"]: - self.pkg_model.insert(0, task) - for depend in depgraph["tdepends"][task]: - self.depends_model.insert (0, (TYPE_DEP, task, depend)) - self.depends_model.insert (0, (TYPE_RDEP, depend, task)) - if self.screen: - self.dep_sort_prep() - - # Prepare the dependency sort order keys - # This method creates sort keys per recipe tasks in - # the order of each recipe's internal dependecies - # Method: - # Filter the tasks in dep order in dep_sort_map = {} - # (a) Find a task that has no dependecies - # Ignore non-recipe specific tasks - # (b) Add it to the sort mapping dict with - # key of "_" - # (c) Remove it as a dependency from the other tasks - # (d) Repeat till all tasks are mapped - # Use placeholders to insure each sub-dict is instantiated - def dep_sort_prep(self): - self.progress_view.progress('DepSort',0,4) - # Init the task base entries - self.progress_view.progress('DepSort',1,4) - dep_table = {} - bb_index = 0 - for task in self.pkg_model: - # First define the incoming bitbake sort order - self.bb_sort_map[task] = "%04d" % (bb_index) - bb_index += 1 - task_group = task[0:task.find('.')] - if task_group not in dep_table: - dep_table[task_group] = {} - dep_table[task_group]['-'] = {} # Placeholder - if task not in dep_table[task_group]: - dep_table[task_group][task] = {} - dep_table[task_group][task]['-'] = {} # Placeholder - # Add the task dependecy entries - self.progress_view.progress('DepSort',2,4) - for task_obj in self.depends_model: - if task_obj[DEPENDS_TYPE] != TYPE_DEP: - continue - task = task_obj[DEPENDS_TASK] - task_dep = task_obj[DEPENDS_DEPS] - task_group = task[0:task.find('.')] - # Only track depends within same group - if task_dep.startswith(task_group+'.'): - dep_table[task_group][task][task_dep] = 1 - self.progress_view.progress('DepSort',3,4) - for task_group in dep_table: - dep_index = 0 - # Whittle down the tasks of each group - this_pass = 1 - do_loop = True - while (len(dep_table[task_group]) > 1) and do_loop: - this_pass += 1 - is_change = False - delete_list = [] - for task in dep_table[task_group]: - if '-' == task: - continue - if 1 == len(dep_table[task_group][task]): - is_change = True - # No more deps, so collect this task... - self.dep_sort_map[task] = "%s_%04d" % (task_group,dep_index) - dep_index += 1 - # ... remove it from other lists as resolved ... - for dep_task in dep_table[task_group]: - if task in dep_table[task_group][dep_task]: - del dep_table[task_group][dep_task][task] - # ... 
and remove it from from the task group - delete_list.append(task) - for task in delete_list: - del dep_table[task_group][task] - if not is_change: - alert("ERROR:DEP_SIEVE_NO_CHANGE:%s" % task_group,self.screen) - do_loop = False - continue - self.progress_view.progress('',4,4) - self.progress_view.clear() - self.help_bar_view.show_help(True) - if len(self.dep_sort_map) != len(self.pkg_model): - alert("ErrorDepSort:%d/%d" % (len(self.dep_sort_map),len(self.pkg_model)),self.screen) - - # Look up a dep sort order key - def get_dep_sort(self,key): - if key in self.dep_sort_map: - return(self.dep_sort_map[key]) - else: - return(key) - - # Look up a bitbake sort order key - def get_bb_sort(self,key): - if key in self.bb_sort_map: - return(self.bb_sort_map[key]) - else: - return(key) - - # Find the selected package in the main frame, update the dependency frames content accordingly - def select(self, package_name, only_update_dependents=False): - if not package_name: - package_name = self.package_view.get_selected() - # alert("SELECT:%s:" % package_name,self.screen) - - if self.filter_str != self.filter_prev: - self.package_view.cursor_on(False) - # Fill of the main package task list using new filter - self.package_view.task_list = [] - for package in self.pkg_model: - if self.filter_str: - if self.filter_str in package: - self.package_view.task_list_append(package,self) - else: - self.package_view.task_list_append(package,self) - self.package_view.sort() - self.filter_prev = self.filter_str - - # Old position is lost, assert new position of previous task (if still filtered in) - self.package_view.cursor_index = 0 - self.package_view.cursor_offset = 0 - self.package_view.scroll_offset = 0 - self.package_view.redraw() - self.package_view.cursor_on(True) - - # Make sure the selected package is in view, with implicit redraw() - if (not only_update_dependents): - self.package_view.find(package_name) - # In case selected name change (i.e. 
filter removed previous) - package_name = self.package_view.get_selected() - - # Filter the package's dependent list to the dependent view - self.dep_view.reset() - for package_def in self.depends_model: - if (package_def[DEPENDS_TYPE] == TYPE_DEP) and (package_def[DEPENDS_TASK] == package_name): - self.dep_view.task_list_append(package_def[DEPENDS_DEPS],self) - self.dep_view.sort() - self.dep_view.redraw() - # Filter the package's dependent list to the reverse dependent view - self.reverse_view.reset() - for package_def in self.depends_model: - if (package_def[DEPENDS_TYPE] == TYPE_RDEP) and (package_def[DEPENDS_TASK] == package_name): - self.reverse_view.task_list_append(package_def[DEPENDS_DEPS],self) - self.reverse_view.sort() - self.reverse_view.redraw() - self.show_selected(package_name) - self.screen.refresh() - - # The print-to-file method - def print_deps(self,whole_group=False): - global is_printed - # Print the selected deptree(s) to a file - if not is_printed: - try: - # Move to backup any exiting file before first write - if os.path.isfile(print_file_name): - os.system('mv -f %s %s' % (print_file_name,print_file_backup_name)) - except Exception as e: - alert(e,self.screen) - alert('',self.screen) - print_list = [] - selected_task = self.package_view.get_selected() - if not selected_task: - return - if not whole_group: - print_list.append(selected_task) - else: - # Use the presorted task_group order from 'package_view' - task_group = selected_task[0:selected_task.find('.')+1] - for task_obj in self.package_view.task_list: - task = task_obj[TASK_NAME] - if task.startswith(task_group): - print_list.append(task) - with open(print_file_name, "a") as fd: - print_max = len(print_list) - print_count = 1 - self.progress_view.progress('Write "%s"' % print_file_name,0,print_max) - for task in print_list: - print_count = self.progress_view.progress('',print_count,print_max) - self.select(task) - self.screen.refresh(); - # Utilize the current print output model - if print_model == PRINT_MODEL_1: - print("=== Dependendency Snapshot ===",file=fd) - print(" = Package =",file=fd) - print(' '+task,file=fd) - # Fill in the matching dependencies - print(" = Dependencies =",file=fd) - for task_obj in self.dep_view.task_list: - print(' '+ task_obj[TASK_NAME],file=fd) - print(" = Dependent Tasks =",file=fd) - for task_obj in self.reverse_view.task_list: - print(' '+ task_obj[TASK_NAME],file=fd) - if print_model == PRINT_MODEL_2: - print("=== Dependendency Snapshot ===",file=fd) - dep_count = len(self.dep_view.task_list) - 1 - for i,task_obj in enumerate(self.dep_view.task_list): - print('%s%s' % ("Dep =" if (i==dep_count) else " ",task_obj[TASK_NAME]),file=fd) - if not self.dep_view.task_list: - print('Dep =',file=fd) - print("Package=%s" % task,file=fd) - for i,task_obj in enumerate(self.reverse_view.task_list): - print('%s%s' % ("RDep =" if (i==0) else " ",task_obj[TASK_NAME]),file=fd) - if not self.reverse_view.task_list: - print('RDep =',file=fd) - curses.napms(2000) - self.progress_view.clear() - self.help_bar_view.show_help(True) - print('',file=fd) - # Restore display to original selected task - self.select(selected_task) - is_printed = True - -################################################# -### Load bitbake data -### - -def bitbake_load(server, eventHandler, params, dep, curses_off, screen): - global bar_len_old - bar_len_old = 0 - - # Support no screen - def progress(msg,count,max): - global bar_len_old - if screen: - dep.progress_view.progress(msg,count,max) - else: - if msg: - if 
bar_len_old: - bar_len_old = 0 - print("\n") - print(f"{msg}: ({count} of {max})") - else: - bar_len = int((count*40)/max) - if bar_len_old != bar_len: - print(f"{'*' * (bar_len-bar_len_old)}",end='',flush=True) - bar_len_old = bar_len - def clear(): - if screen: - dep.progress_view.clear() - def clear_curses(screen): - if screen: - curses_off(screen) - - # - # Trigger bitbake "generateDepTreeEvent" - # - - cmdline = '' - try: - params.updateToServer(server, os.environ.copy()) - params.updateFromServer(server) - cmdline = params.parseActions() - if not cmdline: - clear_curses(screen) - print("ERROR: nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.") - return 1,cmdline - if 'msg' in cmdline and cmdline['msg']: - clear_curses(screen) - print('ERROR: ' + cmdline['msg']) - return 1,cmdline - cmdline = cmdline['action'] - if not cmdline or cmdline[0] != "generateDotGraph": - clear_curses(screen) - print("ERROR: This UI requires the -g option") - return 1,cmdline - ret, error = server.runCommand(["generateDepTreeEvent", cmdline[1], cmdline[2]]) - if error: - clear_curses(screen) - print("ERROR: running command '%s': %s" % (cmdline, error)) - return 1,cmdline - elif not ret: - clear_curses(screen) - print("ERROR: running command '%s': returned %s" % (cmdline, ret)) - return 1,cmdline - except client.Fault as x: - clear_curses(screen) - print("ERROR: XMLRPC Fault getting commandline:\n %s" % x) - return 1,cmdline - except Exception as e: - clear_curses(screen) - print("ERROR: in startup:\n %s" % traceback.format_exc()) - return 1,cmdline - - # - # Receive data from bitbake - # - - progress_total = 0 - load_bitbake = True - quit = False - try: - while load_bitbake: - try: - event = eventHandler.waitEvent(0.25) - if quit: - _, error = server.runCommand(["stateForceShutdown"]) - clear_curses(screen) - if error: - print('Unable to cleanly stop: %s' % error) - break - - if event is None: - continue - - if isinstance(event, bb.event.CacheLoadStarted): - progress_total = event.total - progress('Loading Cache',0,progress_total) - continue - - if isinstance(event, bb.event.CacheLoadProgress): - x = event.current - progress('',x,progress_total) - continue - - if isinstance(event, bb.event.CacheLoadCompleted): - clear() - progress('Bitbake... 
',1,2) - continue - - if isinstance(event, bb.event.ParseStarted): - progress_total = event.total - progress('Processing recipes',0,progress_total) - if progress_total == 0: - continue - - if isinstance(event, bb.event.ParseProgress): - x = event.current - progress('',x,progress_total) - continue - - if isinstance(event, bb.event.ParseCompleted): - progress('Generating dependency tree',0,3) - continue - - if isinstance(event, bb.event.DepTreeGenerated): - progress('Generating dependency tree',1,3) - dep.parse(event._depgraph) - progress('Generating dependency tree',2,3) - - if isinstance(event, bb.command.CommandCompleted): - load_bitbake = False - progress('Generating dependency tree',3,3) - clear() - if screen: - dep.help_bar_view.show_help(True) - continue - - if isinstance(event, bb.event.NoProvider): - clear_curses(screen) - print('ERROR: %s' % event) - - _, error = server.runCommand(["stateShutdown"]) - if error: - print('ERROR: Unable to cleanly shutdown: %s' % error) - return 1,cmdline - - if isinstance(event, bb.command.CommandFailed): - clear_curses(screen) - print('ERROR: ' + str(event)) - return event.exitcode,cmdline - - if isinstance(event, bb.command.CommandExit): - clear_curses(screen) - return event.exitcode,cmdline - - if isinstance(event, bb.cooker.CookerExit): - break - - continue - except EnvironmentError as ioerror: - # ignore interrupted io - if ioerror.args[0] == 4: - pass - except KeyboardInterrupt: - if shutdown == 2: - clear_curses(screen) - print("\nThird Keyboard Interrupt, exit.\n") - break - if shutdown == 1: - clear_curses(screen) - print("\nSecond Keyboard Interrupt, stopping...\n") - _, error = server.runCommand(["stateForceShutdown"]) - if error: - print('Unable to cleanly stop: %s' % error) - if shutdown == 0: - clear_curses(screen) - print("\nKeyboard Interrupt, closing down...\n") - _, error = server.runCommand(["stateShutdown"]) - if error: - print('Unable to cleanly shutdown: %s' % error) - shutdown = shutdown + 1 - pass - except Exception as e: - # Safe exit on error - clear_curses(screen) - print("Exception : %s" % e) - print("Exception in startup:\n %s" % traceback.format_exc()) - - return 0,cmdline - -################################################# -### main -### - -SCREEN_COL_MIN = 83 -SCREEN_ROW_MIN = 26 - -def main(server, eventHandler, params): - global verbose - global sort_model - global print_model - global is_printed - global is_filter - global screen_too_small - - shutdown = 0 - screen_too_small = False - quit = False - - # Unit test with no terminal? 
- if unit_test_noterm: - # Load bitbake, test that there is valid dependency data, then exit - screen = None - print("* UNIT TEST:START") - dep = DepExplorer(screen) - print("* UNIT TEST:BITBAKE FETCH") - ret,cmdline = bitbake_load(server, eventHandler, params, dep, None, screen) - if ret: - print("* UNIT TEST: BITBAKE FAILED") - return ret - # Test the acquired dependency data - quilt_native_deps = 0 - quilt_native_rdeps = 0 - quilt_deps = 0 - quilt_rdeps = 0 - for i,task_obj in enumerate(dep.depends_model): - if TYPE_DEP == task_obj[0]: - task = task_obj[1] - if task.startswith('quilt-native'): - quilt_native_deps += 1 - elif task.startswith('quilt'): - quilt_deps += 1 - elif TYPE_RDEP == task_obj[0]: - task = task_obj[1] - if task.startswith('quilt-native'): - quilt_native_rdeps += 1 - elif task.startswith('quilt'): - quilt_rdeps += 1 - # Print results - failed = False - if 0 < len(dep.depends_model): - print(f"Pass:Bitbake dependency count = {len(dep.depends_model)}") - else: - failed = True - print(f"FAIL:Bitbake dependency count = 0") - if quilt_native_deps: - print(f"Pass:Quilt-native depends count = {quilt_native_deps}") - else: - failed = True - print(f"FAIL:Quilt-native depends count = 0") - if quilt_native_rdeps: - print(f"Pass:Quilt-native rdepends count = {quilt_native_rdeps}") - else: - failed = True - print(f"FAIL:Quilt-native rdepends count = 0") - if quilt_deps: - print(f"Pass:Quilt depends count = {quilt_deps}") - else: - failed = True - print(f"FAIL:Quilt depends count = 0") - if quilt_rdeps: - print(f"Pass:Quilt rdepends count = {quilt_rdeps}") - else: - failed = True - print(f"FAIL:Quilt rdepends count = 0") - print("* UNIT TEST:STOP") - return failed - - # Helper method to dynamically test whether the parent window is too small - def check_screen_size(dep, active_package): - global screen_too_small - rows, cols = screen.getmaxyx() - if (rows >= SCREEN_ROW_MIN) and (cols >= SCREEN_COL_MIN): - if screen_too_small: - # Now big enough, remove error message and redraw screen - dep.draw_frames() - active_package.cursor_on(True) - screen_too_small = False - return True - # Test on App init - if not dep: - # Do not start this app if screen not big enough - curses.endwin() - print("") - print("ERROR(Taskexp_cli): Minimal screen size is %dx%d" % (SCREEN_COL_MIN,SCREEN_ROW_MIN)) - print("Current screen is Cols=%d,Rows=%d" % (cols,rows)) - return False - # First time window too small - if not screen_too_small: - active_package.cursor_on(False) - dep.screen.addstr(0,2,'[BIGGER WINDOW PLEASE]', curses.color_pair(CURSES_WARNING) | curses.A_BLINK) - screen_too_small = True - return False - - # Helper method to turn off curses mode - def curses_off(screen): - if not screen: return - # Safe error exit - screen.keypad(False) - curses.echo() - curses.curs_set(1) - curses.endwin() - - if unit_test_results: - print('\nUnit Test Results:') - for line in unit_test_results: - print(" %s" % line) - - # - # Initialize the ncurses environment - # - - screen = curses.initscr() - try: - if not check_screen_size(None, None): - exit(1) - try: - curses.start_color() - curses.use_default_colors() - curses.init_pair(0xFF, curses.COLOR_BLACK, curses.COLOR_WHITE) - curses.init_pair(CURSES_NORMAL, curses.COLOR_WHITE, curses.COLOR_BLACK) - curses.init_pair(CURSES_HIGHLIGHT, curses.COLOR_WHITE, curses.COLOR_BLUE) - curses.init_pair(CURSES_WARNING, curses.COLOR_WHITE, curses.COLOR_RED) - except: - curses.endwin() - print("") - print("ERROR(Taskexp_cli): Requires 256 colors. 
Please use this or the equivalent:") - print(" $ export TERM='xterm-256color'") - exit(1) - - screen.keypad(True) - curses.noecho() - curses.curs_set(0) - screen.refresh() - except Exception as e: - # Safe error exit - curses_off(screen) - print("Exception : %s" % e) - print("Exception in startup:\n %s" % traceback.format_exc()) - exit(1) - - try: - # - # Instantiate the presentation layers - # - - dep = DepExplorer(screen) - - # - # Prepare bitbake - # - - # Fetch bitbake dependency data - ret,cmdline = bitbake_load(server, eventHandler, params, dep, curses_off, screen) - if ret: return ret - - # - # Preset the views - # - - # Cmdline example = ['generateDotGraph', ['acl', 'zlib'], 'build'] - primary_packages = cmdline[1] - dep.package_view.set_primary(primary_packages) - dep.dep_view.set_primary(primary_packages) - dep.reverse_view.set_primary(primary_packages) - dep.help_box_view.set_primary(primary_packages) - dep.help_bar_view.show_help(True) - active_package = dep.package_view - active_package.cursor_on(True) - dep.select(primary_packages[0]+'.') - if unit_test: - alert('UNIT_TEST',screen) - - # Helper method to start/stop the filter feature - def filter_mode(new_filter_status): - global is_filter - if is_filter == new_filter_status: - # Ignore no changes - return - if not new_filter_status: - # Turn off - curses.curs_set(0) - #active_package.cursor_on(False) - active_package = dep.package_view - active_package.cursor_on(True) - is_filter = False - dep.help_bar_view.show_help(True) - dep.filter_str = '' - dep.select('') - else: - # Turn on - curses.curs_set(1) - dep.help_bar_view.show_help(False) - dep.filter_view.clear() - dep.filter_view.show(True) - dep.filter_view.show_prompt() - is_filter = True - - # - # Main user loop - # - - while not quit: - if is_filter: - dep.filter_view.show_prompt() - if unit_test: - c = unit_test_action(active_package) - else: - c = screen.getch() - ch = chr(c) - - # Do not draw if window now too small - if not check_screen_size(dep,active_package): - continue - - if verbose: - if c == CHAR_RETURN: - screen.addstr(0, 4, "|%3d,CR |" % (c)) - else: - screen.addstr(0, 4, "|%3d,%3s|" % (c,chr(c))) - - # pre-map alternate filter close keys - if is_filter and (c == CHAR_ESCAPE): - # Alternate exit from filter - ch = '/' - c = ord(ch) - - # Filter and non-filter mode command keys - # https://docs.python.org/3/library/curses.html - if c in (curses.KEY_UP,CHAR_UP): - active_package.line_up() - if active_package == dep.package_view: - dep.select('',only_update_dependents=True) - elif c in (curses.KEY_DOWN,CHAR_DOWN): - active_package.line_down() - if active_package == dep.package_view: - dep.select('',only_update_dependents=True) - elif curses.KEY_PPAGE == c: - active_package.page_up() - if active_package == dep.package_view: - dep.select('',only_update_dependents=True) - elif curses.KEY_NPAGE == c: - active_package.page_down() - if active_package == dep.package_view: - dep.select('',only_update_dependents=True) - elif CHAR_TAB == c: - # Tab between boxes - active_package.cursor_on(False) - if active_package == dep.package_view: - active_package = dep.dep_view - elif active_package == dep.dep_view: - active_package = dep.reverse_view - else: - active_package = dep.package_view - active_package.cursor_on(True) - elif curses.KEY_BTAB == c: - # Shift-Tab reverse between boxes - active_package.cursor_on(False) - if active_package == dep.package_view: - active_package = dep.reverse_view - elif active_package == dep.reverse_view: - active_package = dep.dep_view - else: - 
active_package = dep.package_view - active_package.cursor_on(True) - elif (CHAR_RETURN == c): - # CR to select - selected = active_package.get_selected() - if selected: - active_package.cursor_on(False) - active_package = dep.package_view - filter_mode(False) - dep.select(selected) - else: - filter_mode(False) - dep.select(primary_packages[0]+'.') - - elif '/' == ch: # Enter/exit dep.filter_view - if is_filter: - filter_mode(False) - else: - filter_mode(True) - elif is_filter: - # If in filter mode, re-direct all these other keys to the filter box - result = dep.filter_view.input(c,ch) - dep.filter_str = dep.filter_view.filter_str - dep.select('') - - # Non-filter mode command keys - elif 'p' == ch: - dep.print_deps(whole_group=False) - elif 'P' == ch: - dep.print_deps(whole_group=True) - elif 'w' == ch: - # Toggle the print model - if print_model == PRINT_MODEL_1: - print_model = PRINT_MODEL_2 - else: - print_model = PRINT_MODEL_1 - elif 's' == ch: - # Toggle the sort model - if sort_model == SORT_DEPS: - sort_model = SORT_ALPHA - elif sort_model == SORT_ALPHA: - if SORT_BITBAKE_ENABLE: - sort_model = TASK_SORT_BITBAKE - else: - sort_model = SORT_DEPS - else: - sort_model = SORT_DEPS - active_package.cursor_on(False) - current_task = active_package.get_selected() - dep.package_view.sort() - dep.dep_view.sort() - dep.reverse_view.sort() - active_package = dep.package_view - active_package.cursor_on(True) - dep.select(current_task) - # Announce the new sort model - alert("SORT=%s" % ("ALPHA" if (sort_model == SORT_ALPHA) else "DEPS"),screen) - alert('',screen) - - elif 'q' == ch: - quit = True - elif ch in ('h','?'): - dep.help_box_view.show_help(True) - dep.select(active_package.get_selected()) - - # - # Debugging commands - # - - elif 'V' == ch: - verbose = not verbose - alert('Verbose=%s' % str(verbose),screen) - alert('',screen) - elif 'R' == ch: - screen.refresh() - elif 'B' == ch: - # Progress bar unit test - dep.progress_view.progress('Test',0,40) - curses.napms(1000) - dep.progress_view.progress('',10,40) - curses.napms(1000) - dep.progress_view.progress('',20,40) - curses.napms(1000) - dep.progress_view.progress('',30,40) - curses.napms(1000) - dep.progress_view.progress('',40,40) - curses.napms(1000) - dep.progress_view.clear() - dep.help_bar_view.show_help(True) - elif 'Q' == ch: - # Simulated error - curses_off(screen) - print('ERROR: simulated error exit') - return 1 - - # Safe exit - curses_off(screen) - except Exception as e: - # Safe exit on error - curses_off(screen) - print("Exception : %s" % e) - print("Exception in startup:\n %s" % traceback.format_exc()) - - # Reminder to pick up your printed results - if is_printed: - print("") - print("You have output ready!") - print(" * Your printed dependency file is: %s" % print_file_name) - print(" * Your previous results saved in: %s" % print_file_backup_name) - print("") diff --git a/bitbake/lib/bb/ui/teamcity.py b/bitbake/lib/bb/ui/teamcity.py deleted file mode 100644 index 7eeaab8d63..0000000000 --- a/bitbake/lib/bb/ui/teamcity.py +++ /dev/null @@ -1,391 +0,0 @@ -# -# TeamCity UI Implementation -# -# Implements a TeamCity frontend for the BitBake utility, via service messages. -# See https://www.jetbrains.com/help/teamcity/build-script-interaction-with-teamcity.html -# -# Based on ncurses.py and knotty.py, variously by Michael Lauer and Richard Purdie -# -# Copyright (C) 2006 Michael 'Mickey' Lauer -# Copyright (C) 2006-2012 Richard Purdie -# Copyright (C) 2018-2020 Agilent Technologies, Inc. 
-# -# SPDX-License-Identifier: GPL-2.0-only -# -# Author: Chris Laplante - -from __future__ import division - -import datetime -import logging -import math -import os -import re -import sys -import xmlrpc.client -from collections import deque - -import bb -import bb.build -import bb.command -import bb.cooker -import bb.event -import bb.runqueue -from bb.ui import uihelper - -logger = logging.getLogger("BitBake") - - -class TeamCityUI: - def __init__(self): - self._block_stack = [] - self._last_progress_state = None - - @classmethod - def escape_service_value(cls, value): - """ - Escape a value for inclusion in a service message. TeamCity uses the vertical pipe character for escaping. - See: https://confluence.jetbrains.com/display/TCD10/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-Escapedvalues - """ - return re.sub(r"(['|\[\]])", r"|\1", value).replace("\n", "|n").replace("\r", "|r") - - @classmethod - def emit_service_message(cls, message_type, **kwargs): - print(cls.format_service_message(message_type, **kwargs), flush=True) - - @classmethod - def format_service_message(cls, message_type, **kwargs): - payload = " ".join(["{0}='{1}'".format(k, cls.escape_service_value(v)) for k, v in kwargs.items()]) - return "##teamcity[{0} {1}]".format(message_type, payload) - - @classmethod - def emit_simple_service_message(cls, message_type, message): - print(cls.format_simple_service_message(message_type, message), flush=True) - - @classmethod - def format_simple_service_message(cls, message_type, message): - return "##teamcity[{0} '{1}']".format(message_type, cls.escape_service_value(message)) - - @classmethod - def format_build_message(cls, text, status): - return cls.format_service_message("message", text=text, status=status) - - def block_start(self, name): - self._block_stack.append(name) - self.emit_service_message("blockOpened", name=name) - - def block_end(self): - if self._block_stack: - name = self._block_stack.pop() - self.emit_service_message("blockClosed", name=name) - - def progress(self, message, percent, extra=None): - now = datetime.datetime.now() - # Capture the 0/100 endpoints before formatting turns percent into a string - force = percent in [0, 100] - percent = "{0: >3.0f}".format(percent) - - report = False - if not self._last_progress_state \ - or (self._last_progress_state[0] == message - and self._last_progress_state[1] != percent - and (now - self._last_progress_state[2]).microseconds >= 5000) \ - or self._last_progress_state[0] != message: - report = True - self._last_progress_state = (message, percent, now) - - if report or force: - self.emit_simple_service_message("progressMessage", "{0}: {1}%{2}".format(message, percent, extra or "")) - - -class TeamcityLogFormatter(logging.Formatter): - def format(self, record): - details = "" - if hasattr(record, 'bb_exc_formatted'): - details = ''.join(record.bb_exc_formatted) - - if record.levelno in [bb.msg.BBLogFormatter.ERROR, bb.msg.BBLogFormatter.CRITICAL]: - # ERROR gets a separate errorDetails field - msg = TeamCityUI.format_service_message("message", text=record.getMessage(), status="ERROR", - errorDetails=details) - else: - payload = record.getMessage() - if details: - payload += "\n" + details - if record.levelno == bb.msg.BBLogFormatter.PLAIN: - msg = payload - elif record.levelno == bb.msg.BBLogFormatter.WARNING: - msg = TeamCityUI.format_service_message("message", text=payload, status="WARNING") - else: - msg = TeamCityUI.format_service_message("message", text=payload, status="NORMAL") - - return msg - - -_evt_list = ["bb.runqueue.runQueueExitWait", "bb.event.LogExecTTY", 
"logging.LogRecord", - "bb.build.TaskFailed", "bb.build.TaskBase", "bb.event.ParseStarted", - "bb.event.ParseProgress", "bb.event.ParseCompleted", "bb.event.CacheLoadStarted", - "bb.event.CacheLoadProgress", "bb.event.CacheLoadCompleted", "bb.command.CommandFailed", - "bb.command.CommandExit", "bb.command.CommandCompleted", "bb.cooker.CookerExit", - "bb.event.MultipleProviders", "bb.event.NoProvider", "bb.runqueue.sceneQueueTaskStarted", - "bb.runqueue.runQueueTaskStarted", "bb.runqueue.runQueueTaskFailed", "bb.runqueue.sceneQueueTaskFailed", - "bb.event.BuildBase", "bb.build.TaskStarted", "bb.build.TaskSucceeded", "bb.build.TaskFailedSilent", - "bb.build.TaskProgress", "bb.event.ProcessStarted", "bb.event.ProcessProgress", "bb.event.ProcessFinished"] - - -def _log_settings_from_server(server): - # Get values of variables which control our output - includelogs, error = server.runCommand(["getVariable", "BBINCLUDELOGS"]) - if error: - logger.error("Unable to get the value of BBINCLUDELOGS variable: %s" % error) - raise BaseException(error) - loglines, error = server.runCommand(["getVariable", "BBINCLUDELOGS_LINES"]) - if error: - logger.error("Unable to get the value of BBINCLUDELOGS_LINES variable: %s" % error) - raise BaseException(error) - return includelogs, loglines - - -def main(server, eventHandler, params): - params.updateToServer(server, os.environ.copy()) - - includelogs, loglines = _log_settings_from_server(server) - - ui = TeamCityUI() - - helper = uihelper.BBUIHelper() - - console = logging.StreamHandler(sys.stdout) - errconsole = logging.StreamHandler(sys.stderr) - format = TeamcityLogFormatter() - if params.options.quiet == 0: - forcelevel = None - elif params.options.quiet > 2: - forcelevel = bb.msg.BBLogFormatter.ERROR - else: - forcelevel = bb.msg.BBLogFormatter.WARNING - console.setFormatter(format) - errconsole.setFormatter(format) - if not bb.msg.has_console_handler(logger): - logger.addHandler(console) - logger.addHandler(errconsole) - - if params.options.remote_server and params.options.kill_server: - server.terminateServer() - return - - if params.observe_only: - logger.error("Observe-only mode not supported in this UI") - return 1 - - llevel, debug_domains = bb.msg.constructLogOptions() - server.runCommand(["setEventMask", server.getEventHandle(), llevel, debug_domains, _evt_list]) - - try: - params.updateFromServer(server) - cmdline = params.parseActions() - if not cmdline: - logger.error("No task given") - return 1 - if 'msg' in cmdline and cmdline['msg']: - logger.error(cmdline['msg']) - return 1 - cmdline = cmdline['action'] - ret, error = server.runCommand(cmdline) - if error: - logger.error("{0}: {1}".format(cmdline, error)) - return 1 - elif not ret: - logger.error("Couldn't get default commandline: {0}".format(re)) - return 1 - except xmlrpc.client.Fault as x: - logger.error("XMLRPC Fault getting commandline: {0}".format(x)) - return 1 - - active_process_total = None - is_tasks_running = False - - while True: - try: - event = eventHandler.waitEvent(0.25) - if not event: - continue - - helper.eventHandler(event) - - if isinstance(event, bb.build.TaskBase): - logger.info(event._message) - if isinstance(event, logging.LogRecord): - # Don't report sstate failures as errors, since Yocto will just run the tasks for real - if event.msg == "No suitable staging package found" or (event.msg.startswith( - "Fetcher failure: Unable to find file") and "downloadfilename" in event.msg and "sstate" in event.msg): - event.levelno = bb.msg.BBLogFormatter.WARNING - if 
event.taskpid != 0: - # For "normal" logging conditions, don't show note logs from tasks - # but do show them if the user has changed the default log level to - # include verbose/debug messages - if event.levelno <= bb.msg.BBLogFormatter.NOTE and (event.levelno < llevel or ( - event.levelno == bb.msg.BBLogFormatter.NOTE and llevel != bb.msg.BBLogFormatter.VERBOSE)): - continue - - # Prefix task messages with recipe/task - if event.taskpid in helper.running_tasks and event.levelno != bb.msg.BBLogFormatter.PLAIN: - taskinfo = helper.running_tasks[event.taskpid] - event.msg = taskinfo['title'] + ': ' + event.msg - if hasattr(event, 'fn'): - event.msg = event.fn + ': ' + event.msg - logger.handle(event) - if isinstance(event, bb.build.TaskFailedSilent): - logger.warning("Logfile for failed setscene task is %s" % event.logfile) - continue - if isinstance(event, bb.build.TaskFailed): - rt = "{0}-{1}:{2}".format(event.pn, event.pv.replace("AUTOINC", "0"), event.task) - - logfile = event.logfile - if not logfile or not os.path.exists(logfile): - TeamCityUI.emit_service_message("buildProblem", description="{0}\nUnknown failure (no log file available)".format(rt)) - if not event.task.endswith("_setscene"): - server.runCommand(["stateForceShutdown"]) - continue - - details = deque(maxlen=loglines) - error_lines = [] - if includelogs and not event.errprinted: - with open(logfile, "r") as f: - while True: - line = f.readline() - if not line: - break - line = line.rstrip() - details.append(' | %s' % line) - # TODO: a less stupid check for errors - if (event.task == "do_compile") and ("error:" in line): - error_lines.append(line) - - if error_lines: - TeamCityUI.emit_service_message("compilationStarted", compiler=rt) - for line in error_lines: - TeamCityUI.emit_service_message("message", text=line, status="ERROR") - TeamCityUI.emit_service_message("compilationFinished", compiler=rt) - else: - TeamCityUI.emit_service_message("buildProblem", description=rt) - - err = "Logfile of failure stored in: %s" % logfile - if details: - ui.block_start("{0} task log".format(rt)) - # TeamCity seems to choke on service messages longer than about 63800 characters, so if error - # details is longer than, say, 60000, batch it up into several messages. - first_message = True - while details: - detail_len = 0 - batch = deque() - while details and detail_len < 60000: - # TODO: This code doesn't bother to handle lines that themselves are extremely long. 
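- # One illustrative way to pre-wrap such lines before batching (a sketch
- # only, assuming the stdlib textwrap module; the 60000-character budget
- # matches the batch limit used in this loop):
- # import textwrap
- # details = deque(seg for l in details for seg in (textwrap.wrap(l, 60000) or ['']))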
- line = details.popleft() - batch.append(line) - detail_len += len(line) - - if first_message: - batch.appendleft("Log data follows:") - first_message = False - TeamCityUI.emit_service_message("message", text=err, status="ERROR", - errorDetails="\n".join(batch)) - else: - TeamCityUI.emit_service_message("message", text="[continued]", status="ERROR", - errorDetails="\n".join(batch)) - ui.block_end() - else: - TeamCityUI.emit_service_message("message", text=err, status="ERROR", errorDetails="") - - if not event.task.endswith("_setscene"): - server.runCommand(["stateForceShutdown"]) - - if isinstance(event, bb.event.ProcessStarted): - if event.processname in ["Initialising tasks", "Checking sstate mirror object availability"]: - active_process_total = event.total - ui.block_start(event.processname) - if isinstance(event, bb.event.ProcessFinished): - if event.processname in ["Initialising tasks", "Checking sstate mirror object availability"]: - ui.progress(event.processname, 100) - ui.block_end() - if isinstance(event, bb.event.ProcessProgress): - if event.processname in ["Initialising tasks", - "Checking sstate mirror object availability"] and active_process_total != 0: - ui.progress(event.processname, event.progress * 100 / active_process_total) - if isinstance(event, bb.event.CacheLoadStarted): - ui.block_start("Loading cache") - if isinstance(event, bb.event.CacheLoadProgress): - if event.total != 0: - ui.progress("Loading cache", math.floor(event.current * 100 / event.total)) - if isinstance(event, bb.event.CacheLoadCompleted): - ui.progress("Loading cache", 100) - ui.block_end() - if isinstance(event, bb.event.ParseStarted): - ui.block_start("Parsing recipes and checking upstream revisions") - if isinstance(event, bb.event.ParseProgress): - if event.total != 0: - ui.progress("Parsing recipes", math.floor(event.current * 100 / event.total)) - if isinstance(event, bb.event.ParseCompleted): - ui.progress("Parsing recipes", 100) - ui.block_end() - if isinstance(event, bb.command.CommandCompleted): - return - if isinstance(event, bb.command.CommandFailed): - logger.error(str(event)) - return 1 - if isinstance(event, bb.event.MultipleProviders): - logger.warning(str(event)) - continue - if isinstance(event, bb.event.NoProvider): - logger.error(str(event)) - continue - if isinstance(event, bb.command.CommandExit): - return - if isinstance(event, bb.cooker.CookerExit): - return - if isinstance(event, bb.runqueue.sceneQueueTaskStarted): - if not is_tasks_running: - is_tasks_running = True - ui.block_start("Running tasks") - if event.stats.total != 0: - ui.progress("Running setscene tasks", ( - event.stats.completed + event.stats.active + event.stats.failed + 1) * 100 / event.stats.total) - if isinstance(event, bb.runqueue.runQueueTaskStarted): - if not is_tasks_running: - is_tasks_running = True - ui.block_start("Running tasks") - if event.stats.total != 0: - pseudo_total = event.stats.total - event.stats.skipped - pseudo_complete = event.stats.completed + event.stats.active - event.stats.skipped + event.stats.failed + 1 - # TODO: sometimes this gives over 100% - ui.progress("Running runqueue tasks", (pseudo_complete) * 100 / pseudo_total, - " ({0}/{1})".format(pseudo_complete, pseudo_total)) - if isinstance(event, bb.runqueue.sceneQueueTaskFailed): - logger.warning(str(event)) - continue - if isinstance(event, bb.runqueue.runQueueTaskFailed): - logger.error(str(event)) - return 1 - if isinstance(event, bb.event.LogExecTTY): - pass - except EnvironmentError as ioerror: - # ignore interrupted io 
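- # (errno 4 is EINTR, an interrupted system call; errno.EINTR from the
- # stdlib errno module names the same value)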
- if ioerror.args[0] == 4: - pass - except Exception as ex: - logger.error(str(ex)) - - # except KeyboardInterrupt: - # if shutdown == 2: - # mw.appendText("Third Keyboard Interrupt, exit.\n") - # exitflag = True - # if shutdown == 1: - # mw.appendText("Second Keyboard Interrupt, stopping...\n") - # _, error = server.runCommand(["stateForceShutdown"]) - # if error: - # print("Unable to cleanly stop: %s" % error) - # if shutdown == 0: - # mw.appendText("Keyboard Interrupt, closing down...\n") - # _, error = server.runCommand(["stateShutdown"]) - # if error: - # print("Unable to cleanly shutdown: %s" % error) - # shutdown = shutdown + 1 - # pass diff --git a/bitbake/lib/bb/ui/toasterui.py b/bitbake/lib/bb/ui/toasterui.py deleted file mode 100644 index 6bd21f1844..0000000000 --- a/bitbake/lib/bb/ui/toasterui.py +++ /dev/null @@ -1,479 +0,0 @@ -# -# BitBake ToasterUI Implementation -# based on (No)TTY UI Implementation by Richard Purdie -# -# Handling output to TTYs or files (no TTY) -# -# Copyright (C) 2006-2012 Richard Purdie -# Copyright (C) 2013 Intel Corporation -# -# SPDX-License-Identifier: GPL-2.0-only -# - -from __future__ import division -import time -import sys -try: - import bb -except RuntimeError as exc: - sys.exit(str(exc)) - -from bb.ui import uihelper -from bb.ui.buildinfohelper import BuildInfoHelper - -import bb.msg -import logging -import os - -# pylint: disable=invalid-name -# module properties for UI modules are read by bitbake and the contract should not be broken - - -featureSet = [bb.cooker.CookerFeatures.HOB_EXTRA_CACHES, bb.cooker.CookerFeatures.BASEDATASTORE_TRACKING, bb.cooker.CookerFeatures.SEND_SANITYEVENTS] - -logger = logging.getLogger("ToasterLogger") -interactive = sys.stdout.isatty() - -def _log_settings_from_server(server): - # Get values of variables which control our output - includelogs, error = server.runCommand(["getVariable", "BBINCLUDELOGS"]) - if error: - logger.error("Unable to get the value of BBINCLUDELOGS variable: %s", error) - raise BaseException(error) - loglines, error = server.runCommand(["getVariable", "BBINCLUDELOGS_LINES"]) - if error: - logger.error("Unable to get the value of BBINCLUDELOGS_LINES variable: %s", error) - raise BaseException(error) - consolelogfile, error = server.runCommand(["getVariable", "BB_CONSOLELOG"]) - if error: - logger.error("Unable to get the value of BB_CONSOLELOG variable: %s", error) - raise BaseException(error) - return consolelogfile - -# create a log file for a single build and direct the logger at it; -# log file name is timestamped to the millisecond (depending -# on system clock accuracy) to ensure it doesn't overlap with -# other log file names -# -# returns (log file, path to log file) for a build -def _open_build_log(log_dir): - format_str = "%(levelname)s: %(message)s" - - now = time.time() - now_ms = int((now - int(now)) * 1000) - time_str = time.strftime('build_%Y%m%d_%H%M%S', time.localtime(now)) - log_file_name = time_str + ('.%d.log' % now_ms) - build_log_file_path = os.path.join(log_dir, log_file_name) - - build_log = logging.FileHandler(build_log_file_path) - - logformat = bb.msg.BBLogFormatter(format_str) - build_log.setFormatter(logformat) - - bb.msg.addDefaultlogFilter(build_log) - logger.addHandler(build_log) - - return (build_log, build_log_file_path) - -# stop logging to the build log if it exists -def _close_build_log(build_log): - if build_log: - build_log.flush() - build_log.close() - logger.removeHandler(build_log) - -_evt_list = [ - "bb.build.TaskBase", - "bb.build.TaskFailed", - 
"bb.build.TaskFailedSilent", - "bb.build.TaskStarted", - "bb.build.TaskSucceeded", - "bb.command.CommandCompleted", - "bb.command.CommandExit", - "bb.command.CommandFailed", - "bb.cooker.CookerExit", - "bb.event.BuildInit", - "bb.event.BuildCompleted", - "bb.event.BuildStarted", - "bb.event.CacheLoadCompleted", - "bb.event.CacheLoadProgress", - "bb.event.CacheLoadStarted", - "bb.event.ConfigParsed", - "bb.event.DepTreeGenerated", - "bb.event.LogExecTTY", - "bb.event.MetadataEvent", - "bb.event.MultipleProviders", - "bb.event.NoProvider", - "bb.event.ParseCompleted", - "bb.event.ParseProgress", - "bb.event.ParseStarted", - "bb.event.RecipeParsed", - "bb.event.SanityCheck", - "bb.event.SanityCheckPassed", - "bb.event.TreeDataPreparationCompleted", - "bb.event.TreeDataPreparationStarted", - "bb.runqueue.runQueueTaskCompleted", - "bb.runqueue.runQueueTaskFailed", - "bb.runqueue.runQueueTaskSkipped", - "bb.runqueue.runQueueTaskStarted", - "bb.runqueue.sceneQueueTaskCompleted", - "bb.runqueue.sceneQueueTaskFailed", - "bb.runqueue.sceneQueueTaskStarted", - "logging.LogRecord"] - -def main(server, eventHandler, params): - # set to a logging.FileHandler instance when a build starts; - # see _open_build_log() - build_log = None - - # set to the log path when a build starts - build_log_file_path = None - - helper = uihelper.BBUIHelper() - - if not params.observe_only: - params.updateToServer(server, os.environ.copy()) - params.updateFromServer(server) - - # TODO don't use log output to determine when bitbake has started - # - # WARNING: this log handler cannot be removed, as localhostbecontroller - # relies on output in the toaster_ui.log file to determine whether - # the bitbake server has started, which only happens if - # this logger is setup here (see the TODO in the loop below) - console = logging.StreamHandler(sys.stdout) - format_str = "%(levelname)s: %(message)s" - formatter = bb.msg.BBLogFormatter(format_str) - bb.msg.addDefaultlogFilter(console) - console.setFormatter(formatter) - logger.addHandler(console) - logger.setLevel(logging.INFO) - llevel, debug_domains = bb.msg.constructLogOptions() - result, error = server.runCommand(["setEventMask", server.getEventHandle(), llevel, debug_domains, _evt_list]) - if not result or error: - logger.error("can't set event mask: %s", error) - return 1 - - # verify and warn - build_history_enabled = True - inheritlist, _ = server.runCommand(["getVariable", "INHERIT"]) - - if not "buildhistory" in inheritlist.split(" "): - logger.warning("buildhistory is not enabled. Please enable INHERIT += \"buildhistory\" to see image details.") - build_history_enabled = False - - if not "buildstats" in inheritlist.split(" "): - logger.warning("buildstats is not enabled. Please enable INHERIT += \"buildstats\" to generate build statistics.") - - if not params.observe_only: - cmdline = params.parseActions() - if not cmdline: - print("Nothing to do. 
Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.") - return 1 - if 'msg' in cmdline and cmdline['msg']: - logger.error(cmdline['msg']) - return 1 - - ret, error = server.runCommand(cmdline['action']) - if error: - logger.error("Command '%s' failed: %s" % (cmdline, error)) - return 1 - elif not ret: - logger.error("Command '%s' failed: returned %s" % (cmdline, ret)) - return 1 - - # set to 1 when toasterui needs to shut down - main.shutdown = 0 - - interrupted = False - return_value = 0 - errors = 0 - warnings = 0 - taskfailures = [] - first = True - - buildinfohelper = BuildInfoHelper(server, build_history_enabled, - os.getenv('TOASTER_BRBE')) - - # write our own log files into bitbake's log directory; - # we're only interested in the path to the parent directory of - # this file, as we're writing our own logs into the same directory - consolelogfile = _log_settings_from_server(server) - log_dir = os.path.dirname(consolelogfile) - bb.utils.mkdirhier(log_dir) - - while True: - try: - event = eventHandler.waitEvent(0.25) - if first: - first = False - - # TODO don't use log output to determine when bitbake has started - # - # this is the line localhostbecontroller needs to - # see in toaster_ui.log which it uses to decide whether - # the bitbake server has started... - logger.info("ToasterUI waiting for events") - - if event is None: - if main.shutdown > 0: - # if shutting down, close any open build log first - _close_build_log(build_log) - - break - continue - - helper.eventHandler(event) - - # pylint: disable=protected-access - # the code will look into the protected variables of the event; no easy way around this - - if isinstance(event, bb.event.HeartbeatEvent): - continue - - if isinstance(event, bb.event.ParseStarted): - if not (build_log and build_log_file_path): - build_log, build_log_file_path = _open_build_log(log_dir) - - buildinfohelper.store_started_build() - buildinfohelper.save_build_log_file_path(build_log_file_path) - buildinfohelper.set_recipes_to_parse(event.total) - continue - - # create a build object in buildinfohelper from either BuildInit - # (if available) or BuildStarted (for jethro and previous versions) - if isinstance(event, (bb.event.BuildStarted, bb.event.BuildInit)): - if not (build_log and build_log_file_path): - build_log, build_log_file_path = _open_build_log(log_dir) - - buildinfohelper.save_build_targets(event) - buildinfohelper.save_build_log_file_path(build_log_file_path) - - # get additional data from BuildStarted - if isinstance(event, bb.event.BuildStarted): - buildinfohelper.save_build_layers_and_variables() - continue - - if isinstance(event, bb.event.ParseProgress): - buildinfohelper.set_recipes_parsed(event.current) - continue - - if isinstance(event, bb.event.ParseCompleted): - buildinfohelper.set_recipes_parsed(event.total) - continue - - if isinstance(event, (bb.build.TaskStarted, bb.build.TaskSucceeded, bb.build.TaskFailedSilent)): - buildinfohelper.update_and_store_task(event) - logger.info("Logfile for task %s", event.logfile) - continue - - if isinstance(event, bb.build.TaskBase): - logger.info(event._message) - - if isinstance(event, bb.event.LogExecTTY): - logger.info(event.msg) - continue - - if isinstance(event, logging.LogRecord): - if event.levelno == -1: - event.levelno = formatter.ERROR - - buildinfohelper.store_log_event(event) - - if event.levelno >= formatter.ERROR: - errors = errors + 1 - elif event.levelno == formatter.WARNING: - warnings = warnings + 1 - - # For "normal" logging 
conditions, don't show note logs from tasks - # but do show them if the user has changed the default log level to - # include verbose/debug messages - if event.taskpid != 0 and event.levelno <= formatter.NOTE: - continue - - logger.handle(event) - continue - - if isinstance(event, bb.build.TaskFailed): - buildinfohelper.update_and_store_task(event) - logfile = event.logfile - if logfile and os.path.exists(logfile): - bb.error("Logfile of failure stored in: %s" % logfile) - continue - - # these events are unprocessed now, but may be used in the future to log - # timing and error information from the parsing phase in Toaster - if isinstance(event, (bb.event.SanityCheckPassed, bb.event.SanityCheck)): - continue - if isinstance(event, bb.event.CacheLoadStarted): - continue - if isinstance(event, bb.event.CacheLoadProgress): - continue - if isinstance(event, bb.event.CacheLoadCompleted): - continue - if isinstance(event, bb.event.MultipleProviders): - logger.info(str(event)) - continue - - if isinstance(event, bb.event.NoProvider): - errors = errors + 1 - text = str(event) - logger.error(text) - buildinfohelper.store_log_error(text) - continue - - if isinstance(event, bb.event.ConfigParsed): - continue - if isinstance(event, bb.event.RecipeParsed): - continue - - # end of saved events - - if isinstance(event, (bb.runqueue.sceneQueueTaskStarted, bb.runqueue.runQueueTaskStarted, bb.runqueue.runQueueTaskSkipped)): - buildinfohelper.store_started_task(event) - continue - - if isinstance(event, bb.runqueue.runQueueTaskCompleted): - buildinfohelper.update_and_store_task(event) - continue - - if isinstance(event, bb.runqueue.runQueueTaskFailed): - buildinfohelper.update_and_store_task(event) - taskfailures.append(event.taskstring) - logger.error(str(event)) - continue - - if isinstance(event, (bb.runqueue.sceneQueueTaskCompleted, bb.runqueue.sceneQueueTaskFailed)): - buildinfohelper.update_and_store_task(event) - continue - - - if isinstance(event, (bb.event.TreeDataPreparationStarted, bb.event.TreeDataPreparationCompleted)): - continue - - if isinstance(event, (bb.event.BuildCompleted, bb.command.CommandFailed)): - - errorcode = 0 - if isinstance(event, bb.command.CommandFailed): - errors += 1 - errorcode = 1 - logger.error(str(event)) - elif isinstance(event, bb.event.BuildCompleted): - buildinfohelper.scan_image_artifacts() - buildinfohelper.clone_required_sdk_artifacts() - - # turn off logging to the current build log - _close_build_log(build_log) - - # reset ready for next BuildStarted - build_log = None - - # update the build info helper on BuildCompleted, not on CommandXXX - buildinfohelper.update_build_information(event, errors, warnings, taskfailures) - - brbe = buildinfohelper.brbe - buildinfohelper.close(errorcode) - - # we start a new build info - if params.observe_only: - logger.debug("ToasterUI prepared for new build") - errors = 0 - warnings = 0 - taskfailures = [] - buildinfohelper = BuildInfoHelper(server, build_history_enabled) - else: - main.shutdown = 1 - - logger.info("ToasterUI build done, brbe: %s", brbe) - break - - if isinstance(event, (bb.command.CommandCompleted, - bb.command.CommandFailed, - bb.command.CommandExit)): - if params.observe_only: - errorcode = 0 - else: - main.shutdown = 1 - - continue - - if isinstance(event, bb.event.MetadataEvent): - if event.type == "SinglePackageInfo": - buildinfohelper.store_build_package_information(event) - elif event.type == "LayerInfo": - buildinfohelper.store_layer_info(event) - elif event.type == "BuildStatsList": - 
buildinfohelper.store_tasks_stats(event) - elif event.type == "ImagePkgList": - buildinfohelper.store_target_package_data(event) - elif event.type == "MissedSstate": - buildinfohelper.store_missed_state_tasks(event) - elif event.type == "SDKArtifactInfo": - buildinfohelper.scan_sdk_artifacts(event) - elif event.type == "SetBRBE": - buildinfohelper.brbe = buildinfohelper._get_data_from_event(event) - elif event.type == "TaskArtifacts": - buildinfohelper.scan_task_artifacts(event) - elif event.type == "OSErrorException": - logger.error(event) - else: - logger.error("Unprocessed MetadataEvent %s", event.type) - continue - - if isinstance(event, bb.cooker.CookerExit): - # shutdown when bitbake server shuts down - main.shutdown = 1 - continue - - if isinstance(event, bb.event.DepTreeGenerated): - buildinfohelper.store_dependency_information(event) - continue - - logger.warning("Unknown event: %s", event) - return_value += 1 - - except EnvironmentError as ioerror: - logger.warning("EnvironmentError: %s" % ioerror) - # ignore interrupted io system calls - if ioerror.args[0] == 4: # errno 4 is EINTR - logger.warning("Skipped EINTR: %s" % ioerror) - else: - raise - except KeyboardInterrupt: - if params.observe_only: - print("\nKeyboard Interrupt, exiting observer...") - main.shutdown = 2 - if not params.observe_only and main.shutdown == 1: - print("\nSecond Keyboard Interrupt, stopping...\n") - _, error = server.runCommand(["stateForceShutdown"]) - if error: - logger.error("Unable to cleanly stop: %s" % error) - if not params.observe_only and main.shutdown == 0: - print("\nKeyboard Interrupt, closing down...\n") - interrupted = True - _, error = server.runCommand(["stateShutdown"]) - if error: - logger.error("Unable to cleanly shutdown: %s" % error) - buildinfohelper.cancel_cli_build() - main.shutdown = main.shutdown + 1 - except Exception as e: - # print errors to log - import traceback - from pprint import pformat - exception_data = traceback.format_exc() - logger.error("%s\n%s" , e, exception_data) - - # save them to database, if possible; if it fails, we already logged to console. - try: - buildinfohelper.store_log_exception("%s\n%s" % (str(e), exception_data)) - except Exception as ce: - logger.error("CRITICAL - Failed to save toaster exception to the database: %s", str(ce)) - - # make sure we return with an error - return_value += 1 - - if interrupted and return_value == 0: - return_value += 1 - - logger.warning("Return value is %d", return_value) - return return_value diff --git a/bitbake/lib/bb/ui/uievent.py b/bitbake/lib/bb/ui/uievent.py deleted file mode 100644 index c2f830d530..0000000000 --- a/bitbake/lib/bb/ui/uievent.py +++ /dev/null @@ -1,144 +0,0 @@ -# -# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer -# Copyright (C) 2006 - 2007 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -""" -Use this class to fork off a thread to receive event callbacks from the bitbake -server and queue them for the UI to process. This process must be used to avoid -client/server deadlocks. 
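-
-Typical use from a UI process, assuming ``server`` is a connected BitBake
-server proxy (an illustrative sketch only)::
-
-    queue = BBUIEventQueue(server)
-    while True:
-        event = queue.waitEvent(0.25)
-        if event is not None:
-            print(event) # a real UI would dispatch on type(event)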
-""" - -import collections, logging, pickle, socket, threading -from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler - -import bb - -logger = logging.getLogger(__name__) - -class BBUIEventQueue: - def __init__(self, BBServer, clientinfo=("localhost, 0")): - - self.eventQueue = [] - self.eventQueueLock = threading.Lock() - self.eventQueueNotify = threading.Event() - - self.BBServer = BBServer - self.clientinfo = clientinfo - - server = UIXMLRPCServer(self.clientinfo) - self.host, self.port = server.socket.getsockname() - - server.register_function( self.system_quit, "event.quit" ) - server.register_function( self.send_event, "event.sendpickle" ) - server.socket.settimeout(1) - - self.EventHandle = None - - # the event handler registration may fail here due to cooker being in invalid state - # this is a transient situation, and we should retry a couple of times before - # giving up - - for count_tries in range(5): - ret = self.BBServer.registerEventHandler(self.host, self.port) - - if isinstance(ret, collections.abc.Iterable): - self.EventHandle, error = ret - else: - self.EventHandle = ret - error = "" - - if self.EventHandle is not None: - break - - errmsg = "Could not register UI event handler. Error: %s, host %s, "\ - "port %d" % (error, self.host, self.port) - bb.warn("%s, retry" % errmsg) - - import time - time.sleep(1) - else: - raise Exception(errmsg) - - self.server = server - - self.t = threading.Thread() - self.t.daemon = True - self.t.run = self.startCallbackHandler - self.t.start() - - def getEvent(self): - with bb.utils.lock_timeout(self.eventQueueLock): - if not self.eventQueue: - return None - item = self.eventQueue.pop(0) - if not self.eventQueue: - self.eventQueueNotify.clear() - return item - - def waitEvent(self, delay): - self.eventQueueNotify.wait(delay) - return self.getEvent() - - def queue_event(self, event): - with bb.utils.lock_timeout(self.eventQueueLock): - self.eventQueue.append(event) - self.eventQueueNotify.set() - - def send_event(self, event): - self.queue_event(pickle.loads(event)) - - def startCallbackHandler(self): - - self.server.timeout = 1 - bb.utils.set_process_name("UIEventQueue") - while not self.server.quit: - try: - self.server.handle_request() - except Exception as e: - import traceback - logger.error("BBUIEventQueue.startCallbackHandler: Exception while trying to handle request: %s\n%s" % (e, traceback.format_exc())) - - self.server.server_close() - - def system_quit( self ): - """ - Shut down the callback thread - """ - try: - self.BBServer.unregisterEventHandler(self.EventHandle) - except: - pass - self.server.quit = True - -class UIXMLRPCServer (SimpleXMLRPCServer): - - def __init__( self, interface ): - self.quit = False - SimpleXMLRPCServer.__init__( self, - interface, - requestHandler=SimpleXMLRPCRequestHandler, - logRequests=False, allow_none=True, use_builtin_types=True) - - def get_request(self): - while not self.quit: - try: - sock, addr = self.socket.accept() - sock.settimeout(1) - return (sock, addr) - except socket.timeout: - pass - return (None, None) - - def close_request(self, request): - if request is None: - return - SimpleXMLRPCServer.close_request(self, request) - - def process_request(self, request, client_address): - if request is None: - return - SimpleXMLRPCServer.process_request(self, request, client_address) - diff --git a/bitbake/lib/bb/ui/uihelper.py b/bitbake/lib/bb/ui/uihelper.py deleted file mode 100644 index a223632471..0000000000 --- a/bitbake/lib/bb/ui/uihelper.py +++ /dev/null @@ -1,66 +0,0 
@@ -# -# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer -# Copyright (C) 2006 - 2007 Richard Purdie -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import bb.build -import time - -class BBUIHelper: - def __init__(self): - self.needUpdate = False - self.running_tasks = {} - # Running PIDs preserves the order tasks were executed in - self.failed_tasks = [] - self.pidmap = {} - self.tasknumber_current = 0 - self.tasknumber_total = 0 - - def eventHandler(self, event): - # PIDs are a bad idea as they can be reused before we process all UI events. - # We maintain a 'fuzzy' match for TaskProgress since there is no other way to match - def removetid(pid, tid): - del self.running_tasks[tid] - if self.pidmap[pid] == tid: - del self.pidmap[pid] - self.needUpdate = True - - if isinstance(event, bb.build.TaskStarted): - tid = event._fn + ":" + event._task - if event._mc != "": - self.running_tasks[tid] = { 'title' : "mc:%s:%s %s" % (event._mc, event._package, event._task), 'starttime' : time.time(), 'pid' : event.pid } - else: - self.running_tasks[tid] = { 'title' : "%s %s" % (event._package, event._task), 'starttime' : time.time(), 'pid' : event.pid } - self.pidmap[event.pid] = tid - self.needUpdate = True - elif isinstance(event, bb.build.TaskSucceeded): - tid = event._fn + ":" + event._task - removetid(event.pid, tid) - elif isinstance(event, bb.build.TaskFailedSilent): - tid = event._fn + ":" + event._task - removetid(event.pid, tid) - # Don't add to the failed tasks list since this is e.g. a setscene task failure - elif isinstance(event, bb.build.TaskFailed): - tid = event._fn + ":" + event._task - removetid(event.pid, tid) - self.failed_tasks.append( { 'title' : "%s %s" % (event._package, event._task)}) - elif isinstance(event, bb.runqueue.runQueueTaskStarted) or isinstance(event, bb.runqueue.sceneQueueTaskStarted): - self.tasknumber_current = event.stats.completed + event.stats.active + event.stats.failed - self.tasknumber_total = event.stats.total - self.setscene_current = event.stats.setscene_active + event.stats.setscene_covered + event.stats.setscene_notcovered - self.setscene_total = event.stats.setscene_total - self.needUpdate = True - elif isinstance(event, bb.build.TaskProgress): - if event.pid > 0 and event.pid in self.pidmap: - self.running_tasks[self.pidmap[event.pid]]['progress'] = event.progress - self.running_tasks[self.pidmap[event.pid]]['rate'] = event.rate - self.needUpdate = True - else: - return False - return True - - def getTasks(self): - self.needUpdate = False - return (self.running_tasks, self.failed_tasks) diff --git a/bitbake/lib/bb/utils.py b/bitbake/lib/bb/utils.py deleted file mode 100644 index 366836bfc9..0000000000 --- a/bitbake/lib/bb/utils.py +++ /dev/null @@ -1,2266 +0,0 @@ -""" -BitBake Utility Functions -""" - -# Copyright (C) 2004 Michael Lauer -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import re, fcntl, os, string, stat, shutil, time -import sys -import errno -import logging -import locale -from bb import multiprocessing -import importlib -import importlib.machinery -import importlib.util -import itertools -import subprocess -import glob -import fnmatch -import traceback -import signal -import collections -import copy -import ctypes -import random -import socket -import struct -import tempfile -from subprocess import getstatusoutput -from contextlib import contextmanager -from ctypes import cdll -import bb -import bb.msg -import bb.filter - -logger = logging.getLogger("BitBake.Util") -python_extensions = importlib.machinery.all_suffixes() - - -def 
clean_context(): - return { - "os": os, - "bb": bb, - "time": time, - } - -def get_context(): - return _context - - -def set_context(ctx): - global _context - _context = ctx - -# Context used in better_exec, eval -_context = clean_context() - -class VersionStringException(Exception): - """Exception raised when an invalid version specification is found""" - -def explode_version(s): - r = [] - alpha_regexp = re.compile(r'^([a-zA-Z]+)(.*)$') - numeric_regexp = re.compile(r'^(\d+)(.*)$') - while (s != ''): - if s[0] in string.digits: - m = numeric_regexp.match(s) - r.append((0, int(m.group(1)))) - s = m.group(2) - continue - if s[0] in string.ascii_letters: - m = alpha_regexp.match(s) - r.append((1, m.group(1))) - s = m.group(2) - continue - if s[0] == '~': - r.append((-1, s[0])) - else: - r.append((2, s[0])) - s = s[1:] - return r - -def split_version(s): - """Split a version string into its constituent parts (PE, PV, PR). - - Arguments: - - - ``s``: version string. The format of the input string should be:: - - ${PE}:${PV}-${PR} - - Returns a tuple ``(pe, pv, pr)``. - """ - s = s.strip(" <>=") - e = 0 - if s.count(':'): - e = int(s.split(":")[0]) - s = s.split(":")[1] - r = "" - if s.count('-'): - r = s.rsplit("-", 1)[1] - s = s.rsplit("-", 1)[0] - v = s - return (e, v, r) - -def vercmp_part(a, b): - va = explode_version(a) - vb = explode_version(b) - while True: - if va == []: - (oa, ca) = (0, None) - else: - (oa, ca) = va.pop(0) - if vb == []: - (ob, cb) = (0, None) - else: - (ob, cb) = vb.pop(0) - if (oa, ca) == (0, None) and (ob, cb) == (0, None): - return 0 - if oa < ob: - return -1 - elif oa > ob: - return 1 - elif ca is None: - return -1 - elif cb is None: - return 1 - elif ca < cb: - return -1 - elif ca > cb: - return 1 - -def vercmp(ta, tb): - (ea, va, ra) = ta - (eb, vb, rb) = tb - - r = int(ea or 0) - int(eb or 0) - if (r == 0): - r = vercmp_part(va, vb) - if (r == 0): - r = vercmp_part(ra, rb) - return r - -def vercmp_string(a, b): - """ Split version strings using ``bb.utils.split_version()`` and compare - them with ``bb.utils.vercmp().`` - - Arguments: - - - ``a``: left version string operand. - - ``b``: right version string operand. - - Returns what ``bb.utils.vercmp()`` returns.""" - ta = split_version(a) - tb = split_version(b) - return vercmp(ta, tb) - -def vercmp_string_op(a, b, op): - """ - Takes the return value ``bb.utils.vercmp()`` and returns the operation - defined by ``op`` between the return value and 0. - - Arguments: - - - ``a``: left version string operand. - - ``b``: right version string operand. - - ``op``: operator string. Can be one of ``=``, ``==``, ``<=``, ``>=``, - ``>``, ``>>``, ``<``, ``<<`` or ``!=``. - """ - res = vercmp_string(a, b) - if op in ('=', '=='): - return res == 0 - elif op == '<=': - return res <= 0 - elif op == '>=': - return res >= 0 - elif op in ('>', '>>'): - return res > 0 - elif op in ('<', '<<'): - return res < 0 - elif op == '!=': - return res != 0 - else: - raise VersionStringException('Unsupported comparison operator "%s"' % op) - -@bb.filter.filter_proc(name="bb.utils.explode_deps") -def explode_deps(s): - """ - Takes an RDEPENDS style string of format:: - - DEPEND1 (optional version) DEPEND2 (optional version) ... - - Arguments: - - - ``s``: input RDEPENDS style string - - Returns a list of dependencies. - - Version information is ignored. 
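-
-    For example::
-
-        explode_deps("foo (>= 1.0) bar")
-        # -> ['foo', 'bar']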
- """ - r = [] - l = s.split() - flag = False - for i in l: - if i[0] == '(': - flag = True - #j = [] - if not flag: - r.append(i) - #else: - # j.append(i) - if flag and i.endswith(')'): - flag = False - # Ignore version - #r[-1] += ' ' + ' '.join(j) - return r - -def explode_dep_versions2(s, *, sort=True): - """ - Takes an RDEPENDS style string of format:: - - DEPEND1 (optional version) DEPEND2 (optional version) ... - - Arguments: - - - ``s``: input RDEPENDS style string - - ``*``: *Unused*. - - ``sort``: whether to sort the output or not. - - Returns a dictionary of dependencies and versions. - """ - r = collections.OrderedDict() - l = s.replace(",", "").split() - lastdep = None - lastcmp = "" - lastver = "" - incmp = False - inversion = False - for i in l: - if i[0] == '(': - incmp = True - i = i[1:].strip() - if not i: - continue - - if incmp: - incmp = False - inversion = True - # This list is based on behavior and supported comparisons from deb, opkg and rpm. - # - # Even though =<, <<, ==, !=, =>, and >> may not be supported, - # we list each possibly valid item. - # The build system is responsible for validation of what it supports. - if i.startswith(('<=', '=<', '<<', '==', '!=', '>=', '=>', '>>')): - lastcmp = i[0:2] - i = i[2:] - elif i.startswith(('<', '>', '=')): - lastcmp = i[0:1] - i = i[1:] - else: - # This is an unsupported case! - raise VersionStringException('Invalid version specification in "(%s" - invalid or missing operator' % i) - lastcmp = (i or "") - i = "" - i.strip() - if not i: - continue - - if inversion: - if i.endswith(')'): - i = i[:-1] or "" - inversion = False - if lastver and i: - lastver += " " - if i: - lastver += i - if lastdep not in r: - r[lastdep] = [] - r[lastdep].append(lastcmp + " " + lastver) - continue - - #if not inversion: - lastdep = i - lastver = "" - lastcmp = "" - if not (i in r and r[i]): - r[lastdep] = [] - - if sort: - r = collections.OrderedDict(sorted(r.items(), key=lambda x: x[0])) - return r - -def explode_dep_versions(s): - """ - Take an RDEPENDS style string of format:: - - DEPEND1 (optional version) DEPEND2 (optional version) ... - - Skips null values and items appeared in dependency string multiple times. - - Arguments: - - - ``s``: input RDEPENDS style string - - Returns a dictionary of dependencies and versions. - """ - r = explode_dep_versions2(s) - for d in r: - if not r[d]: - r[d] = None - continue - if len(r[d]) > 1: - bb.warn("explode_dep_versions(): Item %s appeared in dependency string '%s' multiple times with different values. explode_dep_versions cannot cope with this." % (d, s)) - r[d] = r[d][0] - return r - -def join_deps(deps, commasep=True): - """ - Take a result from ``bb.utils.explode_dep_versions()`` and generate a - dependency string. - - Arguments: - - - ``deps``: dictionary of dependencies and versions. - - ``commasep``: makes the return value separated by commas if ``True``, - separated by spaces otherwise. - - Returns a comma-separated (space-separated if ``comma-sep`` is ``False``) - string of dependencies and versions. 
- """ - result = [] - for dep in deps: - if deps[dep]: - if isinstance(deps[dep], list): - for v in deps[dep]: - result.append(dep + " (" + v + ")") - else: - result.append(dep + " (" + deps[dep] + ")") - else: - result.append(dep) - if commasep: - return ", ".join(result) - else: - return " ".join(result) - -def _print_trace(body, line): - """ - Print the Environment of a Text Body - """ - error = [] - # print the environment of the method - min_line = max(1, line-4) - max_line = min(line + 4, len(body)) - for i in range(min_line, max_line + 1): - if line == i: - error.append(' *** %.4d:%s' % (i, body[i-1].rstrip())) - else: - error.append(' %.4d:%s' % (i, body[i-1].rstrip())) - return error - -def better_compile(text, file, realfile, mode = "exec", lineno = 0): - """ - A better compile method. This method - will print the offending lines. - """ - try: - cache = bb.methodpool.compile_cache(text) - if cache: - return cache - # We can't add to the linenumbers for compile, we can pad to the correct number of blank lines though - text2 = "\n" * int(lineno) + text - code = compile(text2, realfile, mode) - bb.methodpool.compile_cache_add(text, code) - return code - except Exception as e: - error = [] - # split the text into lines again - body = text.split('\n') - error.append("Error in compiling python function in %s, line %s:\n" % (realfile, e.lineno)) - if hasattr(e, "lineno"): - error.append("The code lines resulting in this error were:") - # e.lineno: line's position in reaflile - # lineno: function name's "position -1" in realfile - # e.lineno - lineno: line's relative position in function - error.extend(_print_trace(body, e.lineno - lineno)) - else: - error.append("The function causing this error was:") - for line in body: - error.append(line) - error.append("%s: %s" % (e.__class__.__name__, str(e))) - - logger.error("\n".join(error)) - - e = bb.BBHandledException(e) - raise e - -def _print_exception(t, value, tb, realfile, text, context): - error = [] - try: - exception = traceback.format_exception_only(t, value) - error.append('Error executing a python function in %s:\n' % realfile) - - # Strip 'us' from the stack (better_exec call) unless that was where the - # error came from - if tb.tb_next is not None: - tb = tb.tb_next - - textarray = text.split('\n') - - linefailed = tb.tb_lineno - - tbextract = traceback.extract_tb(tb) - tbformat = traceback.format_list(tbextract) - error.append("The stack trace of python calls that resulted in this exception/failure was:") - error.append("File: '%s', lineno: %s, function: %s" % (tbextract[0][0], tbextract[0][1], tbextract[0][2])) - error.extend(_print_trace(textarray, linefailed)) - - # See if this is a function we constructed and has calls back into other functions in - # "text". 
If so, try and improve the context of the error by diving down the trace - level = 0 - nexttb = tb.tb_next - while nexttb is not None and (level+1) < len(tbextract): - error.append("File: '%s', lineno: %s, function: %s" % (tbextract[level+1][0], tbextract[level+1][1], tbextract[level+1][2])) - if tbextract[level][0] == tbextract[level+1][0] and tbextract[level+1][2] == tbextract[level][0]: - # The code was possibly in the string we compiled ourselves - error.extend(_print_trace(textarray, tbextract[level+1][1])) - elif tbextract[level+1][0].startswith("/"): - # The code looks like it might be in a file, try and load it - try: - with open(tbextract[level+1][0], "r") as f: - text = f.readlines() - error.extend(_print_trace(text, tbextract[level+1][1])) - except: - error.append(tbformat[level+1]) - else: - error.append(tbformat[level+1]) - nexttb = tb.tb_next - level = level + 1 - - error.append("Exception: %s" % ''.join(exception)) - - # If the exception is from spawning a task, let's be helpful and display - # the output (which hopefully includes stderr). - if isinstance(value, subprocess.CalledProcessError) and value.output: - error.append("Subprocess output:") - error.append(value.output.decode("utf-8", errors="ignore")) - finally: - logger.error("\n".join(error)) - -def better_exec(code, context, text = None, realfile = "", pythonexception=False): - """ - Similiar to better_compile, better_exec will - print the lines that are responsible for the - error. - """ - import bb.parse - if not text: - text = code - if not hasattr(code, "co_filename"): - code = better_compile(code, realfile, realfile) - try: - exec(code, get_context(), context) - except (bb.BBHandledException, bb.parse.SkipRecipe, bb.data_smart.ExpansionError, bb.process.ExecutionError): - # Error already shown so passthrough, no need for traceback - raise - except Exception as e: - if pythonexception: - raise - (t, value, tb) = sys.exc_info() - try: - _print_exception(t, value, tb, realfile, text, context) - except Exception as e2: - logger.error("Exception handler error: %s" % str(e2)) - - e = bb.BBHandledException(e) - raise e - -def simple_exec(code, context): - exec(code, get_context(), context) - -def better_eval(source, locals, extraglobals = None): - ctx = get_context() - if extraglobals: - ctx = copy.copy(ctx) - for g in extraglobals: - ctx[g] = extraglobals[g] - return eval(source, ctx, locals) - -@contextmanager -def fileslocked(files, *args, **kwargs): - """Context manager for locking and unlocking file locks. Uses - ``bb.utils.lockfile()`` and ``bb.utils.unlockfile()`` to lock and unlock - files. - - No return value.""" - locks = [] - if files: - for lockfile in files: - l = bb.utils.lockfile(lockfile, *args, **kwargs) - if l is not None: - locks.append(l) - - try: - yield - finally: - locks.reverse() - for lock in locks: - bb.utils.unlockfile(lock) - -def lockfile(name, shared=False, retry=True, block=False): - """ - Use the specified file (with filename ``name``) as a lock file, return when - the lock has been acquired. Returns a variable to pass to unlockfile(). - - Arguments: - - - ``shared``: sets the lock as a shared lock instead of an - exclusive lock. - - ``retry``: ``True`` to re-try locking if it fails, ``False`` - otherwise. - - ``block``: ``True`` to block until the lock succeeds, - ``False`` otherwise. 
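-
-    A typical exclusive-lock pattern (an illustrative sketch only; the lock
-    path is arbitrary)::
-
-        lf = bb.utils.lockfile("conf/my.lock")
-        try:
-            pass # critical section
-        finally:
-            bb.utils.unlockfile(lf)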
- - The retry and block parameters are kind of equivalent unless you - consider the possibility of sending a signal to the process to break - out - at which point you want block=True rather than retry=True. - - Returns the locked file descriptor in case of success, ``None`` otherwise. - """ - basename = os.path.basename(name) - if len(basename) > 255: - root, ext = os.path.splitext(basename) - basename = root[:255 - len(ext)] + ext - - dirname = os.path.dirname(name) - mkdirhier(dirname) - - name = os.path.join(dirname, basename) - - if not os.access(dirname, os.W_OK): - logger.error("Unable to acquire lock '%s', directory is not writable", - name) - sys.exit(1) - - op = fcntl.LOCK_EX - if shared: - op = fcntl.LOCK_SH - if not retry and not block: - op = op | fcntl.LOCK_NB - - while True: - # If we leave the lockfiles lying around there is no problem - # but we should clean up after ourselves. This gives potential - # for races though. To work around this, when we acquire the lock - # we check the file we locked was still the lock file on disk. - # by comparing inode numbers. If they don't match or the lockfile - # no longer exists, we start again. - - # This implementation is unfair since the last person to request the - # lock is the most likely to win it. - - try: - lf = open(name, 'a+') - fileno = lf.fileno() - fcntl.flock(fileno, op) - statinfo = os.fstat(fileno) - if os.path.exists(lf.name): - statinfo2 = os.stat(lf.name) - if statinfo.st_ino == statinfo2.st_ino: - return lf - lf.close() - except OSError as e: - if e.errno == errno.EACCES or e.errno == errno.ENAMETOOLONG: - logger.error("Unable to acquire lock '%s', %s", - e.strerror, name) - sys.exit(1) - try: - lf.close() - except Exception: - pass - pass - if not retry: - return None - -def unlockfile(lf): - """ - Unlock a file locked using ``bb.utils.lockfile()``. - - Arguments: - - - ``lf``: the locked file descriptor. - - No return value. - """ - try: - # If we had a shared lock, we need to promote to exclusive before - # removing the lockfile. Attempt this, ignore failures. - fcntl.flock(lf.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB) - os.unlink(lf.name) - except (IOError, OSError): - pass - fcntl.flock(lf.fileno(), fcntl.LOCK_UN) - lf.close() - -def _hasher(method, filename): - import mmap - - with open(filename, "rb") as f: - try: - with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mm: - for chunk in iter(lambda: mm.read(8192), b''): - method.update(chunk) - except ValueError: - # You can't mmap() an empty file so silence this exception - pass - return method.hexdigest() - - -def md5_file(filename): - """ - Arguments: - - - ``filename``: path to the input file. - - Returns the hexadecimal string representation of the MD5 checksum of filename. - """ - import hashlib - try: - sig = hashlib.new('MD5', usedforsecurity=False) - except TypeError: - # Some configurations don't appear to support two arguments - sig = hashlib.new('MD5') - return _hasher(sig, filename) - -def sha256_file(filename): - """ - Returns the hexadecimal representation of the 256-bit SHA checksum of - filename. - - Arguments: - - - ``filename``: path to the file. - """ - import hashlib - return _hasher(hashlib.sha256(), filename) - -def sha1_file(filename): - """ - Returns the hexadecimal representation of the SHA1 checksum of the filename - - Arguments: - - - ``filename``: path to the file. 
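Taken together, `lockfile()`, `unlockfile()` and the `fileslocked()` context manager form a simple cross-process mutex on top of `flock()`. A hedged usage sketch (assumes a BitBake environment where `bb.utils` is importable; the lock paths are hypothetical):

```python
import bb.utils

# Exclusive lock: blocks until acquired and returns the open lock file.
lf = bb.utils.lockfile("/tmp/demo.lock")
try:
    pass  # critical section touching the shared resource
finally:
    bb.utils.unlockfile(lf)  # unlinks the lock file, then releases the flock

# The context-manager form takes several locks and releases them in
# reverse order when the block exits.
with bb.utils.fileslocked(["/tmp/a.lock", "/tmp/b.lock"]):
    pass  # both locks held here
```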
- """ - import hashlib - return _hasher(hashlib.sha1(), filename) - -def sha384_file(filename): - """ - Returns the hexadecimal representation of the SHA384 checksum of the filename - - Arguments: - - - ``filename``: path to the file. - """ - import hashlib - return _hasher(hashlib.sha384(), filename) - -def sha512_file(filename): - """ - Returns the hexadecimal representation of the SHA512 checksum of the filename - - Arguments: - - - ``filename``: path to the file. - """ - import hashlib - return _hasher(hashlib.sha512(), filename) - -def goh1_file(filename): - """ - Returns the hexadecimal string representation of the Go mod h1 checksum of the - filename. The Go mod h1 checksum uses the Go dirhash package. The package - defines hashes over directory trees and is used by go mod for mod files and - zip archives. - - Arguments: - - - ``filename``: path to the file. - """ - import hashlib - import zipfile - - lines = [] - if zipfile.is_zipfile(filename): - with zipfile.ZipFile(filename) as archive: - for fn in sorted(archive.namelist()): - method = hashlib.sha256() - method.update(archive.read(fn)) - hash = method.hexdigest() - lines.append("%s %s\n" % (hash, fn)) - else: - hash = _hasher(hashlib.sha256(), filename) - lines.append("%s go.mod\n" % hash) - method = hashlib.sha256() - method.update("".join(lines).encode('utf-8')) - return method.hexdigest() - -def preserved_envvars_exported(): - """Returns the list of variables which are taken from the environment and - placed in and exported from the metadata.""" - return [ - 'BB_TASKHASH', - 'HOME', - 'LOGNAME', - 'PATH', - 'PWD', - 'SHELL', - 'USER', - 'LC_ALL', - 'BBSERVER', - ] - -def preserved_envvars(): - """Returns the list of variables which are taken from the environment and - placed in the metadata.""" - v = [ - 'BBPATH', - 'BB_PRESERVE_ENV', - 'BB_ENV_PASSTHROUGH_ADDITIONS', - ] - return v + preserved_envvars_exported() - -def check_system_locale(): - """Make sure the required system locale are available and configured. - - No return value.""" - default_locale = locale.getlocale(locale.LC_CTYPE) - - try: - locale.setlocale(locale.LC_CTYPE, ("en_US", "UTF-8")) - except: - sys.exit("Please make sure locale 'en_US.UTF-8' is available on your system") - else: - locale.setlocale(locale.LC_CTYPE, default_locale) - - if sys.getfilesystemencoding() != "utf-8": - sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\n" - "Python can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.") - -def filter_environment(good_vars): - """ - Create a pristine environment for bitbake. This will remove variables that - are not known and may influence the build in a negative way. - - Arguments: - - - ``good_vars``: list of variable to exclude from the filtering. - - No return value. - """ - - removed_vars = {} - for key in list(os.environ): - if key in good_vars: - continue - - removed_vars[key] = os.environ[key] - del os.environ[key] - - # If we spawn a python process, we need to have a UTF-8 locale, else python's file - # access methods will use ascii. You can't change that mode once the interpreter is - # started so we have to ensure a locale is set. Ideally we'd use C.UTF-8 but not all - # distros support that and we need to set something. 
- os.environ["LC_ALL"] = "en_US.UTF-8" - - if removed_vars: - logger.debug("Removed the following variables from the environment: %s", ", ".join(removed_vars.keys())) - - return removed_vars - -def approved_variables(): - """ - Determine and return the list of variables which are approved - to remain in the environment. - """ - if 'BB_PRESERVE_ENV' in os.environ: - return os.environ.keys() - approved = [] - if 'BB_ENV_PASSTHROUGH' in os.environ: - approved = os.environ['BB_ENV_PASSTHROUGH'].split() - approved.extend(['BB_ENV_PASSTHROUGH']) - else: - approved = preserved_envvars() - if 'BB_ENV_PASSTHROUGH_ADDITIONS' in os.environ: - approved.extend(os.environ['BB_ENV_PASSTHROUGH_ADDITIONS'].split()) - if 'BB_ENV_PASSTHROUGH_ADDITIONS' not in approved: - approved.extend(['BB_ENV_PASSTHROUGH_ADDITIONS']) - return approved - -def clean_environment(): - """ - Clean up any spurious environment variables. This will remove any - variables the user hasn't chosen to preserve. - - No return value. - """ - if 'BB_PRESERVE_ENV' not in os.environ: - good_vars = approved_variables() - return filter_environment(good_vars) - - return {} - -def empty_environment(): - """ - Remove all variables from the environment. - - No return value. - """ - for s in list(os.environ.keys()): - os.unsetenv(s) - del os.environ[s] - -def build_environment(d): - """ - Build an environment from all exported variables. - - Arguments: - - - ``d``: the data store. - - No return value. - """ - import bb.data - for var in bb.data.keys(d): - export = d.getVarFlag(var, "export", False) - if export: - os.environ[var] = d.getVar(var) or "" - -def _check_unsafe_delete_path(path): - """ - Basic safeguard against recursively deleting something we shouldn't. If it returns True, - the caller should raise an exception with an appropriate message. - NOTE: This is NOT meant to be a security mechanism - just a guard against silly mistakes - with potentially disastrous results. - """ - extra = '' - # HOME might not be /home/something, so in case we can get it, check against it - homedir = os.environ.get('HOME', '') - if homedir: - extra = '|%s' % homedir - if re.match('(/|//|/home|/home/[^/]*%s)$' % extra, os.path.abspath(path)): - return True - return False - -def remove(path, recurse=False, ionice=False): - """Equivalent to rm -f or rm -rf. - - Arguments: - - - ``path``: path to file/directory to remove. - - ``recurse``: deletes recursively if ``True``. - - ``ionice``: prepends ``ionice -c 3`` to the ``rm`` command. See ``man - ionice``. - - No return value. - """ - if not path: - return - if recurse: - for name in glob.glob(path): - if _check_unsafe_delete_path(name): - raise Exception('bb.utils.remove: called with dangerous path "%s" and recurse=True, refusing to delete!' % name) - # shutil.rmtree(name) would be ideal but its too slow - cmd = [] - if ionice: - cmd = ['ionice', '-c', '3'] - subprocess.check_call(cmd + ['rm', '-rf'] + glob.glob(path)) - return - for name in glob.glob(path): - try: - os.unlink(name) - except OSError as exc: - if exc.errno != errno.ENOENT: - raise - -def prunedir(topdir, ionice=False): - """ - Delete everything reachable from the directory named in ``topdir``. - - Arguments: - - - ``topdir``: directory path. - - ``ionice``: prepends ``ionice -c 3`` to the ``rm`` command. See ``man - ionice``. - - No return value. - """ - # CAUTION: This is dangerous! - if _check_unsafe_delete_path(topdir): - raise Exception('bb.utils.prunedir: called with dangerous path "%s", refusing to delete!' 
% topdir) - remove(topdir, recurse=True, ionice=ionice) - -# -# Could also use return re.compile("(%s)" % "|".join(map(re.escape, suffixes))).sub(lambda mo: "", var) -# but thats possibly insane and suffixes is probably going to be small -# -def prune_suffix(var, suffixes, d): - """ - Check if ``var`` ends with any of the suffixes listed in ``suffixes`` and - remove it if found. - - Arguments: - - - ``var``: string to check for suffixes. - - ``suffixes``: list of strings representing suffixes to check for. - - Returns the string ``var`` without the suffix. - """ - for suffix in suffixes: - if suffix and var.endswith(suffix): - return var[:-len(suffix)] - return var - -def mkdirhier(directory): - """Create a directory like 'mkdir -p', but does not complain if - directory already exists like ``os.makedirs()``. - - Arguments: - - - ``directory``: path to the directory. - - No return value. - """ - if '${' in str(directory): - bb.fatal("Directory name {} contains unexpanded bitbake variable. This may cause build failures and WORKDIR polution.".format(directory)) - try: - os.makedirs(directory) - except OSError as e: - if e.errno != errno.EEXIST or not os.path.isdir(directory): - raise e - -def movefile(src, dest, newmtime = None, sstat = None): - """Moves a file from ``src`` to ``dest``, preserving all permissions and - attributes; mtime will be preserved even when moving across - filesystems. Returns ``True`` on success and ``False`` on failure. Move is - atomic. - - Arguments: - - - ``src`` -- Source file. - - ``dest`` -- Destination file. - - ``newmtime`` -- new mtime to be passed as float seconds since the epoch. - - ``sstat`` -- os.stat_result to use for the destination file. - - Returns an ``os.stat_result`` of the destination file if the - source file is a symbolic link or the ``sstat`` argument represents a - symbolic link - in which case the destination file will also be created as - a symbolic link. - - Otherwise, returns ``newmtime`` on success and ``False`` on failure. - """ - - #print "movefile(" + src + "," + dest + "," + str(newmtime) + "," + str(sstat) + ")" - try: - if not sstat: - sstat = os.lstat(src) - except Exception as e: - logger.warning("movefile: Stating source file failed...", e) - return None - - destexists = 1 - try: - dstat = os.lstat(dest) - except: - dstat = os.lstat(os.path.dirname(dest)) - destexists = 0 - - if destexists: - if stat.S_ISLNK(dstat[stat.ST_MODE]): - try: - os.unlink(dest) - destexists = 0 - except Exception as e: - pass - - if stat.S_ISLNK(sstat[stat.ST_MODE]): - try: - target = os.readlink(src) - if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]): - os.unlink(dest) - os.symlink(target, dest) - #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) - os.unlink(src) - return os.lstat(dest) - except Exception as e: - logger.warning("movefile: failed to properly create symlink:", dest, "->", target, e) - return None - - renamefailed = 1 - # os.rename needs to know the dest path ending with file name - # so append the file name to a path only if it's a dir specified - srcfname = os.path.basename(src) - destpath = os.path.join(dest, srcfname) if os.path.isdir(dest) \ - else dest - - if sstat[stat.ST_DEV] == dstat[stat.ST_DEV]: - try: - bb.utils.rename(src, destpath) - renamefailed = 0 - except Exception as e: - if e.errno != errno.EXDEV: - # Some random error. 
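`prune_suffix()` and `mkdirhier()` are small enough that a couple of calls show their whole contract. A hedged sketch (assumes `bb.utils` is importable; note the data store argument of `prune_suffix()` is unused by the implementation shown):

```python
import bb.utils

print(bb.utils.prune_suffix("glibc-native", ["-native", "-cross"], None))  # "glibc"

# mkdirhier() behaves like `mkdir -p`: recreating an existing tree is fine,
# but a path still containing an unexpanded "${...}" variable is fatal.
bb.utils.mkdirhier("/tmp/demo/a/b/c")
bb.utils.mkdirhier("/tmp/demo/a/b/c")  # no-op, does not raise
```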
- logger.warning("movefile: Failed to move", src, "to", dest, e) - return None - # Invalid cross-device-link 'bind' mounted or actually Cross-Device - - if renamefailed: - didcopy = 0 - if stat.S_ISREG(sstat[stat.ST_MODE]): - try: # For safety copy then move it over. - shutil.copyfile(src, destpath + "#new") - bb.utils.rename(destpath + "#new", destpath) - didcopy = 1 - except Exception as e: - logger.warning('movefile: copy', src, '->', dest, 'failed.', e) - return None - else: - #we don't yet handle special, so we need to fall back to /bin/mv - a = getstatusoutput("/bin/mv -f " + "'" + src + "' '" + dest + "'") - if a[0] != 0: - logger.warning("movefile: Failed to move special file:" + src + "' to '" + dest + "'", a) - return None # failure - try: - if didcopy: - os.lchown(destpath, sstat[stat.ST_UID], sstat[stat.ST_GID]) - os.chmod(destpath, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown - os.unlink(src) - except Exception as e: - logger.warning("movefile: Failed to chown/chmod/unlink", dest, e) - return None - - if newmtime: - os.utime(destpath, (newmtime, newmtime)) - else: - os.utime(destpath, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME])) - newmtime = sstat[stat.ST_MTIME] - return newmtime - -def copyfile(src, dest, newmtime = None, sstat = None): - """ - Copies a file from ``src`` to ``dest``, preserving all permissions and - attributes; mtime will be preserved even when moving across - filesystems. - - Arguments: - - - ``src``: Source file. - - ``dest``: Destination file. - - ``newmtime``: new mtime to be passed as float seconds since the epoch. - - ``sstat``: os.stat_result to use for the destination file. - - Returns an ``os.stat_result`` of the destination file if the - source file is a symbolic link or the ``sstat`` argument represents a - symbolic link - in which case the destination file will also be created as - a symbolic link. - - Otherwise, returns ``newmtime`` on success and ``False`` on failure. - - """ - #print "copyfile(" + src + "," + dest + "," + str(newmtime) + "," + str(sstat) + ")" - try: - if not sstat: - sstat = os.lstat(src) - except Exception as e: - logger.warning("copyfile: stat of %s failed (%s)" % (src, e)) - return False - - destexists = 1 - try: - dstat = os.lstat(dest) - except: - dstat = os.lstat(os.path.dirname(dest)) - destexists = 0 - - if destexists: - if stat.S_ISLNK(dstat[stat.ST_MODE]): - try: - os.unlink(dest) - destexists = 0 - except Exception as e: - pass - - if stat.S_ISLNK(sstat[stat.ST_MODE]): - try: - target = os.readlink(src) - if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]): - os.unlink(dest) - os.symlink(target, dest) - os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) - return os.lstat(dest) - except Exception as e: - logger.warning("copyfile: failed to create symlink %s to %s (%s)" % (dest, target, e)) - return False - - if stat.S_ISREG(sstat[stat.ST_MODE]): - try: - srcchown = False - if not os.access(src, os.R_OK): - # Make sure we can read it - srcchown = True - os.chmod(src, sstat[stat.ST_MODE] | stat.S_IRUSR) - - # For safety copy then move it over. 
- shutil.copyfile(src, dest + "#new") - bb.utils.rename(dest + "#new", dest) - except Exception as e: - logger.warning("copyfile: copy %s to %s failed (%s)" % (src, dest, e)) - return False - finally: - if srcchown: - os.chmod(src, sstat[stat.ST_MODE]) - os.utime(src, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME])) - - else: - #we don't yet handle special, so we need to fall back to /bin/mv - a = getstatusoutput("/bin/cp -f " + "'" + src + "' '" + dest + "'") - if a[0] != 0: - logger.warning("copyfile: failed to copy special file %s to %s (%s)" % (src, dest, a)) - return False # failure - try: - os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID]) - os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown - except Exception as e: - logger.warning("copyfile: failed to chown/chmod %s (%s)" % (dest, e)) - return False - - if newmtime: - os.utime(dest, (newmtime, newmtime)) - else: - os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME])) - newmtime = sstat[stat.ST_MTIME] - return newmtime - -def break_hardlinks(src, sstat = None): - """ - Ensures ``src`` is the only hardlink to this file. Other hardlinks, - if any, are not affected (other than in their st_nlink value, of - course). - - Arguments: - - - ``src``: source file path. - - ``sstat``: os.stat_result to use when checking if the file is a link. - - Returns ``True`` on success and ``False`` on failure. - """ - try: - if not sstat: - sstat = os.lstat(src) - except Exception as e: - logger.warning("break_hardlinks: stat of %s failed (%s)" % (src, e)) - return False - if sstat[stat.ST_NLINK] == 1: - return True - return copyfile(src, src, sstat=sstat) - -def which(path, item, direction = 0, history = False, executable=False): - """ - Locate ``item`` in the list of paths ``path`` (colon separated string like - ``$PATH``). - - Arguments: - - - ``path``: list of colon-separated paths. - - ``item``: string to search for. - - ``direction``: if non-zero then the list is reversed. - - ``history``: if ``True`` then the list of candidates also returned as - ``result,history`` where ``history`` is the list of previous path - checked. - - ``executable``: if ``True`` then the candidate defined by ``path`` has - to be an executable file, otherwise if ``False`` the candidate simply - has to exist. - - Returns the item if found in the list of path, otherwise an empty string. - If ``history`` is ``True``, return the list of previous path checked in a - tuple with the found (or not found) item as ``(item, history)``. - """ - - if executable: - is_candidate = lambda p: os.path.isfile(p) and os.access(p, os.X_OK) - else: - is_candidate = lambda p: os.path.exists(p) - - hist = [] - paths = (path or "").split(':') - if direction != 0: - paths.reverse() - - for p in paths: - next = os.path.join(p, item) - hist.append(next) - if is_candidate(next): - if not os.path.isabs(next): - next = os.path.abspath(next) - if history: - return next, hist - return next - - if history: - return "", hist - return "" - -def to_filemode(input): - """ - Take a bitbake variable contents defining a file mode and return - the proper python representation of the number - - Arguments: - - - ``input``: a string or number to convert, e.g. a bitbake variable - string, assumed to be an octal representation - - Returns the python file mode as a number - """ - # umask might come in as a number or text string.. 
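`which()` mirrors the shell's `$PATH` lookup. A hedged sketch of its main modes (assumes `bb.utils` is importable):

```python
import bb.utils

# Forward search through a $PATH-style string; "" is returned on no match.
print(bb.utils.which("/usr/sbin:/usr/bin:/bin", "sh", executable=True))

# history=True additionally returns every candidate checked, in order.
found, tried = bb.utils.which("/usr/bin:/bin", "nosuchtool", history=True)
print(found, tried)  # '' ['/usr/bin/nosuchtool', '/bin/nosuchtool']
```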
- if type(input) is int: - return input - return int(input, 8) - -@contextmanager -def umask(new_mask): - """ - Context manager to set the umask to a specific mask, and restore it afterwards. - - No return value. - """ - current_mask = os.umask(new_mask) - try: - yield - finally: - os.umask(current_mask) - -def to_boolean(string, default=None): - """ - Check input string and return boolean value True/False/None - depending upon the checks. - - Arguments: - - - ``string``: input string. - - ``default``: default return value if the input ``string`` is ``None``, - ``0``, ``False`` or an empty string. - - Returns ``True`` if the string is one of "y", "yes", "1", "true", ``False`` - if the string is one of "n", "no", "0", or "false". Return ``default`` if - the input ``string`` is ``None``, ``0``, ``False`` or an empty string. - """ - if not string: - return default - - if isinstance(string, int): - return string != 0 - - normalized = string.lower() - if normalized in ("y", "yes", "1", "true"): - return True - elif normalized in ("n", "no", "0", "false"): - return False - else: - raise ValueError("Invalid value for to_boolean: %s" % string) - -def contains(variable, checkvalues, truevalue, falsevalue, d): - """Check if a variable contains all the values specified. - - Arguments: - - - ``variable``: the variable name. This will be fetched and expanded (using - d.getVar(variable)) and then split into a set(). - - ``checkvalues``: if this is a string it is split on whitespace into a set(), - otherwise coerced directly into a set(). - - ``truevalue``: the value to return if checkvalues is a subset of variable. - - ``falsevalue``: the value to return if variable is empty or if checkvalues is - not a subset of variable. - - ``d``: the data store. - - Returns ``True`` if the variable contains the values specified, ``False`` - otherwise. - """ - - val = d.getVar(variable) - if not val: - return falsevalue - val = set(val.split()) - if isinstance(checkvalues, str): - checkvalues = set(checkvalues.split()) - else: - checkvalues = set(checkvalues) - if checkvalues.issubset(val): - return truevalue - return falsevalue - -def contains_any(variable, checkvalues, truevalue, falsevalue, d): - """Check if a variable contains any values specified. - - Arguments: - - - ``variable``: the variable name. This will be fetched and expanded (using - d.getVar(variable)) and then split into a set(). - - ``checkvalues``: if this is a string it is split on whitespace into a set(), - otherwise coerced directly into a set(). - - ``truevalue``: the value to return if checkvalues is a subset of variable. - - ``falsevalue``: the value to return if variable is empty or if checkvalues is - not a subset of variable. - - ``d``: the data store. - - Returns ``True`` if the variable contains any of the values specified, - ``False`` otherwise. - """ - val = d.getVar(variable) - if not val: - return falsevalue - val = set(val.split()) - if isinstance(checkvalues, str): - checkvalues = set(checkvalues.split()) - else: - checkvalues = set(checkvalues) - if checkvalues & val: - return truevalue - return falsevalue - -def filter(variable, checkvalues, d): - """Return all words in the variable that are present in the ``checkvalues``. - - Arguments: - - - ``variable``: the variable name. This will be fetched and expanded (using - d.getVar(variable)) and then split into a set(). - - ``checkvalues``: if this is a string it is split on whitespace into a set(), - otherwise coerced directly into a set(). - - ``d``: the data store. 
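The truth-value helpers are easiest to read from examples. A hedged sketch, assuming a data store `d` in which `DISTRO_FEATURES = "systemd wayland x11"`:

```python
bb.utils.contains("DISTRO_FEATURES", "systemd x11", "y", "n", d)       # "y": all values present
bb.utils.contains("DISTRO_FEATURES", "systemd wifi", "y", "n", d)      # "n": wifi missing
bb.utils.contains_any("DISTRO_FEATURES", "systemd wifi", "y", "n", d)  # "y": one match suffices

bb.utils.to_boolean("Yes")  # True (comparison is case-insensitive)
bb.utils.to_boolean("")     # None (the default argument is returned)
```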
- - Returns a list of string. - """ - - val = d.getVar(variable) - if not val: - return '' - val = set(val.split()) - if isinstance(checkvalues, str): - checkvalues = set(checkvalues.split()) - else: - checkvalues = set(checkvalues) - return ' '.join(sorted(checkvalues & val)) - - -def get_referenced_vars(start_expr, d): - """ - Get the names of the variables referenced in a given expression. - - Arguments: - - - ``start_expr``: the expression where to look for variables references. - - For example:: - - ${VAR_A} string ${VAR_B} - - Or:: - - ${@d.getVar('VAR')} - - If a variables makes references to other variables, the latter are also - returned recursively. - - - ``d``: the data store. - - Returns the names of vars referenced in ``start_expr`` (recursively), in - quasi-BFS order (variables within the same level are ordered arbitrarily). - """ - - seen = set() - ret = [] - - # The first entry in the queue is the unexpanded start expression - queue = collections.deque([start_expr]) - # Subsequent entries will be variable names, so we need to track whether or not entry requires getVar - is_first = True - - empty_data = bb.data.init() - while queue: - entry = queue.popleft() - if is_first: - # Entry is the start expression - no expansion needed - is_first = False - expression = entry - else: - # This is a variable name - need to get the value - expression = d.getVar(entry, False) - ret.append(entry) - - # expandWithRefs is how we actually get the referenced variables in the expression. We call it using an empty - # data store because we only want the variables directly used in the expression. It returns a set, which is what - # dooms us to only ever be "quasi-BFS" rather than full BFS. - new_vars = empty_data.expandWithRefs(expression, None).references - set(seen) - - queue.extend(new_vars) - seen.update(new_vars) - return ret - - -def cpu_count(): - try: - return len(os.sched_getaffinity(0)) - except OSError: - return multiprocessing.cpu_count() - -def nonblockingfd(fd): - fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK) - -def profile_function(profile, function, output_fn, process=True): - """Common function to profile a code block and optionally process the - output using or processing function. - - Arguments: - - - ``profile``: a boolean saying whether to enable profiling or not - - ``function``: the function call to profile/run - - ``outputfn``: where to write the profiling data - - ``process``: whether to process the profiling data and write a report - - Returns the wrapped function return value - """ - if profile: - try: - import cProfile as profile - except: - import profile - prof = profile.Profile() - ret = profile.Profile.runcall(prof, function) - prof.dump_stats(output_fn) - if process: - process_profilelog(output_fn) - serverlog("Raw profiling information saved to %s and processed statistics to %s.report*" % (output_fn, output_fn)) - return ret - else: - return function() - -def process_profilelog(fn, fn_out = None): - # Either call with a list of filenames and set pout or a filename and optionally pout. 
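A small worked example makes the quasi-BFS order of `get_referenced_vars()` concrete. Hedged sketch, assuming a data store `d` holding `A = "${B} ${C}"`, `B = "${D}"`, `C = "x"` and `D = "y"`:

```python
bb.utils.get_referenced_vars("${A}", d)
# -> ['A', 'B', 'C', 'D']: the start expression's references first, then each
#    deeper level; names within one level may come back in arbitrary order.
```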
- import pstats - - if not fn_out: - fn_out = fn + '.report' - - def pstatopen(): - if isinstance(fn, list): - return pstats.Stats(*fn, stream=pout) - return pstats.Stats(fn, stream=pout) - - with open(fn_out + '.time', 'w') as pout: - p = pstatopen() - p.sort_stats('time') - p.print_stats() - - with open(fn_out + '.time-callers', 'w') as pout: - p = pstatopen() - p.sort_stats('time') - p.print_callers() - - with open(fn_out + '.cumulative', 'w') as pout: - p = pstatopen() - p.sort_stats('cumulative') - p.print_stats() - - with open(fn_out + '.cumulative-callers', 'w') as pout: - p = pstatopen() - p.sort_stats('cumulative') - p.print_callers() - - -def exec_flat_python_func(func, *args, **kwargs): - """Execute a flat python function (defined with ``def funcname(args): ...``) - - Returns the return value of the function.""" - # Prepare a small piece of python code which calls the requested function - # To do this we need to prepare two things - a set of variables we can use to pass - # the values of arguments into the calling function, and the list of arguments for - # the function being called - context = {} - funcargs = [] - # Handle unnamed arguments - aidx = 1 - for arg in args: - argname = 'arg_%s' % aidx - context[argname] = arg - funcargs.append(argname) - aidx += 1 - # Handle keyword arguments - context.update(kwargs) - funcargs.extend(['%s=%s' % (arg, arg) for arg in kwargs.keys()]) - code = 'retval = %s(%s)' % (func, ', '.join(funcargs)) - comp = bb.utils.better_compile(code, '', '') - bb.utils.better_exec(comp, context, code, '') - return context['retval'] - -def edit_metadata(meta_lines, variables, varfunc, match_overrides=False): - """Edit lines from a recipe or config file and modify one or more - specified variable values set in the file using a specified callback - function. Lines are expected to have trailing newlines. - - Arguments: - - - ``meta_lines``: lines from the file; can be a list or an iterable - (e.g. file pointer) - - ``variables``: a list of variable names to look for. Functions - may also be specified, but must be specified with ``()`` at - the end of the name. Note that the function doesn't have - any intrinsic understanding of ``:append``, ``:prepend``, ``:remove``, - or overrides, so these are considered as part of the name. - These values go into a regular expression, so regular - expression syntax is allowed. - - ``varfunc``: callback function called for every variable matching - one of the entries in the variables parameter. - - The function should take four arguments: - - - ``varname``: name of variable matched - - ``origvalue``: current value in file - - ``op``: the operator (e.g. ``+=``) - - ``newlines``: list of lines up to this point. You can use - this to prepend lines before this variable setting - if you wish. - - And should return a four-element tuple: - - - ``newvalue``: new value to substitute in, or ``None`` to drop - the variable setting entirely. (If the removal - results in two consecutive blank lines, one of the - blank lines will also be dropped). - - ``newop``: the operator to use - if you specify ``None`` here, - the original operation will be used. - - ``indent``: number of spaces to indent multi-line entries, - or ``-1`` to indent up to the level of the assignment - and opening quote, or a string to use as the indent. - - ``minbreak``: ``True`` to allow the first element of a - multi-line value to continue on the same line as - the assignment, ``False`` to indent before the first - element. 
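`exec_flat_python_func()` builds a stub such as `retval = sorted(arg_1, reverse=reverse)` and executes it, so the named function has to be resolvable in the execution context; builtins always are, which keeps a sketch self-contained (assumes `bb.utils` is importable):

```python
import bb.utils

print(bb.utils.exec_flat_python_func("sorted", [3, 1, 2], reverse=True))  # [3, 2, 1]
print(bb.utils.exec_flat_python_func("min", 3, 7))                        # 3
```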
- - To clarify, if you wish not to change the value, then you - would return like this:: - - return origvalue, None, 0, True - - ``match_overrides``: True to match items with _overrides on the end, - False otherwise - - Returns a tuple: - - - ``updated``: ``True`` if changes were made, ``False`` otherwise. - - ``newlines``: Lines after processing. - """ - - var_res = {} - if match_overrides: - override_re = r'(_[a-zA-Z0-9-_$(){}]+)?' - else: - override_re = '' - for var in variables: - if var.endswith('()'): - var_res[var] = re.compile(r'^(%s%s)[ \\t]*\([ \\t]*\)[ \\t]*{' % (var[:-2].rstrip(), override_re)) - else: - var_res[var] = re.compile(r'^(%s%s)[ \\t]*[?+:.]*=[+.]*[ \\t]*(["\'])' % (var, override_re)) - - updated = False - varset_start = '' - varlines = [] - newlines = [] - in_var = None - full_value = '' - var_end = '' - - def handle_var_end(): - prerun_newlines = newlines[:] - op = varset_start[len(in_var):].strip() - (newvalue, newop, indent, minbreak) = varfunc(in_var, full_value, op, newlines) - changed = (prerun_newlines != newlines) - - if newvalue is None: - # Drop the value - return True - elif newvalue != full_value or (newop not in [None, op]): - if newop not in [None, op]: - # Callback changed the operator - varset_new = "%s %s" % (in_var, newop) - else: - varset_new = varset_start - - if isinstance(indent, int): - if indent == -1: - indentspc = ' ' * (len(varset_new) + 2) - else: - indentspc = ' ' * indent - else: - indentspc = indent - if in_var.endswith('()'): - # A function definition - if isinstance(newvalue, list): - newlines.append('%s {\n%s%s\n}\n' % (varset_new, indentspc, ('\n%s' % indentspc).join(newvalue))) - else: - if not newvalue.startswith('\n'): - newvalue = '\n' + newvalue - if not newvalue.endswith('\n'): - newvalue = newvalue + '\n' - newlines.append('%s {%s}\n' % (varset_new, newvalue)) - else: - # Normal variable - if isinstance(newvalue, list): - if not newvalue: - # Empty list -> empty string - newlines.append('%s ""\n' % varset_new) - elif minbreak: - # First item on first line - if len(newvalue) == 1: - newlines.append('%s "%s"\n' % (varset_new, newvalue[0])) - else: - newlines.append('%s "%s \\\n' % (varset_new, newvalue[0])) - for item in newvalue[1:]: - newlines.append('%s%s \\\n' % (indentspc, item)) - newlines.append('%s"\n' % indentspc) - else: - # No item on first line - newlines.append('%s " \\\n' % varset_new) - for item in newvalue: - newlines.append('%s%s \\\n' % (indentspc, item)) - newlines.append('%s"\n' % indentspc) - else: - newlines.append('%s "%s"\n' % (varset_new, newvalue)) - return True - else: - # Put the old lines back where they were - newlines.extend(varlines) - # If newlines was touched by the function, we'll need to return True - return changed - - checkspc = False - - for line in meta_lines: - if in_var: - value = line.rstrip() - varlines.append(line) - if in_var.endswith('()'): - full_value += '\n' + value - else: - full_value += value[:-1] - if value.endswith(var_end): - if in_var.endswith('()'): - if full_value.count('{') - full_value.count('}') >= 0: - continue - full_value = full_value[:-1] - if handle_var_end(): - updated = True - checkspc = True - in_var = None - else: - skip = False - for (varname, var_re) in var_res.items(): - res = var_re.match(line) - if res: - isfunc = varname.endswith('()') - if isfunc: - splitvalue = line.split('{', 1) - var_end = '}' - else: - var_end = res.groups()[-1] - splitvalue = line.split(var_end, 1) - varset_start = splitvalue[0].rstrip() - value = splitvalue[1].rstrip() - if 
not isfunc and value.endswith('\\'): - value = value[:-1] - full_value = value - varlines = [line] - in_var = res.group(1) - if isfunc: - in_var += '()' - if value.endswith(var_end): - full_value = full_value[:-1] - if handle_var_end(): - updated = True - checkspc = True - in_var = None - skip = True - break - if not skip: - if checkspc: - checkspc = False - if newlines and newlines[-1] == '\n' and line == '\n': - # Squash blank line if there are two consecutive blanks after a removal - continue - newlines.append(line) - return (updated, newlines) - - -def edit_metadata_file(meta_file, variables, varfunc): - """Edit a recipe or configuration file and modify one or more specified - variable values set in the file using a specified callback function. - The file is only written to if the value(s) actually change. - This is basically the file version of ``bb.utils.edit_metadata()``, see that - function's description for parameter/usage information. - - Returns ``True`` if the file was written to, ``False`` otherwise. - """ - with open(meta_file, 'r') as f: - (updated, newlines) = edit_metadata(f, variables, varfunc) - if updated: - with open(meta_file, 'w') as f: - f.writelines(newlines) - return updated - - -def edit_bblayers_conf(bblayers_conf, add, remove, edit_cb=None): - """Edit ``bblayers.conf``, adding and/or removing layers. - - Arguments: - - - ``bblayers_conf``: path to ``bblayers.conf`` file to edit - - ``add``: layer path (or list of layer paths) to add; ``None`` or empty - list to add nothing - - ``remove``: layer path (or list of layer paths) to remove; ``None`` or - empty list to remove nothing - - ``edit_cb``: optional callback function that will be called - after processing adds/removes once per existing entry. - - Returns a tuple: - - - ``notadded``: list of layers specified to be added but weren't - (because they were already in the list) - - ``notremoved``: list of layers that were specified to be removed - but weren't (because they weren't in the list) - """ - - def remove_trailing_sep(pth): - if pth and pth[-1] == os.sep: - pth = pth[:-1] - return pth - - approved = bb.utils.approved_variables() - def canonicalise_path(pth): - pth = remove_trailing_sep(pth) - if 'HOME' in approved and '~' in pth: - pth = os.path.expanduser(pth) - return pth - - def layerlist_param(value): - if not value: - return [] - elif isinstance(value, list): - return [remove_trailing_sep(x) for x in value] - else: - return [remove_trailing_sep(value)] - - addlayers = layerlist_param(add) - removelayers = layerlist_param(remove) - - # Need to use a list here because we can't set non-local variables from a callback in python 2.x - bblayercalls = [] - removed = [] - plusequals = False - orig_bblayers = [] - - def handle_bblayers_firstpass(varname, origvalue, op, newlines): - bblayercalls.append(op) - if op == '=': - del orig_bblayers[:] - orig_bblayers.extend([canonicalise_path(x) for x in origvalue.split()]) - return (origvalue, None, 2, False) - - def handle_bblayers(varname, origvalue, op, newlines): - updated = False - bblayers = [remove_trailing_sep(x) for x in origvalue.split()] - if removelayers: - for removelayer in removelayers: - for layer in bblayers: - if fnmatch.fnmatch(canonicalise_path(layer), canonicalise_path(removelayer)): - updated = True - bblayers.remove(layer) - removed.append(removelayer) - break - if addlayers and not plusequals: - for addlayer in addlayers: - if addlayer not in bblayers: - updated = True - bblayers.append(addlayer) - del addlayers[:] - - if edit_cb: - newlist = 
[] - for layer in bblayers: - res = edit_cb(layer, canonicalise_path(layer)) - if res != layer: - newlist.append(res) - updated = True - else: - newlist.append(layer) - bblayers = newlist - - if updated: - if op == '+=' and not bblayers: - bblayers = None - return (bblayers, None, 2, False) - else: - return (origvalue, None, 2, False) - - with open(bblayers_conf, 'r') as f: - (_, newlines) = edit_metadata(f, ['BBLAYERS'], handle_bblayers_firstpass) - - if not bblayercalls: - raise Exception('Unable to find BBLAYERS in %s' % bblayers_conf) - - # Try to do the "smart" thing depending on how the user has laid out - # their bblayers.conf file - if bblayercalls.count('+=') > 1: - plusequals = True - - removelayers_canon = [canonicalise_path(layer) for layer in removelayers] - notadded = [] - for layer in addlayers: - layer_canon = canonicalise_path(layer) - if layer_canon in orig_bblayers and not layer_canon in removelayers_canon: - notadded.append(layer) - notadded_canon = [canonicalise_path(layer) for layer in notadded] - addlayers[:] = [layer for layer in addlayers if canonicalise_path(layer) not in notadded_canon] - - (updated, newlines) = edit_metadata(newlines, ['BBLAYERS'], handle_bblayers) - if addlayers: - # Still need to add these - for addlayer in addlayers: - newlines.append('BBLAYERS += "%s"\n' % addlayer) - updated = True - - if updated: - with open(bblayers_conf, 'w') as f: - f.writelines(newlines) - - notremoved = list(set(removelayers) - set(removed)) - - return (notadded, notremoved) - -def get_collection_res(d): - collections = (d.getVar('BBFILE_COLLECTIONS') or '').split() - collection_res = {} - for collection in collections: - collection_res[collection] = d.getVar('BBFILE_PATTERN_%s' % collection) or '' - - return collection_res - - -def get_file_layer(filename, d, collection_res={}): - """Determine the collection (or layer name, as defined by a layer's - ``layer.conf`` file) containing the specified file. - - Arguments: - - - ``filename``: the filename to look for. - - ``d``: the data store. - - ``collection_res``: dictionary with the layer names as keys and file - patterns to match as defined with the BBFILE_COLLECTIONS and - BBFILE_PATTERN variables respectively. The return value of - ``bb.utils.get_collection_res()`` is the default if this variable is - not specified. - - Returns the layer name containing the file. If multiple layers contain the - file, the last matching layer name from collection_res is returned. - """ - if not collection_res: - collection_res = get_collection_res(d) - - def path_to_layer(path): - # Use longest path so we handle nested layers - matchlen = 0 - match = None - for collection, regex in collection_res.items(): - if len(regex) > matchlen and re.match(regex, path): - matchlen = len(regex) - match = collection - return match - - result = None - bbfiles = (d.getVar('BBFILES_PRIORITIZED') or '').split() - bbfilesmatch = False - for bbfilesentry in bbfiles: - if fnmatch.fnmatchcase(filename, bbfilesentry): - bbfilesmatch = True - result = path_to_layer(bbfilesentry) - break - - if not bbfilesmatch: - # Probably a bbclass - result = path_to_layer(filename) - - return result - - -# Constant taken from http://linux.die.net/include/linux/prctl.h -PR_SET_PDEATHSIG = 1 - -class PrCtlError(Exception): - pass - -def signal_on_parent_exit(signame): - """ - Trigger ``signame`` to be sent when the parent process dies. - - Arguments: - - - ``signame``: name of the signal. See ``man signal``. - - No return value. 
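The `varfunc` contract is the subtle part of the `edit_metadata()` API, so a concrete callback helps. A hedged sketch that pins `PV` in a recipe via the file-level wrapper (the recipe path is hypothetical):

```python
import bb.utils

def bump_pv(varname, origvalue, op, newlines):
    # Return (newvalue, newop, indent, minbreak). newop=None keeps the
    # original operator; returning origvalue unchanged leaves the line alone;
    # newvalue=None would drop the assignment entirely.
    if varname == "PV":
        return "2.0", None, 0, True
    return origvalue, None, 0, True

updated = bb.utils.edit_metadata_file("recipes-demo/foo_1.0.bb", ["PV"], bump_pv)
print(updated)  # True only if the file content actually changed
```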
- """ - signum = getattr(signal, signame) - # http://linux.die.net/man/2/prctl - result = cdll['libc.so.6'].prctl(PR_SET_PDEATHSIG, signum) - if result != 0: - raise PrCtlError('prctl failed with error code %s' % result) - -# -# Manually call the ioprio syscall. We could depend on other libs like psutil -# however this gets us enough of what we need to bitbake for now without the -# dependency -# -_unamearch = os.uname()[4] -IOPRIO_WHO_PROCESS = 1 -IOPRIO_CLASS_SHIFT = 13 - -def ioprio_set(who, cls, value): - NR_ioprio_set = None - if _unamearch == "x86_64": - NR_ioprio_set = 251 - elif _unamearch[0] == "i" and _unamearch[2:3] == "86": - NR_ioprio_set = 289 - elif _unamearch == "aarch64": - NR_ioprio_set = 30 - - if NR_ioprio_set: - ioprio = value | (cls << IOPRIO_CLASS_SHIFT) - rc = cdll['libc.so.6'].syscall(NR_ioprio_set, IOPRIO_WHO_PROCESS, who, ioprio) - if rc != 0: - raise ValueError("Unable to set ioprio, syscall returned %s" % rc) - else: - bb.warn("Unable to set IO Prio for arch %s" % _unamearch) - -def set_process_name(name): - from ctypes import byref, create_string_buffer - # This is nice to have for debugging, not essential - try: - libc = cdll.LoadLibrary('libc.so.6') - buf = create_string_buffer(bytes(name, 'utf-8')) - libc.prctl(15, byref(buf), 0, 0, 0) - except: - pass - -def enable_loopback_networking(): - # From bits/ioctls.h - SIOCGIFFLAGS = 0x8913 - SIOCSIFFLAGS = 0x8914 - SIOCSIFADDR = 0x8916 - SIOCSIFNETMASK = 0x891C - - # if.h - IFF_UP = 0x1 - IFF_RUNNING = 0x40 - - # bits/socket.h - AF_INET = 2 - - # char ifr_name[IFNAMSIZ=16] - ifr_name = struct.pack("@16s", b"lo") - def netdev_req(fd, req, data = b""): - # Pad and add interface name - data = ifr_name + data + (b'\x00' * (16 - len(data))) - # Return all data after interface name - return fcntl.ioctl(fd, req, data)[16:] - - with socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP) as sock: - fd = sock.fileno() - - # struct sockaddr_in ifr_addr { unsigned short family; uint16_t sin_port ; uint32_t in_addr; } - req = struct.pack("@H", AF_INET) + struct.pack("=H4B", 0, 127, 0, 0, 1) - netdev_req(fd, SIOCSIFADDR, req) - - # short ifr_flags - flags = struct.unpack_from('@h', netdev_req(fd, SIOCGIFFLAGS))[0] - flags |= IFF_UP | IFF_RUNNING - netdev_req(fd, SIOCSIFFLAGS, struct.pack('@h', flags)) - - # struct sockaddr_in ifr_netmask - req = struct.pack("@H", AF_INET) + struct.pack("=H4B", 0, 255, 0, 0, 0) - netdev_req(fd, SIOCSIFNETMASK, req) - -def disable_network(uid=None, gid=None): - """ - Disable networking in the current process if the kernel supports it, else - just return after logging to debug. To do this we need to create a new user - namespace, then map back to the original uid/gid. - - Arguments: - - - ``uid``: original user id. - - ``gid``: original user group id. - - No return value. 
- """ - libc = ctypes.CDLL('libc.so.6') - - # From sched.h - # New user namespace - CLONE_NEWUSER = 0x10000000 - # New network namespace - CLONE_NEWNET = 0x40000000 - - if uid is None: - uid = os.getuid() - if gid is None: - gid = os.getgid() - - ret = libc.unshare(CLONE_NEWNET | CLONE_NEWUSER) - if ret != 0: - logger.debug("System doesn't support disabling network without admin privs") - return - with open("/proc/self/uid_map", "w") as f: - f.write("%s %s 1" % (uid, uid)) - with open("/proc/self/setgroups", "w") as f: - f.write("deny") - with open("/proc/self/gid_map", "w") as f: - f.write("%s %s 1" % (gid, gid)) - -def export_proxies(d): - from bb.fetch2 import get_fetcher_environment - """ export common proxies variables from datastore to environment """ - newenv = get_fetcher_environment(d) - for v in newenv: - os.environ[v] = newenv[v] - -def load_plugins(logger, plugins, pluginpath): - def load_plugin(name): - logger.debug('Loading plugin %s' % name) - spec = importlib.machinery.PathFinder.find_spec(name, path=[pluginpath] ) - if spec: - mod = importlib.util.module_from_spec(spec) - spec.loader.exec_module(mod) - return mod - - logger.debug('Loading plugins from %s...' % pluginpath) - - expanded = (glob.glob(os.path.join(pluginpath, '*' + ext)) - for ext in python_extensions) - files = itertools.chain.from_iterable(expanded) - names = set(os.path.splitext(os.path.basename(fn))[0] for fn in files) - for name in names: - if name != '__init__': - plugin = load_plugin(name) - if hasattr(plugin, 'plugin_init'): - obj = plugin.plugin_init(plugins) - plugins.append(obj or plugin) - else: - plugins.append(plugin) - - -class LogCatcher(logging.Handler): - """Logging handler for collecting logged messages so you can check them later""" - def __init__(self): - self.messages = [] - logging.Handler.__init__(self, logging.WARNING) - def emit(self, record): - self.messages.append(bb.build.logformatter.format(record)) - def contains(self, message): - return (message in self.messages) - -def is_semver(version): - """ - Arguments: - - - ``version``: the version string. - - Returns ``True`` if the version string follow semantic versioning, ``False`` - otherwise. - - See https://semver.org/spec/v2.0.0.html. - """ - regex = re.compile( - r""" - ^ - (0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*) - (?:-( - (?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*) - (?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))* - ))? - (?:\+( - [0-9a-zA-Z-]+ - (?:\.[0-9a-zA-Z-]+)* - ))? - $ - """, re.VERBOSE) - - if regex.match(version) is None: - return False - - return True - -# Wrapper around os.rename which can handle cross device problems -# e.g. from container filesystems -def rename(src, dst): - try: - os.rename(src, dst) - except OSError as err: - if err.errno == 18: - # Invalid cross-device link error - shutil.move(src, dst) - else: - raise err - -@contextmanager -def environment(**envvars): - """ - Context manager to selectively update the environment with the specified mapping. - - No return value. - """ - backup = dict(os.environ) - try: - os.environ.update(envvars) - yield - finally: - for var in envvars: - if var in backup: - os.environ[var] = backup[var] - elif var in os.environ: - del os.environ[var] - -def is_local_uid(uid=''): - """ - Check whether uid is a local one or not. - Can't use pwd module since it gets all UIDs, not local ones only. - - Arguments: - - - ``uid``: user id. If not specified the user id is determined from - ``os.getuid()``. - - Returns ``True`` is the user id is local, ``False`` otherwise. 
- """ - if not uid: - uid = os.getuid() - with open('/etc/passwd', 'r') as f: - for line in f: - line_split = line.split(':') - if len(line_split) < 3: - continue - if str(uid) == line_split[2]: - return True - return False - -def mkstemp(suffix=None, prefix=None, dir=None, text=False): - """ - Generates a unique temporary file, independent of time. - - mkstemp() in glibc (at least) generates unique file names based on the - current system time. When combined with highly parallel builds, and - operating over NFS (e.g. shared sstate/downloads) this can result in - conflicts and race conditions. - - This function adds additional entropy to the file name so that a collision - is independent of time and thus extremely unlikely. - - Arguments: - - - ``suffix``: filename suffix. - - ``prefix``: filename prefix. - - ``dir``: directory where the file will be created. - - ``text``: if ``True``, the file is opened in text mode. - - Returns a tuple containing: - - - the file descriptor for the created file - - the name of the file. - """ - entropy = "".join(random.choices("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890", k=20)) - if prefix: - prefix = prefix + entropy - else: - prefix = tempfile.gettempprefix() + entropy - return tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir, text=text) - -def path_is_descendant(descendant, ancestor): - """ - Returns ``True`` if the path ``descendant`` is a descendant of ``ancestor`` - (including being equivalent to ``ancestor`` itself). Otherwise returns - ``False``. - - Correctly accounts for symlinks, bind mounts, etc. by using - ``os.path.samestat()`` to compare paths. - - May raise any exception that ``os.stat()`` raises. - - Arguments: - - - ``descendant``: path to check for being an ancestor. - - ``ancestor``: path to the ancestor ``descendant`` will be checked - against. - """ - - ancestor_stat = os.stat(ancestor) - - # Recurse up each directory component of the descendant to see if it is - # equivalent to the ancestor - check_dir = os.path.abspath(descendant).rstrip("/") - while check_dir: - check_stat = os.stat(check_dir) - if os.path.samestat(check_stat, ancestor_stat): - return True - check_dir = os.path.dirname(check_dir).rstrip("/") - - return False - -# Recomputing the sets in signal.py is expensive (bitbake -pP idle) -# so try and use _signal directly to avoid it -valid_signals = signal.valid_signals() -try: - import _signal - sigmask = _signal.pthread_sigmask -except ImportError: - sigmask = signal.pthread_sigmask - -# If we don't have a timeout of some kind and a process/thread exits badly (for example -# OOM killed) and held a lock, we'd just hang in the lock futex forever. It is better -# we exit at some point than hang. 5 minutes with no progress means we're probably deadlocked. -# This function can still deadlock python since it can't signal the other threads to exit -# (signals are handled in the main thread) and even os._exit() will wait on non-daemon threads -# to exit. 
-@contextmanager -def lock_timeout(lock): - try: - s = sigmask(signal.SIG_BLOCK, valid_signals) - held = lock.acquire(timeout=5*60) - if not held: - bb.server.process.serverlog("Couldn't get the lock for 5 mins, timed out, exiting.\n%s" % traceback.format_stack()) - os._exit(1) - yield held - finally: - lock.release() - sigmask(signal.SIG_SETMASK, s) - -# A version of lock_timeout without the check that the lock was locked and a shorter timeout -@contextmanager -def lock_timeout_nocheck(lock): - l = False - try: - s = sigmask(signal.SIG_BLOCK, valid_signals) - l = lock.acquire(timeout=10) - yield l - finally: - if l: - lock.release() - sigmask(signal.SIG_SETMASK, s) diff --git a/bitbake/lib/bb/xattr.py b/bitbake/lib/bb/xattr.py deleted file mode 100755 index 7b634944a4..0000000000 --- a/bitbake/lib/bb/xattr.py +++ /dev/null @@ -1,126 +0,0 @@ -#! /usr/bin/env python3 -# -# Copyright 2023 by Garmin Ltd. or its subsidiaries -# -# SPDX-License-Identifier: MIT - -import sys -import ctypes -import os -import errno - -libc = ctypes.CDLL("libc.so.6", use_errno=True) -fsencoding = sys.getfilesystemencoding() - - -libc.listxattr.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_size_t] -libc.llistxattr.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_size_t] - - -def listxattr(path, follow=True): - func = libc.listxattr if follow else libc.llistxattr - - os_path = os.fsencode(path) - - while True: - length = func(os_path, None, 0) - - if length < 0: - err = ctypes.get_errno() - raise OSError(err, os.strerror(err), str(path)) - - if length == 0: - return [] - - arr = ctypes.create_string_buffer(length) - - read_length = func(os_path, arr, length) - if read_length != length: - # Race! - continue - - return [a.decode(fsencoding) for a in arr.raw.split(b"\x00") if a] - - -libc.getxattr.argtypes = [ - ctypes.c_char_p, - ctypes.c_char_p, - ctypes.c_char_p, - ctypes.c_size_t, -] -libc.lgetxattr.argtypes = [ - ctypes.c_char_p, - ctypes.c_char_p, - ctypes.c_char_p, - ctypes.c_size_t, -] - - -def getxattr(path, name, follow=True): - func = libc.getxattr if follow else libc.lgetxattr - - os_path = os.fsencode(path) - os_name = os.fsencode(name) - - while True: - length = func(os_path, os_name, None, 0) - - if length < 0: - err = ctypes.get_errno() - if err == errno.ENODATA: - return None - raise OSError(err, os.strerror(err), str(path)) - - if length == 0: - return "" - - arr = ctypes.create_string_buffer(length) - - read_length = func(os_path, os_name, arr, length) - if read_length != length: - # Race! 
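The ctypes wrappers in `bb.xattr` behave like the `getfattr`/`setfattr` tools. A hedged sketch (assumes a file prepared beforehand with `setfattr -n user.demo -v hello /tmp/f`):

```python
from bb import xattr

print(xattr.listxattr("/tmp/f"))              # ['user.demo']
print(xattr.getxattr("/tmp/f", "user.demo"))  # b'hello' (raw bytes)
print(xattr.get_all_xattr("/tmp/f"))          # {'user.demo': b'hello'}
```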
- continue - - return arr.raw - - -def get_all_xattr(path, follow=True): - attrs = {} - - names = listxattr(path, follow) - - for name in names: - value = getxattr(path, name, follow) - if value is None: - # This can happen if a value is erased after listxattr is called, - # so ignore it - continue - attrs[name] = value - - return attrs - - -def main(): - import argparse - from pathlib import Path - - parser = argparse.ArgumentParser() - parser.add_argument("path", help="File Path", type=Path) - - args = parser.parse_args() - - attrs = get_all_xattr(args.path) - - for name, value in attrs.items(): - try: - value = value.decode(fsencoding) - except UnicodeDecodeError: - pass - - print(f"{name} = {value}") - - return 0 - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/bitbake/lib/bblayers/__init__.py b/bitbake/lib/bblayers/__init__.py deleted file mode 100644 index 78efd29750..0000000000 --- a/bitbake/lib/bblayers/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# -# Copyright BitBake Contributors -# -# SPDX-License-Identifier: GPL-2.0-only -# - -from pkgutil import extend_path -__path__ = extend_path(__path__, __name__) diff --git a/bitbake/lib/bblayers/action.py b/bitbake/lib/bblayers/action.py deleted file mode 100644 index a14f19948e..0000000000 --- a/bitbake/lib/bblayers/action.py +++ /dev/null @@ -1,279 +0,0 @@ -# -# Copyright BitBake Contributors -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import fnmatch -import logging -import os -import shutil -import sys -import tempfile - -from bb.cookerdata import findTopdir -import bb.utils - -from bblayers.common import LayerPlugin - -logger = logging.getLogger('bitbake-layers') - - -def plugin_init(plugins): - return ActionPlugin() - - -class ActionPlugin(LayerPlugin): - def do_add_layer(self, args): - """Add one or more layers to bblayers.conf.""" - layerdirs = [os.path.abspath(ldir) for ldir in args.layerdir] - - for layerdir in layerdirs: - if not os.path.exists(layerdir): - sys.stderr.write("Specified layer directory %s doesn't exist\n" % layerdir) - return 1 - - layer_conf = os.path.join(layerdir, 'conf', 'layer.conf') - if not os.path.exists(layer_conf): - sys.stderr.write("Specified layer directory %s doesn't contain a conf/layer.conf file\n" % layerdir) - return 1 - - bblayers_conf = os.path.join(findTopdir(),'conf', 'bblayers.conf') - if not os.path.exists(bblayers_conf): - sys.stderr.write("Unable to find bblayers.conf\n") - return 1 - - # Back up bblayers.conf to tempdir before we add layers - tempdir = tempfile.mkdtemp() - backup = tempdir + "/bblayers.conf.bak" - shutil.copy2(bblayers_conf, backup) - - try: - notadded, _ = bb.utils.edit_bblayers_conf(bblayers_conf, layerdirs, None) - if not (args.force or notadded): - self.tinfoil.modified_files() - try: - self.tinfoil.run_command('parseConfiguration') - except (bb.tinfoil.TinfoilUIException, bb.BBHandledException): - # Restore the back up copy of bblayers.conf - shutil.copy2(backup, bblayers_conf) - self.tinfoil.modified_files() - bb.fatal("Parse failure with the specified layer added, exiting.") - else: - for item in notadded: - sys.stderr.write("Specified layer %s is already in BBLAYERS\n" % item) - finally: - # Remove the back up copy of bblayers.conf - shutil.rmtree(tempdir) - - def do_remove_layer(self, args): - """Remove one or more layers from bblayers.conf.""" - bblayers_conf = os.path.join(findTopdir() ,'conf', 'bblayers.conf') - if not os.path.exists(bblayers_conf): - sys.stderr.write("Unable to find bblayers.conf\n") - return 1 - - layerdirs = [] - for 
item in args.layerdir: - if item.startswith('*'): - layerdir = item - elif not '/' in item: - layerdir = '*/%s' % item - else: - layerdir = os.path.abspath(item) - layerdirs.append(layerdir) - (_, notremoved) = bb.utils.edit_bblayers_conf(bblayers_conf, None, layerdirs) - if args.force > 1: - return 0 - self.tinfoil.modified_files() - if notremoved: - for item in notremoved: - sys.stderr.write("No layers matching %s found in BBLAYERS\n" % item) - return 1 - - def do_flatten(self, args): - """flatten layer configuration into a separate output directory. - -Takes the specified layers (or all layers in the current layer -configuration if none are specified) and builds a "flattened" directory -containing the contents of all layers, with any overlayed recipes removed -and bbappends appended to the corresponding recipes. Note that some manual -cleanup may still be necessary afterwards, in particular: - -* where non-recipe files (such as patches) are overwritten (the flatten - command will show a warning for these) -* where anything beyond the normal layer setup has been added to - layer.conf (only the lowest priority number layer's layer.conf is used) -* overridden/appended items from bbappends will need to be tidied up -* when the flattened layers do not have the same directory structure (the - flatten command should show a warning when this will cause a problem) - -Warning: if you flatten several layers where another layer is intended to -be used "inbetween" them (in layer priority order) such that recipes / -bbappends in the layers interact, and then attempt to use the new output -layer together with that other layer, you may no longer get the same -build results (as the layer priority order has effectively changed). -""" - if len(args.layer) == 1: - logger.error('If you specify layers to flatten you must specify at least two') - return 1 - - outputdir = args.outputdir - if os.path.exists(outputdir) and os.listdir(outputdir): - logger.error('Directory %s exists and is non-empty, please clear it out first' % outputdir) - return 1 - - layers = self.bblayers - if len(args.layer) > 2: - layernames = args.layer - found_layernames = [] - found_layerdirs = [] - for layerdir in layers: - layername = self.get_layer_name(layerdir) - if layername in layernames: - found_layerdirs.append(layerdir) - found_layernames.append(layername) - - for layername in layernames: - if not layername in found_layernames: - logger.error('Unable to find layer %s in current configuration, please run "%s show-layers" to list configured layers' % (layername, os.path.basename(sys.argv[0]))) - return - layers = found_layerdirs - else: - layernames = [] - - # Ensure a specified path matches our list of layers - def layer_path_match(path): - for layerdir in layers: - if path.startswith(os.path.join(layerdir, '')): - return layerdir - return None - - applied_appends = [] - for layer in layers: - overlayed = set() - for mc in self.tinfoil.cooker.multiconfigs: - for f in self.tinfoil.cooker.collections[mc].overlayed.keys(): - for of in self.tinfoil.cooker.collections[mc].overlayed[f]: - if of.startswith(layer): - overlayed.add(of) - - logger.plain('Copying files from %s...' 
% layer ) - for root, dirs, files in os.walk(layer): - if '.git' in dirs: - dirs.remove('.git') - if '.hg' in dirs: - dirs.remove('.hg') - - for f1 in files: - f1full = os.sep.join([root, f1]) - if f1full in overlayed: - logger.plain(' Skipping overlayed file %s' % f1full ) - else: - ext = os.path.splitext(f1)[1] - if ext != '.bbappend': - fdest = f1full[len(layer):] - fdest = os.path.normpath(os.sep.join([outputdir,fdest])) - bb.utils.mkdirhier(os.path.dirname(fdest)) - if os.path.exists(fdest): - if f1 == 'layer.conf' and root.endswith('/conf'): - logger.plain(' Skipping layer config file %s' % f1full ) - continue - else: - logger.warning('Overwriting file %s', fdest) - bb.utils.copyfile(f1full, fdest) - if ext == '.bb': - appends = set() - for mc in self.tinfoil.cooker.multiconfigs: - appends |= set(self.tinfoil.cooker.collections[mc].get_file_appends(f1full)) - for append in appends: - if layer_path_match(append): - logger.plain(' Applying append %s to %s' % (append, fdest)) - self.apply_append(append, fdest) - applied_appends.append(append) - - # Take care of when some layers are excluded and yet we have included bbappends for those recipes - bbappends = set() - for mc in self.tinfoil.cooker.multiconfigs: - bbappends |= set(self.tinfoil.cooker.collections[mc].bbappends) - - for b in bbappends: - (recipename, appendname) = b - if appendname not in applied_appends: - first_append = None - layer = layer_path_match(appendname) - if layer: - if first_append: - self.apply_append(appendname, first_append) - else: - fdest = appendname[len(layer):] - fdest = os.path.normpath(os.sep.join([outputdir,fdest])) - bb.utils.mkdirhier(os.path.dirname(fdest)) - bb.utils.copyfile(appendname, fdest) - first_append = fdest - - # Get the regex for the first layer in our list (which is where the conf/layer.conf file will - # have come from) - first_regex = None - layerdir = layers[0] - for layername, pattern, regex, _ in self.tinfoil.cooker.bbfile_config_priorities: - if regex.match(os.path.join(layerdir, 'test')): - first_regex = regex - break - - if first_regex: - # Find the BBFILES entries that match (which will have come from this conf/layer.conf file) - bbfiles = str(self.tinfoil.config_data.getVar('BBFILES')).split() - bbfiles_layer = [] - for item in bbfiles: - if first_regex.match(item): - newpath = os.path.join(outputdir, item[len(layerdir)+1:]) - bbfiles_layer.append(newpath) - - if bbfiles_layer: - # Check that all important layer files match BBFILES - for root, dirs, files in os.walk(outputdir): - for f1 in files: - ext = os.path.splitext(f1)[1] - if ext in ['.bb', '.bbappend']: - f1full = os.sep.join([root, f1]) - entry_found = False - for item in bbfiles_layer: - if fnmatch.fnmatch(f1full, item): - entry_found = True - break - if not entry_found: - logger.warning("File %s does not match the flattened layer's BBFILES setting, you may need to edit conf/layer.conf or move the file elsewhere" % f1full) - - self.tinfoil.modified_files() - - - def get_file_layer(self, filename): - layerdir = self.get_file_layerdir(filename) - if layerdir: - return self.get_layer_name(layerdir) - else: - return '?' 
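The BBFILES check in do_flatten() above reduces to glob-matching every .bb/.bbappend path in the flattened output directory against the patterns inherited from the first layer's conf/layer.conf. A minimal, self-contained sketch of that matching step; the directory and pattern list below are hypothetical:

    import fnmatch
    import os

    def unmatched_recipes(outputdir, bbfiles_patterns):
        # Yield recipe files under outputdir that no BBFILES pattern matches;
        # these are the files do_flatten() warns about.
        for root, dirs, files in os.walk(outputdir):
            for name in files:
                if os.path.splitext(name)[1] in ('.bb', '.bbappend'):
                    path = os.path.join(root, name)
                    if not any(fnmatch.fnmatch(path, p) for p in bbfiles_patterns):
                        yield path

    # Example (hypothetical paths):
    # list(unmatched_recipes('/tmp/flattened', ['/tmp/flattened/recipes-*/*/*.bb']))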
- - def get_file_layerdir(self, filename): - layer = bb.utils.get_file_layer(filename, self.tinfoil.config_data) - return self.bbfile_collections.get(layer, None) - - def apply_append(self, appendname, recipename): - with open(appendname, 'r') as appendfile: - with open(recipename, 'a') as recipefile: - recipefile.write('\n') - recipefile.write('##### bbappended from %s #####\n' % self.get_file_layer(appendname)) - recipefile.writelines(appendfile.readlines()) - - def register_commands(self, sp): - parser_add_layer = self.add_command(sp, 'add-layer', self.do_add_layer, parserecipes=False) - parser_add_layer.add_argument('layerdir', nargs='+', help='Layer directory/directories to add') - - parser_remove_layer = self.add_command(sp, 'remove-layer', self.do_remove_layer, parserecipes=False) - parser_remove_layer.add_argument('layerdir', nargs='+', help='Layer directory/directories to remove (wildcards allowed, enclose in quotes to avoid shell expansion)') - parser_remove_layer.set_defaults(func=self.do_remove_layer) - - parser_flatten = self.add_command(sp, 'flatten', self.do_flatten) - parser_flatten.add_argument('layer', nargs='*', help='Optional layer(s) to flatten (otherwise all are flattened)') - parser_flatten.add_argument('outputdir', help='Output directory') diff --git a/bitbake/lib/bblayers/common.py b/bitbake/lib/bblayers/common.py deleted file mode 100644 index f7b9cee371..0000000000 --- a/bitbake/lib/bblayers/common.py +++ /dev/null @@ -1,39 +0,0 @@ -# -# Copyright BitBake Contributors -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import argparse -import logging -import os - -logger = logging.getLogger('bitbake-layers') - - -class LayerPlugin(): - def __init__(self): - self.tinfoil = None - self.bblayers = [] - - def tinfoil_init(self, tinfoil): - self.tinfoil = tinfoil - self.bblayers = (self.tinfoil.config_data.getVar('BBLAYERS') or "").split() - layerconfs = self.tinfoil.config_data.varhistory.get_variable_items_files('BBFILE_COLLECTIONS') - self.bbfile_collections = {layer: os.path.dirname(os.path.dirname(path)) for layer, path in layerconfs.items()} - - @staticmethod - def add_command(subparsers, cmdname, function, parserecipes=True, *args, **kwargs): - """Convert docstring for function to help.""" - docsplit = function.__doc__.splitlines() - help = docsplit[0] - if len(docsplit) > 1: - desc = '\n'.join(docsplit[1:]) - else: - desc = help - subparser = subparsers.add_parser(cmdname, *args, help=help, description=desc, formatter_class=argparse.RawTextHelpFormatter, **kwargs) - subparser.set_defaults(func=function, parserecipes=parserecipes) - return subparser - - def get_layer_name(self, layerdir): - return os.path.basename(layerdir.rstrip(os.sep)) diff --git a/bitbake/lib/bblayers/layerindex.py b/bitbake/lib/bblayers/layerindex.py deleted file mode 100644 index ba91fac669..0000000000 --- a/bitbake/lib/bblayers/layerindex.py +++ /dev/null @@ -1,256 +0,0 @@ -# -# Copyright BitBake Contributors -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import layerindexlib - -import argparse -import logging -import os -import subprocess - -from bblayers.action import ActionPlugin - -logger = logging.getLogger('bitbake-layers') - - -def plugin_init(plugins): - return LayerIndexPlugin() - - -class LayerIndexPlugin(ActionPlugin): - """Subcommands for interacting with the layer index. - - This class inherits ActionPlugin to get do_add_layer. 
- """ - - def get_fetch_layer(self, fetchdir, url, subdir, fetch_layer, branch, shallow=False): - layername = self.get_layer_name(url) - if os.path.splitext(layername)[1] == '.git': - layername = os.path.splitext(layername)[0] - repodir = os.path.join(fetchdir, layername) - layerdir = os.path.join(repodir, subdir) - if not os.path.exists(repodir): - if fetch_layer: - cmd = ['git', 'clone'] - if shallow: - cmd.extend(['--depth', '1']) - if branch: - cmd.extend(['-b' , branch]) - cmd.extend([url, repodir]) - result = subprocess.call(cmd) - if result: - logger.error("Failed to download %s (%s)" % (url, branch)) - return None, None, None - else: - return subdir, layername, layerdir - else: - logger.plain("Repository %s needs to be fetched" % url) - return subdir, layername, layerdir - elif os.path.exists(repodir) and branch: - """ - If the repo is already cloned, ensure it is on the correct branch, - switching branches if necessary and possible. - """ - base_cmd = ['git', '--git-dir=%s/.git' % repodir, '--work-tree=%s' % repodir] - cmd = base_cmd + ['branch'] - completed_proc = subprocess.run(cmd, text=True, capture_output=True) - if completed_proc.returncode: - logger.error("Unable to validate repo %s (%s)" % (repodir, stderr)) - return None, None, None - else: - if branch != completed_proc.stdout[2:-1]: - cmd = base_cmd + ['status', '--short'] - completed_proc = subprocess.run(cmd, text=True, capture_output=True) - if completed_proc.stdout.count('\n') != 0: - logger.warning("There are uncommitted changes in repo %s" % repodir) - cmd = base_cmd + ['checkout', branch] - completed_proc = subprocess.run(cmd, text=True, capture_output=True) - if completed_proc.returncode: - # Could be due to original shallow clone on a different branch for example - logger.error("Unable to automatically switch %s to desired branch '%s' (%s)" - % (repodir, branch, completed_proc.stderr)) - return None, None, None - return subdir, layername, layerdir - elif os.path.exists(layerdir): - return subdir, layername, layerdir - else: - logger.error("%s is not in %s" % (url, subdir)) - return None, None, None - - def do_layerindex_fetch(self, args): - """Fetches a layer from a layer index along with its dependent layers, and adds them to conf/bblayers.conf. -""" - - def _construct_url(baseurls, branches): - urls = [] - for baseurl in baseurls: - if baseurl[-1] != '/': - baseurl += '/' - - if not baseurl.startswith('cooker'): - baseurl += "api/" - - if branches: - baseurl += ";branch=%s" % ','.join(branches) - - urls.append(baseurl) - - return urls - - - # Set the default... - if args.branch: - branches = [args.branch] - else: - branches = (self.tinfoil.config_data.getVar('LAYERSERIES_CORENAMES') or 'master').split() - logger.debug('Trying branches: %s' % branches) - - ignore_layers = [] - if args.ignore: - ignore_layers.extend(args.ignore.split(',')) - - # Load the cooker DB - cookerIndex = layerindexlib.LayerIndex(self.tinfoil.config_data) - cookerIndex.load_layerindex('cooker://', load='layerDependencies') - - # Fast path, check if we already have what has been requested! 
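# (For orientation, the layerindexlib calls used in this method follow the
#  shape below; the layer name 'meta-example' is a hypothetical placeholder:
#      index = layerindexlib.LayerIndex(tinfoil.config_data)
#      index.load_layerindex('cooker://', load='layerDependencies')
#      deps, invalid = index.find_dependencies(names=['meta-example'], ignores=[])
#  deps maps each layer-branch key to a list whose first entry is the
#  LayerBranch object itself and whose remaining entries are its dependency
#  objects, which is how the display loop further down unpacks it.)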
- (dependencies, invalidnames) = cookerIndex.find_dependencies(names=args.layername, ignores=ignore_layers) - if not args.show_only and not invalidnames: - logger.plain("You already have the requested layer(s): %s" % args.layername) - return 0 - - # The information to show is already in the cookerIndex - if invalidnames: - # General URL to use to access the layer index - # While there is ONE right now, we're expect users could enter several - apiurl = self.tinfoil.config_data.getVar('BBLAYERS_LAYERINDEX_URL').split() - if not apiurl: - logger.error("Cannot get BBLAYERS_LAYERINDEX_URL") - return 1 - - remoteIndex = layerindexlib.LayerIndex(self.tinfoil.config_data) - - for remoteurl in _construct_url(apiurl, branches): - logger.plain("Loading %s..." % remoteurl) - remoteIndex.load_layerindex(remoteurl) - - if remoteIndex.is_empty(): - logger.error("Remote layer index %s is empty for branches %s" % (apiurl, branches)) - return 1 - - lIndex = cookerIndex + remoteIndex - - (dependencies, invalidnames) = lIndex.find_dependencies(names=args.layername, ignores=ignore_layers) - - if invalidnames: - for invaluename in invalidnames: - logger.error('Layer "%s" not found in layer index' % invaluename) - return 1 - - logger.plain("%s %s %s" % ("Layer".ljust(49), "Git repository (branch)".ljust(54), "Subdirectory")) - logger.plain('=' * 125) - - for deplayerbranch in dependencies: - layerBranch = dependencies[deplayerbranch][0] - - # TODO: Determine display behavior - # This is the local content, uncomment to hide local - # layers from the display. - #if layerBranch.index.config['TYPE'] == 'cooker': - # continue - - layerDeps = dependencies[deplayerbranch][1:] - - requiredby = [] - recommendedby = [] - for dep in layerDeps: - if dep.required: - requiredby.append(dep.layer.name) - else: - recommendedby.append(dep.layer.name) - - logger.plain('%s %s %s' % (("%s:%s:%s" % - (layerBranch.index.config['DESCRIPTION'], - layerBranch.branch.name, - layerBranch.layer.name)).ljust(50), - ("%s (%s)" % (layerBranch.layer.vcs_url, - layerBranch.actual_branch)).ljust(55), - layerBranch.vcs_subdir - )) - if requiredby: - logger.plain(' required by: %s' % ' '.join(requiredby)) - if recommendedby: - logger.plain(' recommended by: %s' % ' '.join(recommendedby)) - - if dependencies: - if args.fetchdir: - fetchdir = args.fetchdir - else: - fetchdir = self.tinfoil.config_data.getVar('BBLAYERS_FETCH_DIR') - if not fetchdir: - logger.error("Cannot get BBLAYERS_FETCH_DIR") - return 1 - - if not os.path.exists(fetchdir): - os.makedirs(fetchdir) - - addlayers = [] - - for deplayerbranch in dependencies: - layerBranch = dependencies[deplayerbranch][0] - - if layerBranch.index.config['TYPE'] == 'cooker': - # Anything loaded via cooker is already local, skip it - continue - - subdir, name, layerdir = self.get_fetch_layer(fetchdir, - layerBranch.layer.vcs_url, - layerBranch.vcs_subdir, - not args.show_only, - layerBranch.actual_branch, - args.shallow) - if not name: - # Error already shown - return 1 - addlayers.append((subdir, name, layerdir)) - if not args.show_only: - localargs = argparse.Namespace() - localargs.layerdir = [] - localargs.force = args.force - for subdir, name, layerdir in addlayers: - if os.path.exists(layerdir): - if subdir: - logger.plain("Adding layer \"%s\" (%s) to conf/bblayers.conf" % (subdir, layerdir)) - else: - logger.plain("Adding layer \"%s\" (%s) to conf/bblayers.conf" % (name, layerdir)) - localargs.layerdir.append(layerdir) - else: - break - - if localargs.layerdir: - self.do_add_layer(localargs) - - 
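The clone-or-checkout logic in get_fetch_layer() above boils down to: clone (optionally shallow) when the repository is missing, otherwise try to check out the requested branch and give up rather than force-switch on failure. A minimal sketch of the same flow with plain subprocess calls, assuming git is on PATH; the URL and paths are placeholders:

    import os
    import subprocess

    def clone_or_switch(repodir, url, branch, shallow=True):
        # Clone url at the given branch, or switch an existing clone to it.
        # Returns True on success, False on any git failure.
        if not os.path.exists(repodir):
            cmd = ['git', 'clone']
            if shallow:
                cmd += ['--depth', '1']
            cmd += ['-b', branch, url, repodir]
            return subprocess.call(cmd) == 0
        git = ['git', '--git-dir=%s/.git' % repodir, '--work-tree=%s' % repodir]
        proc = subprocess.run(git + ['checkout', branch], text=True, capture_output=True)
        return proc.returncode == 0

    # clone_or_switch('/tmp/meta-example', 'https://git.example.com/meta-example.git', 'master')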
def do_layerindex_show_depends(self, args): - """Find layer dependencies from layer index. -""" - args.show_only = True - args.ignore = [] - args.fetchdir = "" - args.shallow = True - self.do_layerindex_fetch(args) - - def register_commands(self, sp): - parser_layerindex_fetch = self.add_command(sp, 'layerindex-fetch', self.do_layerindex_fetch, parserecipes=False) - parser_layerindex_fetch.add_argument('-n', '--show-only', help='show dependencies and do nothing else', action='store_true') - parser_layerindex_fetch.add_argument('-b', '--branch', help='branch name to fetch') - parser_layerindex_fetch.add_argument('-s', '--shallow', help='do only shallow clones (--depth=1)', action='store_true') - parser_layerindex_fetch.add_argument('-i', '--ignore', help='assume the specified layers do not need to be fetched/added (separate multiple layers with commas, no spaces)', metavar='LAYER') - parser_layerindex_fetch.add_argument('-f', '--fetchdir', help='directory to fetch the layer(s) into (will be created if it does not exist)') - parser_layerindex_fetch.add_argument('layername', nargs='+', help='layer to fetch') - - parser_layerindex_show_depends = self.add_command(sp, 'layerindex-show-depends', self.do_layerindex_show_depends, parserecipes=False) - parser_layerindex_show_depends.add_argument('-b', '--branch', help='branch name to fetch') - parser_layerindex_show_depends.add_argument('layername', nargs='+', help='layer to query') diff --git a/bitbake/lib/bblayers/query.py b/bitbake/lib/bblayers/query.py deleted file mode 100644 index eb7cb465b4..0000000000 --- a/bitbake/lib/bblayers/query.py +++ /dev/null @@ -1,542 +0,0 @@ -# -# Copyright BitBake Contributors -# -# SPDX-License-Identifier: GPL-2.0-only -# - -import collections -import fnmatch -import logging -import sys -import os -import re - -import bb.utils - -from bblayers.common import LayerPlugin - -logger = logging.getLogger('bitbake-layers') - - -def plugin_init(plugins): - return QueryPlugin() - - -class QueryPlugin(LayerPlugin): - def __init__(self): - super(QueryPlugin, self).__init__() - self.collection_res = {} - - def do_show_layers(self, args): - """show current configured layers.""" - logger.plain("%s %s %s" % ("layer".ljust(20), "path".ljust(70), "priority")) - logger.plain('=' * 104) - for layer, _, regex, pri in self.tinfoil.cooker.bbfile_config_priorities: - layerdir = self.bbfile_collections.get(layer, None) - layername = layer - logger.plain("%s %s %s" % (layername.ljust(20), layerdir.ljust(70), pri)) - - def version_str(self, pe, pv, pr = None): - verstr = "%s" % pv - if pr: - verstr = "%s-%s" % (verstr, pr) - if pe: - verstr = "%s:%s" % (pe, verstr) - return verstr - - def do_show_overlayed(self, args): - """list overlayed recipes (where the same recipe exists in another layer) - -Lists the names of overlayed recipes and the available versions in each -layer, with the preferred version first. Note that skipped recipes that -are overlayed will also be listed, with a " (skipped)" suffix. 
-""" - - items_listed = self.list_recipes('Overlayed recipes', None, True, args.same_version, args.filenames, False, True, None, False, None, args.mc) - - # Check for overlayed .bbclass files - classes = collections.defaultdict(list) - for layerdir in self.bblayers: - for c in ["classes-global", "classes-recipe", "classes"]: - classdir = os.path.join(layerdir, c) - if os.path.exists(classdir): - for classfile in os.listdir(classdir): - if os.path.splitext(classfile)[1] == '.bbclass': - classes[classfile].append(classdir) - - # Locating classes and other files is a bit more complicated than recipes - - # layer priority is not a factor; instead BitBake uses the first matching - # file in BBPATH, which is manipulated directly by each layer's - # conf/layer.conf in turn, thus the order of layers in bblayers.conf is a - # factor - however, each layer.conf is free to either prepend or append to - # BBPATH (or indeed do crazy stuff with it). Thus the order in BBPATH might - # not be exactly the order present in bblayers.conf either. - bbpath = str(self.tinfoil.config_data.getVar('BBPATH')) - overlayed_class_found = False - for (classfile, classdirs) in classes.items(): - if len(classdirs) > 1: - if not overlayed_class_found: - logger.plain('=== Overlayed classes ===') - overlayed_class_found = True - - mainfile = bb.utils.which(bbpath, os.path.join('classes', classfile)) - if args.filenames: - logger.plain('%s' % mainfile) - else: - # We effectively have to guess the layer here - logger.plain('%s:' % classfile) - mainlayername = '?' - for layerdir in self.bblayers: - classdir = os.path.join(layerdir, 'classes') - if mainfile.startswith(classdir): - mainlayername = self.get_layer_name(layerdir) - logger.plain(' %s' % mainlayername) - for classdir in classdirs: - fullpath = os.path.join(classdir, classfile) - if fullpath != mainfile: - if args.filenames: - print(' %s' % fullpath) - else: - print(' %s' % self.get_layer_name(os.path.dirname(classdir))) - - if overlayed_class_found: - items_listed = True; - - if not items_listed: - logger.plain('No overlayed files found.') - - def do_show_recipes(self, args): - """list available recipes, showing the layer they are provided by - -Lists the names of recipes and the available versions in each -layer, with the preferred version first. Optionally you may specify -pnspec to match a specified recipe name (supports wildcards). Note that -skipped recipes will also be listed, with a " (skipped)" suffix. 
-""" - - inheritlist = args.inherits.split(',') if args.inherits else [] - if inheritlist or args.pnspec or args.multiple: - title = 'Matching recipes:' - else: - title = 'Available recipes:' - self.list_recipes(title, args.pnspec, False, False, args.filenames, args.recipes_only, args.multiple, args.layer, args.bare, inheritlist, args.mc) - - def list_recipes(self, title, pnspec, show_overlayed_only, show_same_ver_only, show_filenames, show_recipes_only, show_multi_provider_only, selected_layer, bare, inherits, mc): - if inherits: - bbpath = str(self.tinfoil.config_data.getVar('BBPATH')) - for classname in inherits: - found = False - for c in ["classes-global", "classes-recipe", "classes"]: - cfile = c + '/%s.bbclass' % classname - if bb.utils.which(bbpath, cfile, history=False): - found = True - break - if not found: - logger.error('No class named %s found in BBPATH', classname) - sys.exit(1) - - pkg_pn = self.tinfoil.cooker.recipecaches[mc].pkg_pn - (latest_versions, preferred_versions, required_versions) = self.tinfoil.find_providers(mc) - allproviders = self.tinfoil.get_all_providers(mc) - - # Ensure we list skipped recipes - # We are largely guessing about PN, PV and the preferred version here, - # but we have no choice since skipped recipes are not fully parsed - skiplist = list(self.tinfoil.cooker.skiplist_by_mc[mc].keys()) - - if mc: - skiplist = [s.removeprefix(f'mc:{mc}:') for s in skiplist] - - for fn in skiplist: - recipe_parts = os.path.splitext(os.path.basename(fn))[0].split('_') - p = recipe_parts[0] - if len(recipe_parts) > 1: - ver = (None, recipe_parts[1], None) - else: - ver = (None, 'unknown', None) - allproviders[p].append((ver, fn)) - if not p in pkg_pn: - pkg_pn[p] = 'dummy' - preferred_versions[p] = (ver, fn) - - def print_item(f, pn, ver, layer, ispref): - if not selected_layer or layer == selected_layer: - if not bare and f in skiplist: - skipped = ' (skipped: %s)' % self.tinfoil.cooker.skiplist_by_mc[mc][f].skipreason - else: - skipped = '' - if show_filenames: - if ispref: - logger.plain("%s%s", f, skipped) - else: - logger.plain(" %s%s", f, skipped) - elif show_recipes_only: - if pn not in show_unique_pn: - show_unique_pn.append(pn) - logger.plain("%s%s", pn, skipped) - else: - if ispref: - logger.plain("%s:", pn) - logger.plain(" %s %s%s", layer.ljust(20), ver, skipped) - - global_inherit = (self.tinfoil.config_data.getVar('INHERIT') or "").split() - cls_re = re.compile('classes.*/') - - preffiles = [] - show_unique_pn = [] - items_listed = False - for p in sorted(pkg_pn): - if pnspec: - found=False - for pnm in pnspec: - if fnmatch.fnmatch(p, pnm): - found=True - break - if not found: - continue - - if len(allproviders[p]) > 1 or not show_multi_provider_only: - pref = preferred_versions[p] - realfn = bb.cache.virtualfn2realfn(pref[1]) - preffile = realfn[0] - - # We only display once per recipe, we should prefer non extended versions of the - # recipe if present (so e.g. in OpenEmbedded, openssl rather than nativesdk-openssl - # which would otherwise sort first). 
- if realfn[1] and realfn[0] in self.tinfoil.cooker.recipecaches[mc].pkg_fn: - continue - - if inherits: - matchcount = 0 - recipe_inherits = self.tinfoil.cooker_data.inherits.get(preffile, []) - for cls in recipe_inherits: - if cls_re.match(cls): - continue - classname = os.path.splitext(os.path.basename(cls))[0] - if classname in global_inherit: - continue - elif classname in inherits: - matchcount += 1 - if matchcount != len(inherits): - # No match - skip this recipe - continue - - if preffile not in preffiles: - preflayer = self.get_file_layer(preffile) - multilayer = False - same_ver = True - provs = [] - for prov in allproviders[p]: - provfile = bb.cache.virtualfn2realfn(prov[1])[0] - provlayer = self.get_file_layer(provfile) - provs.append((provfile, provlayer, prov[0])) - if provlayer != preflayer: - multilayer = True - if prov[0] != pref[0]: - same_ver = False - if (multilayer or not show_overlayed_only) and (same_ver or not show_same_ver_only): - if not items_listed: - logger.plain('=== %s ===' % title) - items_listed = True - print_item(preffile, p, self.version_str(pref[0][0], pref[0][1]), preflayer, True) - for (provfile, provlayer, provver) in provs: - if provfile != preffile: - print_item(provfile, p, self.version_str(provver[0], provver[1]), provlayer, False) - # Ensure we don't show two entries for BBCLASSEXTENDed recipes - preffiles.append(preffile) - - return items_listed - - def get_file_layer(self, filename): - layerdir = self.get_file_layerdir(filename) - if layerdir: - return self.get_layer_name(layerdir) - else: - return '?' - - def get_collection_res(self): - if not self.collection_res: - self.collection_res = bb.utils.get_collection_res(self.tinfoil.config_data) - return self.collection_res - - def get_file_layerdir(self, filename): - layer = bb.utils.get_file_layer(filename, self.tinfoil.config_data, self.get_collection_res()) - return self.bbfile_collections.get(layer, None) - - def remove_layer_prefix(self, f): - """Remove the layer_dir prefix, e.g., f = /path/to/layer_dir/foo/blah, the - return value will be: layer_dir/foo/blah""" - f_layerdir = self.get_file_layerdir(f) - if not f_layerdir: - return f - prefix = os.path.join(os.path.dirname(f_layerdir), '') - return f[len(prefix):] if f.startswith(prefix) else f - - def do_show_appends(self, args): - """list bbappend files and recipe files they apply to - -Lists recipes with the bbappends that apply to them as subitems. 
-""" - if args.pnspec: - logger.plain('=== Matched appended recipes ===') - else: - logger.plain('=== Appended recipes ===') - - - cooker_data = self.tinfoil.cooker.recipecaches[args.mc] - - pnlist = list(cooker_data.pkg_pn.keys()) - pnlist.sort() - appends = False - for pn in pnlist: - if args.pnspec: - found=False - for pnm in args.pnspec: - if fnmatch.fnmatch(pn, pnm): - found=True - break - if not found: - continue - - if self.show_appends_for_pn(pn, cooker_data, args.mc): - appends = True - - if not args.pnspec and self.show_appends_for_skipped(args.mc): - appends = True - - if not appends: - logger.plain('No append files found') - - def show_appends_for_pn(self, pn, cooker_data, mc): - filenames = cooker_data.pkg_pn[pn] - if mc: - pn = "mc:%s:%s" % (mc, pn) - - best = self.tinfoil.find_best_provider(pn) - best_filename = os.path.basename(best[3]) - - return self.show_appends_output(filenames, best_filename) - - def show_appends_for_skipped(self, mc): - filenames = [os.path.basename(f) - for f in self.tinfoil.cooker.skiplist_by_mc[mc].keys()] - return self.show_appends_output(filenames, None, " (skipped)") - - def show_appends_output(self, filenames, best_filename, name_suffix = ''): - appended, missing = self.get_appends_for_files(filenames) - if appended: - for basename, appends in appended: - logger.plain('%s%s:', basename, name_suffix) - for append in appends: - logger.plain(' %s', append) - - if best_filename: - if best_filename in missing: - logger.warning('%s: missing append for preferred version', - best_filename) - return True - else: - return False - - def get_appends_for_files(self, filenames): - appended, notappended = [], [] - for filename in filenames: - _, cls, mc = bb.cache.virtualfn2realfn(filename) - if cls: - continue - - basename = os.path.basename(filename) - appends = self.tinfoil.cooker.collections[mc].get_file_appends(basename) - if appends: - appended.append((basename, list(appends))) - else: - notappended.append(basename) - return appended, notappended - - def do_show_cross_depends(self, args): - """Show dependencies between recipes that cross layer boundaries. - -Figure out the dependencies between recipes that cross layer boundaries. - -NOTE: .bbappend files can impact the dependencies. -""" - ignore_layers = (args.ignore or '').split(',') - - pkg_fn = self.tinfoil.cooker_data.pkg_fn - bbpath = str(self.tinfoil.config_data.getVar('BBPATH')) - self.require_re = re.compile(r"require\s+(.+)") - self.include_re = re.compile(r"include\s+(.+)") - self.inherit_re = re.compile(r"inherit\s+(.+)") - - global_inherit = (self.tinfoil.config_data.getVar('INHERIT') or "").split() - - # The bb's DEPENDS and RDEPENDS - for f in pkg_fn: - f = bb.cache.virtualfn2realfn(f)[0] - # Get the layername that the file is in - layername = self.get_file_layer(f) - - # The DEPENDS - deps = self.tinfoil.cooker_data.deps[f] - for pn in deps: - if pn in self.tinfoil.cooker_data.pkg_pn: - best = self.tinfoil.find_best_provider(pn) - self.check_cross_depends("DEPENDS", layername, f, best[3], args.filenames, ignore_layers) - - # The RDPENDS - all_rdeps = self.tinfoil.cooker_data.rundeps[f].values() - # Remove the duplicated or null one. 
- sorted_rdeps = {} - # The all_rdeps is the list in list, so we need two for loops - for k1 in all_rdeps: - for k2 in k1: - sorted_rdeps[k2] = 1 - all_rdeps = sorted_rdeps.keys() - for rdep in all_rdeps: - all_p, best = self.tinfoil.get_runtime_providers(rdep) - if all_p: - if f in all_p: - # The recipe provides this one itself, ignore - continue - self.check_cross_depends("RDEPENDS", layername, f, best, args.filenames, ignore_layers) - - # The RRECOMMENDS - all_rrecs = self.tinfoil.cooker_data.runrecs[f].values() - # Remove the duplicated or null one. - sorted_rrecs = {} - # The all_rrecs is the list in list, so we need two for loops - for k1 in all_rrecs: - for k2 in k1: - sorted_rrecs[k2] = 1 - all_rrecs = sorted_rrecs.keys() - for rrec in all_rrecs: - all_p, best = self.tinfoil.get_runtime_providers(rrec) - if all_p: - if f in all_p: - # The recipe provides this one itself, ignore - continue - self.check_cross_depends("RRECOMMENDS", layername, f, best, args.filenames, ignore_layers) - - # The inherit class - cls_re = re.compile('classes.*/') - if f in self.tinfoil.cooker_data.inherits: - inherits = self.tinfoil.cooker_data.inherits[f] - for cls in inherits: - # The inherits' format is [classes/cls, /path/to/classes/cls] - # ignore the classes/cls. - if not cls_re.match(cls): - classname = os.path.splitext(os.path.basename(cls))[0] - if classname in global_inherit: - continue - inherit_layername = self.get_file_layer(cls) - if inherit_layername != layername and not inherit_layername in ignore_layers: - if not args.filenames: - f_short = self.remove_layer_prefix(f) - cls = self.remove_layer_prefix(cls) - else: - f_short = f - logger.plain("%s inherits %s" % (f_short, cls)) - - # The 'require/include xxx' in the bb file - pv_re = re.compile(r"\${PV}") - with open(f, 'r') as fnfile: - line = fnfile.readline() - while line: - m, keyword = self.match_require_include(line) - # Found the 'require/include xxxx' - if m: - needed_file = m.group(1) - # Replace the ${PV} with the real PV - if pv_re.search(needed_file) and f in self.tinfoil.cooker_data.pkg_pepvpr: - pv = self.tinfoil.cooker_data.pkg_pepvpr[f][1] - needed_file = re.sub(r"\${PV}", pv, needed_file) - self.print_cross_files(bbpath, keyword, layername, f, needed_file, args.filenames, ignore_layers) - line = fnfile.readline() - - # The "require/include xxx" in conf/machine/*.conf, .inc and .bbclass - conf_re = re.compile(r".*/conf/machine/[^\/]*\.conf$") - inc_re = re.compile(r".*\.inc$") - # The "inherit xxx" in .bbclass - bbclass_re = re.compile(r".*\.bbclass$") - for layerdir in self.bblayers: - layername = self.get_layer_name(layerdir) - for dirpath, dirnames, filenames in os.walk(layerdir): - for name in filenames: - f = os.path.join(dirpath, name) - s = conf_re.match(f) or inc_re.match(f) or bbclass_re.match(f) - if s: - with open(f, 'r') as ffile: - line = ffile.readline() - while line: - m, keyword = self.match_require_include(line) - # Only bbclass has the "inherit xxx" here. 
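# (The match_require_include()/match_inherit() helpers defined further below
#  wrap the require/include/inherit regexes compiled in do_show_cross_depends;
#  for example:
#      m, kw = self.match_require_include('require conf/machine/include/arm.inc')
#      # m.group(1) == 'conf/machine/include/arm.inc', kw == 'requires'
#  )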
- bbclass="" - if not m and f.endswith(".bbclass"): - m, keyword = self.match_inherit(line) - bbclass=".bbclass" - # Find a 'require/include xxxx' - if m: - self.print_cross_files(bbpath, keyword, layername, f, m.group(1) + bbclass, args.filenames, ignore_layers) - line = ffile.readline() - - def print_cross_files(self, bbpath, keyword, layername, f, needed_filename, show_filenames, ignore_layers): - """Print the depends that crosses a layer boundary""" - needed_file = bb.utils.which(bbpath, needed_filename) - if needed_file: - # Which layer is this file from - needed_layername = self.get_file_layer(needed_file) - if needed_layername != layername and not needed_layername in ignore_layers: - if not show_filenames: - f = self.remove_layer_prefix(f) - needed_file = self.remove_layer_prefix(needed_file) - logger.plain("%s %s %s" %(f, keyword, needed_file)) - - def match_inherit(self, line): - """Match the inherit xxx line""" - return (self.inherit_re.match(line), "inherits") - - def match_require_include(self, line): - """Match the require/include xxx line""" - m = self.require_re.match(line) - keyword = "requires" - if not m: - m = self.include_re.match(line) - keyword = "includes" - return (m, keyword) - - def check_cross_depends(self, keyword, layername, f, needed_file, show_filenames, ignore_layers): - """Print the DEPENDS/RDEPENDS file that crosses a layer boundary""" - best_realfn = bb.cache.virtualfn2realfn(needed_file)[0] - needed_layername = self.get_file_layer(best_realfn) - if needed_layername != layername and not needed_layername in ignore_layers: - if not show_filenames: - f = self.remove_layer_prefix(f) - best_realfn = self.remove_layer_prefix(best_realfn) - - logger.plain("%s %s %s" % (f, keyword, best_realfn)) - - def register_commands(self, sp): - self.add_command(sp, 'show-layers', self.do_show_layers, parserecipes=False) - - parser_show_overlayed = self.add_command(sp, 'show-overlayed', self.do_show_overlayed) - parser_show_overlayed.add_argument('-f', '--filenames', help='instead of the default formatting, list filenames of higher priority recipes with the ones they overlay indented underneath', action='store_true') - parser_show_overlayed.add_argument('-s', '--same-version', help='only list overlayed recipes where the version is the same', action='store_true') - parser_show_overlayed.add_argument('--mc', help='use specified multiconfig', default='') - - parser_show_recipes = self.add_command(sp, 'show-recipes', self.do_show_recipes) - parser_show_recipes.add_argument('-f', '--filenames', help='instead of the default formatting, list filenames of higher priority recipes with the ones they overlay indented underneath', action='store_true') - parser_show_recipes.add_argument('-r', '--recipes-only', help='instead of the default formatting, list recipes only', action='store_true') - parser_show_recipes.add_argument('-m', '--multiple', help='only list where multiple recipes (in the same layer or different layers) exist for the same recipe name', action='store_true') - parser_show_recipes.add_argument('-i', '--inherits', help='only list recipes that inherit the named class(es) - separate multiple classes using , (without spaces)', metavar='CLASS', default='') - parser_show_recipes.add_argument('-l', '--layer', help='only list recipes from the selected layer', default='') - parser_show_recipes.add_argument('-b', '--bare', help='output just names without the "(skipped)" marker', action='store_true') - parser_show_recipes.add_argument('--mc', help='use specified multiconfig', 
default='') - parser_show_recipes.add_argument('pnspec', nargs='*', help='optional recipe name specification (wildcards allowed, enclose in quotes to avoid shell expansion)') - - parser_show_appends = self.add_command(sp, 'show-appends', self.do_show_appends) - parser_show_appends.add_argument('pnspec', nargs='*', help='optional recipe name specification (wildcards allowed, enclose in quotes to avoid shell expansion)') - parser_show_appends.add_argument('--mc', help='use specified multiconfig', default='') - - parser_show_cross_depends = self.add_command(sp, 'show-cross-depends', self.do_show_cross_depends) - parser_show_cross_depends.add_argument('-f', '--filenames', help='show full file path', action='store_true') - parser_show_cross_depends.add_argument('-i', '--ignore', help='ignore dependencies on items in the specified layer(s) (split multiple layer names with commas, no spaces)', metavar='LAYERNAME') diff --git a/bitbake/lib/bs4/AUTHORS b/bitbake/lib/bs4/AUTHORS deleted file mode 100644 index 1f14fe07de..0000000000 --- a/bitbake/lib/bs4/AUTHORS +++ /dev/null @@ -1,49 +0,0 @@ -Behold, mortal, the origins of Beautiful Soup... -================================================ - -Leonard Richardson is the primary maintainer. - -Aaron DeVore and Isaac Muse have made significant contributions to the -code base. - -Mark Pilgrim provided the encoding detection code that forms the base -of UnicodeDammit. - -Thomas Kluyver and Ezio Melotti finished the work of getting Beautiful -Soup 4 working under Python 3. - -Simon Willison wrote soupselect, which was used to make Beautiful Soup -support CSS selectors. Isaac Muse wrote SoupSieve, which made it -possible to _remove_ the CSS selector code from Beautiful Soup. - -Sam Ruby helped with a lot of edge cases. - -Jonathan Ellis was awarded the prestigious Beau Potage D'Or for his -work in solving the nestable tags conundrum. 
-
-An incomplete list of people who have contributed patches to Beautiful
-Soup:
-
- Istvan Albert, Andrew Lin, Anthony Baxter, Oliver Beattie, Andrew
-Boyko, Tony Chang, Francisco Canas, "Delong", Zephyr Fang, Fuzzy,
-Roman Gaufman, Yoni Gilad, Richie Hindle, Toshihiro Kamiya, Peteris
-Krumins, Kent Johnson, Marek Kapolka, Andreas Kostyrka, Roel Kramer,
-Ben Last, Robert Leftwich, Stefaan Lippens, "liquider", Staffan
-Malmgren, Ksenia Marasanova, JP Moins, Adam Monsen, John Nagle, "Jon",
-Ed Oskiewicz, Martijn Peters, Greg Phillips, Giles Radford, Stefano
-Revera, Arthur Rudolph, Marko Samastur, James Salter, Jouni Seppänen,
-Alexander Schmolck, Tim Shirley, Geoffrey Sneddon, Ville Skyttä,
-"Vikas", Jens Svalgaard, Andy Theyers, Eric Weiser, Glyn Webster, John
-Wiseman, Paul Wright, Danny Yoo
-
-An incomplete list of people who made suggestions or found bugs or
-found ways to break Beautiful Soup:
-
- Hanno Böck, Matteo Bertini, Chris Curvey, Simon Cusack, Bruce Eckel,
- Matt Ernst, Michael Foord, Tom Harris, Bill de hOra, Donald Howes,
- Matt Patterson, Scott Roberts, Steve Strassmann, Mike Williams,
- warchild at redho dot com, Sami Kuisma, Carlos Rocha, Bob Hutchison,
- Joren Mc, Michal Migurski, John Kleven, Tim Heaney, Tripp Lilley, Ed
- Summers, Dennis Sutch, Chris Smith, Aaron Swartz, Stuart
- Turner, Greg Edwards, Kevin J Kalupson, Nikos Kouremenos, Artur de
- Sousa Rocha, Yichun Wei, Per Vognsen
diff --git a/bitbake/lib/bs4/CHANGELOG b/bitbake/lib/bs4/CHANGELOG
deleted file mode 100644
index 2701446a6d..0000000000
--- a/bitbake/lib/bs4/CHANGELOG
+++ /dev/null
@@ -1,1839 +0,0 @@
-= 4.12.3 (20240117)
-
-* The Beautiful Soup documentation now has a Spanish translation, thanks
-  to Carlos Romero. Delong Wang's Chinese translation has been updated
-  to cover Beautiful Soup 4.12.0.
-
-* Fixed a regression such that if you set .hidden on a tag, the tag
-  becomes invisible but its contents are still visible. User manipulation
-  of .hidden is not a documented or supported feature, so don't do this,
-  but it wasn't too difficult to keep the old behavior working.
-
-* Fixed a case found by Mengyuhan where html.parser giving up on
-  markup would result in an AssertionError instead of a
-  ParserRejectedMarkup exception.
-
-* Added the correct stacklevel to instances of the XMLParsedAsHTMLWarning.
-  [bug=2034451]
-
-* Corrected the syntax of the license definition in pyproject.toml. Patch
-  by Louis Maddox. [bug=2032848]
-
-* Corrected a typo in a test that was causing test failures when run against
-  libxml2 2.12.1. [bug=2045481]
-
-= 4.12.2 (20230407)
-
-* Fixed an unhandled exception in BeautifulSoup.decode_contents
-  and methods that call it. [bug=2015545]
-
-= 4.12.1 (20230405)
-
-NOTE: the following things are likely to be dropped in the next
-feature release of Beautiful Soup:
-
- Official support for Python 3.6.
- Inclusion of unit tests and test data in the wheel file.
- Two scripts: demonstrate_parser_differences.py and test-all-versions.
-
-Changes:
-
-* This version of Beautiful Soup replaces setup.py and setup.cfg
-  with pyproject.toml. Beautiful Soup now uses tox as its test backend
-  and hatch to do builds.
-
-* The main functional improvement in this version is a nonrecursive technique
-  for regenerating a tree. This technique is used to avoid situations where,
-  in previous versions, doing something to a very deeply nested tree
-  would overflow the Python interpreter stack:
-
-  1. Outputting a tree as a string, e.g. with
-     BeautifulSoup.encode() [bug=1471755]
-
-  2. 
Making copies of trees (copy.copy() and - copy.deepcopy() from the Python standard library). [bug=1709837] - - 3. Pickling a BeautifulSoup object. (Note that pickling a Tag - object can still cause an overflow.) - -* Making a copy of a BeautifulSoup object no longer parses the - document again, which should improve performance significantly. - -* When a BeautifulSoup object is unpickled, Beautiful Soup now - tries to associate an appropriate TreeBuilder object with it. - -* Tag.prettify() will now consistently end prettified markup with - a newline. - -* Added unit tests for fuzz test cases created by third - parties. Some of these tests are skipped since they point - to problems outside of Beautiful Soup, but this change - puts them all in one convenient place. - -* PageElement now implements the known_xml attribute. (This was technically - a bug, but it shouldn't be an issue in normal use.) [bug=2007895] - -* The demonstrate_parser_differences.py script was still written in - Python 2. I've converted it to Python 3, but since no one has - mentioned this over the years, it's a sign that no one uses this - script and it's not serving its purpose. - -= 4.12.0 (20230320) - -* Introduced the .css property, which centralizes all access to - the Soup Sieve API. This allows Beautiful Soup to give direct - access to as much of Soup Sieve that makes sense, without cluttering - the BeautifulSoup and Tag classes with a lot of new methods. - - This does mean one addition to the BeautifulSoup and Tag classes - (the .css property itself), so this might be a breaking change if you - happen to use Beautiful Soup to parse XML that includes a tag called - . In particular, code like this will stop working in 4.12.0: - - soup.css['id'] - - Code like this will work just as before: - - soup.find_one('css')['id'] - - The Soup Sieve methods supported through the .css property are - select(), select_one(), iselect(), closest(), match(), filter(), - escape(), and compile(). The BeautifulSoup and Tag classes still - support the select() and select_one() methods; they have not been - deprecated, but they have been demoted to convenience methods. - - [bug=2003677] - -* When the html.parser parser decides it can't parse a document, Beautiful - Soup now consistently propagates this fact by raising a - ParserRejectedMarkup error. [bug=2007343] - -* Removed some error checking code from diagnose(), which is redundant with - similar (but more Pythonic) code in the BeautifulSoup constructor. - [bug=2007344] - -* Added intersphinx references to the documentation so that other - projects have a target to point to when they reference Beautiful - Soup classes. [bug=1453370] - -= 4.11.2 (20230131) - -* Fixed test failures caused by nondeterministic behavior of - UnicodeDammit's character detection, depending on the platform setup. - [bug=1973072] - -* Fixed another crash when overriding multi_valued_attributes and using the - html5lib parser. [bug=1948488] - -* The HTMLFormatter and XMLFormatter constructors no longer return a - value. [bug=1992693] - -* Tag.interesting_string_types is now propagated when a tag is - copied. [bug=1990400] - -* Warnings now do their best to provide an appropriate stacklevel, - improving the usefulness of the message. [bug=1978744] - -* Passing a Tag's .contents into PageElement.extend() now works the - same way as passing the Tag itself. - -* Soup Sieve tests will be skipped if the library is not installed. 
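To make the PageElement.extend() note above concrete, a minimal sketch, assuming Beautiful Soup 4.11.2 or later is installed:

    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<div><p>a</p><p>b</p></div><ul></ul>", "html.parser")
    div, ul = soup.div, soup.ul

    # Passing the live .contents list now behaves the same as passing the Tag:
    ul.extend(div.contents)
    print(soup)  # expected: <div></div><ul><p>a</p><p>b</p></ul>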
- -= 4.11.1 (20220408) - -This release was done to ensure that the unit tests are packaged along -with the released source. There are no functionality changes in this -release, but there are a few other packaging changes: - -* The Japanese and Korean translations of the documentation are included. -* The changelog is now packaged as CHANGELOG, and the license file is - packaged as LICENSE. NEWS.txt and COPYING.txt are still present, - but may be removed in the future. -* TODO.txt is no longer packaged, since a TODO is not relevant for released - code. - -= 4.11.0 (20220407) - -* Ported unit tests to use pytest. - -* Added special string classes, RubyParenthesisString and RubyTextString, - to make it possible to treat ruby text specially in get_text() calls. - [bug=1941980] - -* It's now possible to customize the way output is indented by - providing a value for the 'indent' argument to the Formatter - constructor. The 'indent' argument works very similarly to the - argument of the same name in the Python standard library's - json.dump() function. [bug=1955497] - -* If the charset-normalizer Python module - (https://pypi.org/project/charset-normalizer/) is installed, Beautiful - Soup will use it to detect the character sets of incoming documents. - This is also the module used by newer versions of the Requests library. - For the sake of backwards compatibility, chardet and cchardet both take - precedence if installed. [bug=1955346] - -* Added a workaround for an lxml bug - (https://bugs.launchpad.net/lxml/+bug/1948551) that causes - problems when parsing a Unicode string beginning with BYTE ORDER MARK. - [bug=1947768] - -* Issue a warning when an HTML parser is used to parse a document that - looks like XML but not XHTML. [bug=1939121] - -* Do a better job of keeping track of namespaces as an XML document is - parsed, so that CSS selectors that use namespaces will do the right - thing more often. [bug=1946243] - -* Some time ago, the misleadingly named "text" argument to find-type - methods was renamed to the more accurate "string." But this supposed - "renaming" didn't make it into important places like the method - signatures or the docstrings. That's corrected in this - version. "text" still works, but will give a DeprecationWarning. - [bug=1947038] - -* Fixed a crash when pickling a BeautifulSoup object that has no - tree builder. [bug=1934003] - -* Fixed a crash when overriding multi_valued_attributes and using the - html5lib parser. [bug=1948488] - -* Standardized the wording of the MarkupResemblesLocatorWarning - warnings to omit untrusted input and make the warnings less - judgmental about what you ought to be doing. [bug=1955450] - -* Removed support for the iconv_codec library, which doesn't seem - to exist anymore and was never put up on PyPI. (The closest - replacement on PyPI, iconv_codecs, is GPL-licensed, so we can't use - it--it's also quite old.) - -= 4.10.0 (20210907) - -* This is the first release of Beautiful Soup to only support Python - 3. I dropped Python 2 support to maintain support for newer versions - (58 and up) of setuptools. See: - https://github.com/pypa/setuptools/issues/2769 [bug=1942919] - -* The behavior of methods like .get_text() and .strings now differs - depending on the type of tag. 
The change is visible with HTML tags - like ",rE:!0,sL:["actionscript","javascript","handlebars","xml"]}},{cN:"meta",v:[{b:/<\?xml/,e:/\?>/,r:10},{b:/<\?\w+/,e:/\?>/}]},{cN:"tag",b:"",c:[{cN:"name",b:/[^\/><\s]+/,r:0},t]}]}});hljs.registerLanguage("markdown",function(e){return{aliases:["md","mkdown","mkd"],c:[{cN:"section",v:[{b:"^#{1,6}",e:"$"},{b:"^.+?\\n[=-]{2,}$"}]},{b:"<",e:">",sL:"xml",r:0},{cN:"bullet",b:"^([*+-]|(\\d+\\.))\\s+"},{cN:"strong",b:"[*_]{2}.+?[*_]{2}"},{cN:"emphasis",v:[{b:"\\*.+?\\*"},{b:"_.+?_",r:0}]},{cN:"quote",b:"^>\\s+",e:"$"},{cN:"code",v:[{b:"^```w*s*$",e:"^```s*$"},{b:"`.+?`"},{b:"^( {4}| )",e:"$",r:0}]},{b:"^[-\\*]{3,}",e:"$"},{b:"\\[.+?\\][\\(\\[].*?[\\)\\]]",rB:!0,c:[{cN:"string",b:"\\[",e:"\\]",eB:!0,rE:!0,r:0},{cN:"link",b:"\\]\\(",e:"\\)",eB:!0,eE:!0},{cN:"symbol",b:"\\]\\[",e:"\\]",eB:!0,eE:!0}],r:10},{b:/^\[[^\n]+\]:/,rB:!0,c:[{cN:"symbol",b:/\[/,e:/\]/,eB:!0,eE:!0},{cN:"link",b:/:\s*/,e:/$/,eB:!0}]}]}});hljs.registerLanguage("json",function(e){var i={literal:"true false null"},n=[e.QSM,e.CNM],r={e:",",eW:!0,eE:!0,c:n,k:i},t={b:"{",e:"}",c:[{cN:"attr",b:/"/,e:/"/,c:[e.BE],i:"\\n"},e.inherit(r,{b:/:/})],i:"\\S"},c={b:"\\[",e:"\\]",c:[e.inherit(r)],i:"\\S"};return n.splice(n.length,0,t,c),{c:n,k:i,i:"\\S"}}); \ No newline at end of file diff --git a/bitbake/lib/toaster/toastergui/static/js/importlayer.js b/bitbake/lib/toaster/toastergui/static/js/importlayer.js deleted file mode 100644 index 8e2032de26..0000000000 --- a/bitbake/lib/toaster/toastergui/static/js/importlayer.js +++ /dev/null @@ -1,455 +0,0 @@ -"use strict" - -function importLayerPageInit (ctx) { - - var layerDepBtn = $("#add-layer-dependency-btn"); - var importAndAddBtn = $("#import-and-add-btn"); - var layerNameInput = $("#import-layer-name"); - var vcsURLInput = $("#layer-git-repo-url"); - var gitRefInput = $("#layer-git-ref"); - var layerDepInput = $("#layer-dependency"); - var layerNameCtrl = $("#layer-name-ctrl"); - var duplicatedLayerName = $("#duplicated-layer-name-hint"); - var localDirPath = $("#local-dir-path"); - - var layerDeps = {}; - var layerDepsDeps = {}; - var currentLayerDepSelection; - var validLayerName = /^(\w|-)+$/; - - /* Catch 'disable' race condition between type-ahead started and "input change" */ - var typeAheadStarted = 0; - - libtoaster.makeTypeahead(layerDepInput, - libtoaster.ctx.layersTypeAheadUrl, - { include_added: "true" }, function(item){ - currentLayerDepSelection = item; - layerDepBtn.removeAttr("disabled"); - typeAheadStarted = 1; - }); - - layerDepInput.on("typeahead:select", function(event, data){ - currentLayerDepSelection = data; - }); - - // Disable local dir repo when page is loaded. 
- $('#local-dir').hide(); - - // disable the "Add layer" button when the layer input typeahead is empty - // or not in the typeahead choices - layerDepInput.on("input change", function(){ - if (0 == typeAheadStarted) { - layerDepBtn.attr("disabled","disabled"); - } - typeAheadStarted = 0; - }); - - /* We automatically add "openembedded-core" layer for convenience as a - * dependency as pretty much all layers depend on this one - */ - $.getJSON(libtoaster.ctx.layersTypeAheadUrl, - { include_added: "true" , search: "openembedded-core" }, - function(layer) { - if (layer.results.length > 0) { - currentLayerDepSelection = layer.results[0]; - layerDepBtn.click(); - } - }); - - layerDepBtn.click(function(){ - typeAheadStarted = 0; - if (currentLayerDepSelection == undefined) - return; - - layerDeps[currentLayerDepSelection.id] = currentLayerDepSelection; - - /* Make a list item for the new layer dependency */ - var newLayerDep = $("
  • "); - - newLayerDep.data('layer-id', currentLayerDepSelection.id); - newLayerDep.children("span").tooltip(); - - var link = newLayerDep.children("a"); - link.attr("href", currentLayerDepSelection.layerdetailurl); - link.text(currentLayerDepSelection.name); - link.tooltip({title: currentLayerDepSelection.tooltip, placement: "right"}); - - var trashItem = newLayerDep.children("span"); - trashItem.click(function () { - var toRemove = $(this).parent().data('layer-id'); - delete layerDeps[toRemove]; - $(this).parent().fadeOut(function (){ - $(this).remove(); - }); - }); - - $("#layer-deps-list").append(newLayerDep); - - libtoaster.getLayerDepsForProject(currentLayerDepSelection.xhrLayerUrl, - function (data){ - /* These are the dependencies of the layer added as a dependency */ - if (data.list.length > 0) { - currentLayerDepSelection.url = currentLayerDepSelection.layerdetailurl; - layerDeps[currentLayerDepSelection.id].deps = data.list; - } - - /* Clear the current selection */ - layerDepInput.val(""); - currentLayerDepSelection = undefined; - layerDepBtn.attr("disabled","disabled"); - }, null); - }); - - importAndAddBtn.click(function(e){ - e.preventDefault(); - /* This is a list of the names from layerDeps for the layer deps - * modal dialog body - */ - var depNames = []; - - /* arrray of all layer dep ids includes parent and child deps */ - var allDeps = []; - - /* temporary object to use to do a reduce on the dependencies for each - * layer dependency added - */ - var depDeps = {}; - - /* the layers that have dependencies have an extra property "deps" - * look in this for each layer and reduce this to a unquie object - * of deps. - */ - for (var key in layerDeps){ - if (layerDeps[key].hasOwnProperty('deps')){ - for (var dep in layerDeps[key].deps){ - var layer = layerDeps[key].deps[dep]; - depDeps[layer.id] = layer; - } - } - depNames.push(layerDeps[key].name); - allDeps.push(layerDeps[key].id); - } - - /* we actually want it as an array so convert it now */ - var depDepsArray = []; - for (var key in depDeps) - depDepsArray.push (depDeps[key]); - - if (depDepsArray.length > 0) { - var layer = { name: layerNameInput.val(), url: "#", id: -1 }; - var title = "Layer"; - var body = ""+layer.name+"'s dependencies ("+ - depNames.join(", ")+") require some layers that are not added to your project. Select the ones you want to add:

    "; - - showLayerDepsModal(layer, - depDepsArray, - title, body, false, function(layerObsList){ - /* Add the accepted layer dependencies' ids to the allDeps array */ - for (var key in layerObsList){ - allDeps.push(layerObsList[key].id); - } - import_and_add (); - }); - } else { - import_and_add (); - } - - function import_and_add () { - /* convert to a csv of all the deps to be added */ - var layerDepsCsv = allDeps.join(","); - - var layerData = { - name: layerNameInput.val(), - vcs_url: vcsURLInput.val(), - git_ref: gitRefInput.val(), - dir_path: $("#layer-subdir").val(), - project_id: libtoaster.ctx.projectId, - layer_deps: layerDepsCsv, - local_source_dir: $('#local-dir-path').val(), - add_to_project: true, - }; - - if ($('input[name=repo]:checked').val() == "git") { - layerData.local_source_dir = ""; - } else { - layerData.vcs_url = ""; - layerData.git_ref = ""; - } - - $.ajax({ - type: "PUT", - url: ctx.xhrLayerUrl, - data: JSON.stringify(layerData), - headers: { 'X-CSRFToken' : $.cookie('csrftoken')}, - success: function (data) { - if (data.error != "ok") { - console.log(data.error); - /* let the user know why nothing happened */ - alert(data.error) - } else { - createImportedNotification(data); - window.location.replace(libtoaster.ctx.projectPageUrl); - } - }, - error: function (data) { - console.log("Call failed"); - console.log(data); - } - }); - } - }); - - /* Layer imported notification */ - function createImportedNotification(imported){ - var message = "Layer imported"; - - if (imported.deps_added.length === 0) { - message = "You have imported "+imported.imported_layer.name+" and added it to your project."; - } else { - - var links = ""+imported.imported_layer.name+", "; - - imported.deps_added.map (function(item, index){ - links +=''+item.name+''; - /*If we're at the last element we don't want the trailing comma */ - if (imported.deps_added[index+1] !== undefined) - links += ', '; - }); - - /* Length + 1 here to do deps + the imported layer */ - message = 'You have imported '+imported.imported_layer.name+' and added '+(imported.deps_added.length+1)+' layers to your project: '+links+''; - } - - libtoaster.setNotification("layer-imported", message); - } - - function enable_import_btn(enabled) { - var importAndAddHint = $("#import-and-add-hint"); - - if (enabled) { - importAndAddBtn.removeAttr("disabled"); - importAndAddHint.hide(); - return; - } - - importAndAddBtn.attr("disabled", "disabled"); - importAndAddHint.show(); - } - - function check_form() { - var valid = false; - var inputs = $("input:required"); - var inputStr = inputs.val().split(""); - - for (var i=0; i 0) { - enable_import_btn(true); - } - - if ($("#git-repo-radio").prop("checked")) { - if (gitRefInput.val().length > 0 && - gitRefInput.val() == 'HEAD') { - $('#invalid-layer-revision-hint').show(); - $('#layer-revision-ctrl').addClass('has-error'); - enable_import_btn(false); - } else if (vcsURLInput.val().length > 0 && - gitRefInput.val().length > 0) { - $('#invalid-layer-revision-hint').hide(); - $('#layer-revision-ctrl').removeClass('has-error'); - enable_import_btn(true); - } - } - } - - if (inputs.val().length == 0) - enable_import_btn(false); - } - - function layerExistsError(layer){ - var dupLayerInfo = $("#duplicate-layer-info"); - - if (layer.local_source_dir) { - $("#git-layer-dup").hide(); - $("#local-layer-dup").fadeIn(); - dupLayerInfo.find(".dup-layer-name").text(layer.name); - dupLayerInfo.find(".dup-layer-link").attr("href", layer.layerdetailurl); - 
dupLayerInfo.find("#dup-local-source-dir-name").text(layer.local_source_dir); - } else { - $("#git-layer-dup").fadeIn(); - $("#local-layer-dup").hide(); - dupLayerInfo.find(".dup-layer-name").text(layer.name); - dupLayerInfo.find(".dup-layer-link").attr("href", layer.layerdetailurl); - dupLayerInfo.find("#dup-layer-vcs-url").text(layer.vcs_url); - dupLayerInfo.find("#dup-layer-revision").text(layer.vcs_reference); - } - $(".fields-apart-from-layer-name").fadeOut(function(){ - - dupLayerInfo.fadeIn(); - }); - } - - layerNameInput.on('blur', function() { - if (!$(this).val()){ - return; - } - var name = $(this).val(); - - /* Check if the layer name exists */ - $.getJSON(libtoaster.ctx.layersTypeAheadUrl, - { include_added: "true" , search: name, format: "json" }, - function(layer) { - if (layer.results.length > 0) { - for (var i in layer.results){ - if (layer.results[i].name == name) { - layerExistsError(layer.results[i]); - } - } - } - }); - }); - - vcsURLInput.on('input', function() { - check_form(); - }); - - gitRefInput.on('input', function() { - check_form(); - }); - - layerNameInput.on('input', function() { - if ($(this).val() && !validLayerName.test($(this).val())){ - layerNameCtrl.addClass("has-error") - $("#invalid-layer-name-hint").show(); - enable_import_btn(false); - return; - } - - if ($("#duplicate-layer-info").css("display") != "None"){ - $("#duplicate-layer-info").fadeOut(function(){ - $(".fields-apart-from-layer-name").show(); - radioDisplay(); - }); - - } - - radioDisplay(); - - /* Don't remove the error class if we're displaying the error for another - * reason. - */ - if (!duplicatedLayerName.is(":visible")) - layerNameCtrl.removeClass("has-error") - - $("#invalid-layer-name-hint").hide(); - check_form(); - }); - - /* Setup 'blank' typeahead */ - libtoaster.makeTypeahead(gitRefInput, - ctx.xhrGitRevTypeAheadUrl, - { git_url: null }, function(){}); - - - vcsURLInput.focusout(function (){ - if (!$(this).val()) - return; - - /* If we a layer name specified don't overwrite it or if there isn't a - * url typed in yet return - */ - if (!layerNameInput.val() && $(this).val().search("/")){ - var urlPts = $(this).val().split("/"); - /* Add a suggestion of the layer name */ - var suggestion = urlPts[urlPts.length-1].replace(".git",""); - layerNameInput.val(suggestion); - } - - /* Now actually setup the typeahead properly with the git url entered */ - gitRefInput._typeahead('destroy'); - - libtoaster.makeTypeahead(gitRefInput, - ctx.xhrGitRevTypeAheadUrl, - { git_url: $(this).val() }, - function(selected){ - gitRefInput._typeahead("close"); - }); - - }); - - function radioDisplay() { - if ($('input[name=repo]:checked').val() == "local") { - $('#git-repo').hide(); - $('#import-git-layer-and-add-hint').hide(); - $('#local-dir').fadeIn(); - $('#import-local-dir-and-add-hint').fadeIn(); - } else { - $('#local-dir').hide(); - $('#import-local-dir-and-add-hint').hide(); - $('#git-repo').fadeIn(); - $('#import-git-layer-and-add-hint').fadeIn(); - } - } - - $('input:radio[name="repo"]').change(function() { - radioDisplay(); - if ($("#local-dir-radio").prop("checked")) { - if (localDirPath.val().length > 0) { - enable_import_btn(true); - } else { - enable_import_btn(false); - } - } - if ($("#git-repo-radio").prop("checked")) { - if (vcsURLInput.val().length > 0 && gitRefInput.val().length > 0) { - enable_import_btn(true); - } else { - enable_import_btn(false); - } - } - }); - - localDirPath.on('input', function(){ - if ($(this).val().trim().length == 0) { - 
-      $('#import-and-add-btn').attr("disabled","disabled");
-      $('#local-dir').addClass('has-error');
-      $('#hintError-dir-abs-path').show();
-      $('#hintError-dir-path-starts-with-slash').show();
-    } else {
-      var input = $(this);
-      var reBeginWithSlash = /^\//;
-      var reCheckVariable = /^\$/;
-      var re = /([ <>\\|":%\?\*]+)/;
-
-      var invalidDir = re.test(input.val());
-      var invalidSlash = reBeginWithSlash.test(input.val());
-      var invalidVar = reCheckVariable.test(input.val());
-
-      if (!invalidSlash && !invalidVar) {
-        $('#local-dir').addClass('has-error');
-        $('#import-and-add-btn').attr("disabled","disabled");
-        $('#hintError-dir-abs-path').show();
-        $('#hintError-dir-path-starts-with-slash').show();
-      } else if (invalidDir) {
-        $('#local-dir').addClass('has-error');
-        $('#import-and-add-btn').attr("disabled","disabled");
-        $('#hintError-dir-path').show();
-      } else {
-        $('#local-dir').removeClass('has-error');
-        if (layerNameInput.val().length > 0) {
-          $('#import-and-add-btn').removeAttr("disabled");
-        }
-        $('#hintError-dir-abs-path').hide();
-        $('#hintError-dir-path-starts-with-slash').hide();
-        $('#hintError-dir-path').hide();
-      }
-    }
-  });
-}
diff --git a/bitbake/lib/toaster/toastergui/static/js/jquery-2.0.3.min.js b/bitbake/lib/toaster/toastergui/static/js/jquery-2.0.3.min.js
deleted file mode 100644
index 2be209dd22..0000000000
--- a/bitbake/lib/toaster/toastergui/static/js/jquery-2.0.3.min.js
+++ /dev/null
@@ -1,6 +0,0 @@
-/*! jQuery v2.0.3 | (c) 2005, 2013 jQuery Foundation, Inc. | jquery.org/license
-//@ sourceMappingURL=jquery-2.0.3.min.map
-*/
-[3 lines of minified jQuery 2.0.3 source elided]