commit 1b13777c3629334e88266fcc74f7657359f457d7 Author: admin1 Date: Mon Aug 22 11:28:34 2022 +0300 fixed vector size mismatch between matched and status vectors diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ce5dfea --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +cmake-build-debug +build +Build +*.*~ +.idea +doxgen_generated \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..ae78966 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,5 @@ +cmake_minimum_required(VERSION 2.8.8) + +add_subdirectory(camera_models) +add_subdirectory(loop_fusion) + diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..9cecc1d --- /dev/null +++ b/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + {one line to give the program's name and a brief idea of what it does.} + Copyright (C) {year} {name of author} + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + {project} Copyright (C) {year} {fullname} + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ReadMe.md b/ReadMe.md new file mode 100644 index 0000000..9da9ae6 --- /dev/null +++ b/ReadMe.md @@ -0,0 +1,124 @@ + +# OpenVINS Secondary Pose Graph Example + + +This is an example secondary thread which provides loop closure in a loosely coupled manner for [OpenVINS](https://github.com/rpng/open_vins). +This code was originally developed by the HKUST aerial robotics group and can be found in the [VINS-Fusion](https://github.com/HKUST-Aerial-Robotics/VINS-Fusion) repository. +Here we stress that this is a loosely coupled method, thus no information is returned to the estimator to improve the underlying OpenVINS odometry. +This codebase has been modified in a few key areas including: exposing more loop closure parameters, subscribing to camera intrinsics, simplifying configuration such that only topics need to be supplied, and some tweaks to the loop closure detection to improve frequency. +Please see the below sections on the dependencies of the system along with a discussion of how the loop closure logic works, its improvements, and limitations. + + + +## Codebase Disclaimer + +This code is provided, as is, and is an example of how to use a secondary thread with the OpenVINS system. +Thus, this code has been directly adapted from the [VINS-Fusion](https://github.com/HKUST-Aerial-Robotics/VINS-Fusion) repository without a thorough investigation of the underlying code. +Therefore we don't have any guarantee of the accuracy or correctness. +Additionally, for questions about the underlying implementation (besides discussing the changes introduced from the original codebase) we might not be able to answer in detail. 
+ + + +## Dependencies + +* OpenVINS - https://docs.openvins.com/gs-installing.html +* Ceres Solver - https://github.com/ceres-solver/ceres-solver + + + +## Installation Commands + + +``` +# setup our workspace +mkdir -p catkin_ws_ov/src/ +cd catkin_ws_ov +catkin init +# repositories to clone +cd src +git clone https://github.com/rpng/open_vins.git +git clone https://github.com/rpng/ov_secondary.git +# go back to root and build +cd .. +catkin build -j4 +# run the OpenVINS system and loop node +source devel/setup.bash +roslaunch ov_msckf pgeneva_ros_eth.launch +roslaunch loop_fusion posegraph.launch +``` + + + +## Example Results / Discussion + +We found that while the secondary pose graph works well in some cases, in many other cases it fails to improve performance or even hurt it. +For example on the EurocMav dataset there isn't a clear winner in terms of ATE accuracy of the OpenVINS odometry poses being fed into the loop closure module and its published states. +On these small room datasets, many loop closure candidates are rejected, and thus there are maybe 2-4 loop closures, with V1\_02\_medium dataset having none over the whole trajectory in most cases. +Also to ensure there are enough points for PnP, the number of tracked features in ov\_msckf needed to be increased to around 300. + +![ate eth example](data/ate_eth.png) + + +On the other hand, there are cases where the loop closure clearly helps. +For example, on a long dataset such as the corridor1 from the [TUM-VI](https://vision.in.tum.de/data/datasets/visual-inertial-dataset) dataset the system running in monocular mode drifts towards the end of the dataset (blue). +The loop closure (red) is able to correct this and ensure that the ending pose is correct relative to the start (the sensor system returns to the same location in the bottom left of this trajectory). 
+
+![tum example](data/tum_example.png)
+
+
+Looking at some more quantitative results on the TUM-VI dataset, a few runs on the room datasets can clearly show the advantage of loop closure.
+These room datasets are limited to the same vicon room environment, and get very frequent loop closures due to this.
+From the below table, it is very clear that using the secondary loop closure thread in most cases has a clear performance gain as compared to the standard odometry.
+
+![ate tumvi example](data/ate_tumvi.png)
+
+
+
+## How VINS-Fusion Loop Closure Works
+
+
+1. We first wait to ensure we have received an initial camera intrinsic and camera to IMU extrinsic message.
+    - Both of these values will change over time if OpenVINS estimates them online
+    - Thus we will subscribe to updates to get the latest calibration
+2. In `pose_graph_node.cpp` we get image, pointcloud, and odometry messages from the OpenVINS system.
+    - Our pointcloud is special and contains all 3d features (in the global frame) seen from the image
+    - Additionally, it has in its channels the raw uv coordinates, normalized coordinates, and feature id for all features
+    - In this case, our image/pointcloud/odometry will all be of the last marginalized clone in OpenVINS
+3. Add the keyframe to our pose graph
+    - This is only done if we have moved enough (typically a few centimeters to remove stationary cases)
+    - Additionally, one can "skip" every few frames so the pose graph has fewer keyframes
+4. Preprocess information of the keyframe
+    - We will store all the global 3d positions of features and their corresponding uvs
+    - Additionally, more features are extracted to help with bag-of-word frame retrieval
+    - These are called `window_keypoints` and `keypoints` respectively
+    - Brief descriptors are extracted for all features
+5. 
Trying to detect a loop closure
+    - In `pose_graph.cpp` the `addKeyFrame()` function will call on `detectLoop()` to try to find a keyframe
+    - This will query a [DBoW2](https://github.com/dorian3d/DBoW2) database with all past keyframes in it
+    - From the top *n* returned matches, we select the oldest one that passes all our thresholds
+    - This oldest will have the smallest id, and will need to be able to get a valid PnP pose calculated
+6. Pose graph optimization
+    - If we have found a keyframe match, then we should optimize our graph
+    - The keyframe with a loop closure gets appended to `optimize_buf` and will be read by the `optimize4DoF()` thread
+    - Append all keyframes as parameters
+    - If a keyframe has a loop closure, then append that as an edge
+    - Always append the relative between the last frame and the current frame (based on OpenVINS odometry)
+    - After optimizing calculate how much the new keyframe should be "corrected" (e.g. `yaw_drift,r_drift,t_drift`)
+7. Publish the current IMU pose as odometry
+    - It is important to note that the pose graph always "lags" behind the estimate
+    - The current state is rotated into the last added keyframe
+    - Then the "correction" to that original keyframe is applied to the state
+    - The corrected pose is then re-published (see `vio_callback()` function)
+
+
+
+## Identified Limitations
+
+* The graph has constant weighting, and thus a single bad loop closure can corrupt the whole estimate
+* Keyframes can only be loop closed to once, thus a keyframe can't be continuously loop closed to
+* Bag-of-word lookups can fail if not exactly near the same perspective
+* How best to quantify that we have a good PnP result from RANSAC? 
+* Features that are used are never improved, nor optimized again, they are taken to be "true" +* Tuning the system is difficult and can hurt performance if not properly tuned + + diff --git a/camera_models/CMakeLists.txt b/camera_models/CMakeLists.txt new file mode 100644 index 0000000..29c13fa --- /dev/null +++ b/camera_models/CMakeLists.txt @@ -0,0 +1,70 @@ +cmake_minimum_required(VERSION 2.8.3) +project(camera_models) + +set(CMAKE_BUILD_TYPE "Release") +set(CMAKE_CXX_FLAGS "-std=c++11") +set(CMAKE_CXX_FLAGS_RELEASE "-O3 -fPIC") + +find_package(catkin REQUIRED COMPONENTS + roscpp + std_msgs + ) + +find_package(Boost REQUIRED COMPONENTS filesystem program_options system) +include_directories(${Boost_INCLUDE_DIRS}) + +set(OpenCV_DIR "/home/admin1/podmivan/opencv-3.4.16/build") +set(CUDA_DIR "/usr/local/cuda-11.5") +find_package(OpenCV 3 REQUIRED) + +# set(EIGEN_INCLUDE_DIR "/usr/local/include/eigen3") +find_package(Ceres REQUIRED) +include_directories(${CERES_INCLUDE_DIRS}) + + +catkin_package( + INCLUDE_DIRS include + LIBRARIES camera_models + CATKIN_DEPENDS roscpp std_msgs +# DEPENDS system_lib + ) + +include_directories( + ${catkin_INCLUDE_DIRS} + ) + +include_directories("include") + +add_executable(Calibrations + src/intrinsic_calib.cc + src/chessboard/Chessboard.cc + src/calib/CameraCalibration.cc + src/camera_models/Camera.cc + src/camera_models/CameraFactory.cc + src/camera_models/CostFunctionFactory.cc + src/camera_models/PinholeCamera.cc + src/camera_models/PinholeFullCamera.cc + src/camera_models/CataCamera.cc + src/camera_models/EquidistantCamera.cc + src/camera_models/ScaramuzzaCamera.cc + src/sparse_graph/Transform.cc + src/gpl/gpl.cc + src/gpl/EigenQuaternionParameterization.cc) + +add_library(camera_models + src/chessboard/Chessboard.cc + src/calib/CameraCalibration.cc + src/camera_models/Camera.cc + src/camera_models/CameraFactory.cc + src/camera_models/CostFunctionFactory.cc + src/camera_models/PinholeCamera.cc + 
src/camera_models/PinholeFullCamera.cc + src/camera_models/CataCamera.cc + src/camera_models/EquidistantCamera.cc + src/camera_models/ScaramuzzaCamera.cc + src/sparse_graph/Transform.cc + src/gpl/gpl.cc + src/gpl/EigenQuaternionParameterization.cc) + +target_link_libraries(Calibrations ${Boost_LIBRARIES} ${OpenCV_LIBS} ${CERES_LIBRARIES}) +target_link_libraries(camera_models ${Boost_LIBRARIES} ${OpenCV_LIBS} ${CERES_LIBRARIES}) diff --git a/camera_models/camera_calib_example/calibrationdata/left-0000.png b/camera_models/camera_calib_example/calibrationdata/left-0000.png new file mode 100644 index 0000000..2fc0de4 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0000.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0001.png b/camera_models/camera_calib_example/calibrationdata/left-0001.png new file mode 100644 index 0000000..89a5d7e Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0001.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0002.png b/camera_models/camera_calib_example/calibrationdata/left-0002.png new file mode 100644 index 0000000..12dae33 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0002.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0003.png b/camera_models/camera_calib_example/calibrationdata/left-0003.png new file mode 100644 index 0000000..bc33f0e Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0003.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0004.png b/camera_models/camera_calib_example/calibrationdata/left-0004.png new file mode 100644 index 0000000..eb6d826 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0004.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0005.png 
b/camera_models/camera_calib_example/calibrationdata/left-0005.png new file mode 100644 index 0000000..771b9d6 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0005.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0006.png b/camera_models/camera_calib_example/calibrationdata/left-0006.png new file mode 100644 index 0000000..f74523f Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0006.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0007.png b/camera_models/camera_calib_example/calibrationdata/left-0007.png new file mode 100644 index 0000000..4f8825f Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0007.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0008.png b/camera_models/camera_calib_example/calibrationdata/left-0008.png new file mode 100644 index 0000000..06e435c Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0008.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0009.png b/camera_models/camera_calib_example/calibrationdata/left-0009.png new file mode 100644 index 0000000..d06b395 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0009.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0010.png b/camera_models/camera_calib_example/calibrationdata/left-0010.png new file mode 100644 index 0000000..b6e62b3 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0010.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0011.png b/camera_models/camera_calib_example/calibrationdata/left-0011.png new file mode 100644 index 0000000..13a531b Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0011.png differ diff --git 
a/camera_models/camera_calib_example/calibrationdata/left-0012.png b/camera_models/camera_calib_example/calibrationdata/left-0012.png new file mode 100644 index 0000000..610a28f Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0012.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0013.png b/camera_models/camera_calib_example/calibrationdata/left-0013.png new file mode 100644 index 0000000..ba9df7c Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0013.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0014.png b/camera_models/camera_calib_example/calibrationdata/left-0014.png new file mode 100644 index 0000000..247bf5e Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0014.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0015.png b/camera_models/camera_calib_example/calibrationdata/left-0015.png new file mode 100644 index 0000000..222b3b7 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0015.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0016.png b/camera_models/camera_calib_example/calibrationdata/left-0016.png new file mode 100644 index 0000000..864abca Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0016.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0017.png b/camera_models/camera_calib_example/calibrationdata/left-0017.png new file mode 100644 index 0000000..6292866 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0017.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0018.png b/camera_models/camera_calib_example/calibrationdata/left-0018.png new file mode 100644 index 0000000..c9fd031 Binary files /dev/null and 
b/camera_models/camera_calib_example/calibrationdata/left-0018.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0019.png b/camera_models/camera_calib_example/calibrationdata/left-0019.png new file mode 100644 index 0000000..3802175 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0019.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0020.png b/camera_models/camera_calib_example/calibrationdata/left-0020.png new file mode 100644 index 0000000..eab6d24 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0020.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0021.png b/camera_models/camera_calib_example/calibrationdata/left-0021.png new file mode 100644 index 0000000..d8b9a90 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0021.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0022.png b/camera_models/camera_calib_example/calibrationdata/left-0022.png new file mode 100644 index 0000000..8254218 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0022.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0023.png b/camera_models/camera_calib_example/calibrationdata/left-0023.png new file mode 100644 index 0000000..999cc54 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0023.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0024.png b/camera_models/camera_calib_example/calibrationdata/left-0024.png new file mode 100644 index 0000000..becd591 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0024.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0025.png b/camera_models/camera_calib_example/calibrationdata/left-0025.png new file mode 100644 index 
0000000..614156c Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0025.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0026.png b/camera_models/camera_calib_example/calibrationdata/left-0026.png new file mode 100644 index 0000000..9648ea8 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0026.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0027.png b/camera_models/camera_calib_example/calibrationdata/left-0027.png new file mode 100644 index 0000000..9b0ccb9 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0027.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0028.png b/camera_models/camera_calib_example/calibrationdata/left-0028.png new file mode 100644 index 0000000..1dec5f3 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0028.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0029.png b/camera_models/camera_calib_example/calibrationdata/left-0029.png new file mode 100644 index 0000000..43f830f Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0029.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0030.png b/camera_models/camera_calib_example/calibrationdata/left-0030.png new file mode 100644 index 0000000..d04595c Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0030.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0031.png b/camera_models/camera_calib_example/calibrationdata/left-0031.png new file mode 100644 index 0000000..c880b6b Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0031.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0032.png 
b/camera_models/camera_calib_example/calibrationdata/left-0032.png new file mode 100644 index 0000000..9bb78d2 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0032.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0033.png b/camera_models/camera_calib_example/calibrationdata/left-0033.png new file mode 100644 index 0000000..9914ff0 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0033.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0034.png b/camera_models/camera_calib_example/calibrationdata/left-0034.png new file mode 100644 index 0000000..938b0df Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0034.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0035.png b/camera_models/camera_calib_example/calibrationdata/left-0035.png new file mode 100644 index 0000000..8037ed8 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0035.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0036.png b/camera_models/camera_calib_example/calibrationdata/left-0036.png new file mode 100644 index 0000000..5bc149c Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0036.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0037.png b/camera_models/camera_calib_example/calibrationdata/left-0037.png new file mode 100644 index 0000000..5777c86 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0037.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0038.png b/camera_models/camera_calib_example/calibrationdata/left-0038.png new file mode 100644 index 0000000..7f0d88a Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0038.png differ diff --git 
a/camera_models/camera_calib_example/calibrationdata/left-0039.png b/camera_models/camera_calib_example/calibrationdata/left-0039.png new file mode 100644 index 0000000..82039af Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0039.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0040.png b/camera_models/camera_calib_example/calibrationdata/left-0040.png new file mode 100644 index 0000000..3cc8804 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0040.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0041.png b/camera_models/camera_calib_example/calibrationdata/left-0041.png new file mode 100644 index 0000000..ef31425 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0041.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0042.png b/camera_models/camera_calib_example/calibrationdata/left-0042.png new file mode 100644 index 0000000..823daa8 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0042.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0043.png b/camera_models/camera_calib_example/calibrationdata/left-0043.png new file mode 100644 index 0000000..f14b803 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0043.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0044.png b/camera_models/camera_calib_example/calibrationdata/left-0044.png new file mode 100644 index 0000000..b109a8b Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0044.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0045.png b/camera_models/camera_calib_example/calibrationdata/left-0045.png new file mode 100644 index 0000000..affcc5a Binary files /dev/null and 
b/camera_models/camera_calib_example/calibrationdata/left-0045.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0046.png b/camera_models/camera_calib_example/calibrationdata/left-0046.png new file mode 100644 index 0000000..11e6afe Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0046.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0047.png b/camera_models/camera_calib_example/calibrationdata/left-0047.png new file mode 100644 index 0000000..dd19626 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0047.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0048.png b/camera_models/camera_calib_example/calibrationdata/left-0048.png new file mode 100644 index 0000000..3470997 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0048.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0049.png b/camera_models/camera_calib_example/calibrationdata/left-0049.png new file mode 100644 index 0000000..3e19935 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0049.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0050.png b/camera_models/camera_calib_example/calibrationdata/left-0050.png new file mode 100644 index 0000000..f4865f8 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0050.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0051.png b/camera_models/camera_calib_example/calibrationdata/left-0051.png new file mode 100644 index 0000000..a72ebf9 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0051.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0052.png b/camera_models/camera_calib_example/calibrationdata/left-0052.png new file mode 100644 index 
0000000..6cb8205 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0052.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0053.png b/camera_models/camera_calib_example/calibrationdata/left-0053.png new file mode 100644 index 0000000..dd034b3 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0053.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0054.png b/camera_models/camera_calib_example/calibrationdata/left-0054.png new file mode 100644 index 0000000..de6739a Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0054.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0055.png b/camera_models/camera_calib_example/calibrationdata/left-0055.png new file mode 100644 index 0000000..f54492e Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0055.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0056.png b/camera_models/camera_calib_example/calibrationdata/left-0056.png new file mode 100644 index 0000000..8ebf5c2 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0056.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0057.png b/camera_models/camera_calib_example/calibrationdata/left-0057.png new file mode 100644 index 0000000..ea3cad9 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0057.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0058.png b/camera_models/camera_calib_example/calibrationdata/left-0058.png new file mode 100644 index 0000000..473f3b7 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0058.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0059.png 
b/camera_models/camera_calib_example/calibrationdata/left-0059.png new file mode 100644 index 0000000..99d5114 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0059.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0060.png b/camera_models/camera_calib_example/calibrationdata/left-0060.png new file mode 100644 index 0000000..5253a4e Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0060.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0061.png b/camera_models/camera_calib_example/calibrationdata/left-0061.png new file mode 100644 index 0000000..da303fe Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0061.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0062.png b/camera_models/camera_calib_example/calibrationdata/left-0062.png new file mode 100644 index 0000000..e112114 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0062.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0063.png b/camera_models/camera_calib_example/calibrationdata/left-0063.png new file mode 100644 index 0000000..5914621 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0063.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0064.png b/camera_models/camera_calib_example/calibrationdata/left-0064.png new file mode 100644 index 0000000..bbd0439 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0064.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0065.png b/camera_models/camera_calib_example/calibrationdata/left-0065.png new file mode 100644 index 0000000..709f15b Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0065.png differ diff --git 
a/camera_models/camera_calib_example/calibrationdata/left-0066.png b/camera_models/camera_calib_example/calibrationdata/left-0066.png new file mode 100644 index 0000000..943c5ac Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0066.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0067.png b/camera_models/camera_calib_example/calibrationdata/left-0067.png new file mode 100644 index 0000000..d56b91e Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0067.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0068.png b/camera_models/camera_calib_example/calibrationdata/left-0068.png new file mode 100644 index 0000000..33bcbff Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0068.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0069.png b/camera_models/camera_calib_example/calibrationdata/left-0069.png new file mode 100644 index 0000000..07d9e14 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0069.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0070.png b/camera_models/camera_calib_example/calibrationdata/left-0070.png new file mode 100644 index 0000000..cf1926e Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0070.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0071.png b/camera_models/camera_calib_example/calibrationdata/left-0071.png new file mode 100644 index 0000000..0d739b2 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0071.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0072.png b/camera_models/camera_calib_example/calibrationdata/left-0072.png new file mode 100644 index 0000000..3bba0a8 Binary files /dev/null and 
b/camera_models/camera_calib_example/calibrationdata/left-0072.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0073.png b/camera_models/camera_calib_example/calibrationdata/left-0073.png new file mode 100644 index 0000000..e8bd938 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0073.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0074.png b/camera_models/camera_calib_example/calibrationdata/left-0074.png new file mode 100644 index 0000000..9aa586e Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0074.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0075.png b/camera_models/camera_calib_example/calibrationdata/left-0075.png new file mode 100644 index 0000000..36923a5 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0075.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0076.png b/camera_models/camera_calib_example/calibrationdata/left-0076.png new file mode 100644 index 0000000..3a6c4f4 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0076.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0077.png b/camera_models/camera_calib_example/calibrationdata/left-0077.png new file mode 100644 index 0000000..7e75ec2 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0077.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0078.png b/camera_models/camera_calib_example/calibrationdata/left-0078.png new file mode 100644 index 0000000..da9ab4c Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0078.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0079.png b/camera_models/camera_calib_example/calibrationdata/left-0079.png new file mode 100644 index 
0000000..0b47d1c Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0079.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0080.png b/camera_models/camera_calib_example/calibrationdata/left-0080.png new file mode 100644 index 0000000..c7d9376 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0080.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0081.png b/camera_models/camera_calib_example/calibrationdata/left-0081.png new file mode 100644 index 0000000..a06ada4 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0081.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0082.png b/camera_models/camera_calib_example/calibrationdata/left-0082.png new file mode 100644 index 0000000..d77971e Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0082.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0083.png b/camera_models/camera_calib_example/calibrationdata/left-0083.png new file mode 100644 index 0000000..b931338 Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0083.png differ diff --git a/camera_models/camera_calib_example/calibrationdata/left-0084.png b/camera_models/camera_calib_example/calibrationdata/left-0084.png new file mode 100644 index 0000000..a01d4cd Binary files /dev/null and b/camera_models/camera_calib_example/calibrationdata/left-0084.png differ diff --git a/camera_models/camera_calib_example/readme.txt b/camera_models/camera_calib_example/readme.txt new file mode 100644 index 0000000..f249647 --- /dev/null +++ b/camera_models/camera_calib_example/readme.txt @@ -0,0 +1,8 @@ +# help for checking input parameters. +rosrun camera_models Calibrations --help + +# example pinhole model. 
+rosrun camera_models Calibrations -w 12 -h 8 -s 80 -i calibrationdata --camera-model pinhole + +# example mei model. +rosrun camera_models Calibrations -w 12 -h 8 -s 80 -i calibrationdata --camera-model mei diff --git a/camera_models/include/camodocal/calib/CameraCalibration.h b/camera_models/include/camodocal/calib/CameraCalibration.h new file mode 100644 index 0000000..ac80f69 --- /dev/null +++ b/camera_models/include/camodocal/calib/CameraCalibration.h @@ -0,0 +1,81 @@ +#ifndef CAMERACALIBRATION_H +#define CAMERACALIBRATION_H + +#include + +#include "camodocal/camera_models/Camera.h" + +namespace camodocal +{ + +class CameraCalibration +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + CameraCalibration(); + + CameraCalibration(Camera::ModelType modelType, + const std::string& cameraName, + const cv::Size& imageSize, + const cv::Size& boardSize, + float squareSize); + + void clear(void); + + void addChessboardData(const std::vector& corners); + + bool calibrate(void); + + int sampleCount(void) const; + std::vector >& imagePoints(void); + const std::vector >& imagePoints(void) const; + std::vector >& scenePoints(void); + const std::vector >& scenePoints(void) const; + CameraPtr& camera(void); + const CameraConstPtr camera(void) const; + + Eigen::Matrix2d& measurementCovariance(void); + const Eigen::Matrix2d& measurementCovariance(void) const; + + cv::Mat& cameraPoses(void); + const cv::Mat& cameraPoses(void) const; + + void drawResults(std::vector& images) const; + + void writeParams(const std::string& filename) const; + + bool writeChessboardData(const std::string& filename) const; + bool readChessboardData(const std::string& filename); + + void setVerbose(bool verbose); + +private: + bool calibrateHelper(CameraPtr& camera, + std::vector& rvecs, std::vector& tvecs) const; + + void optimize(CameraPtr& camera, + std::vector& rvecs, std::vector& tvecs) const; + + template + void readData(std::ifstream& ifs, T& data) const; + + template + void writeData(std::ofstream& 
ofs, T data) const; + + cv::Size m_boardSize; + float m_squareSize; + + CameraPtr m_camera; + cv::Mat m_cameraPoses; + + std::vector > m_imagePoints; + std::vector > m_scenePoints; + + Eigen::Matrix2d m_measurementCovariance; + + bool m_verbose; +}; + +} + +#endif diff --git a/camera_models/include/camodocal/camera_models/Camera.h b/camera_models/include/camodocal/camera_models/Camera.h new file mode 100644 index 0000000..68e2bd7 --- /dev/null +++ b/camera_models/include/camodocal/camera_models/Camera.h @@ -0,0 +1,148 @@ +#ifndef CAMERA_H +#define CAMERA_H + +#include +#include +#include +#include + +namespace camodocal +{ + +class Camera +{ + public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + enum ModelType + { + KANNALA_BRANDT, + MEI, + PINHOLE, + PINHOLE_FULL, + SCARAMUZZA + }; + + class Parameters + { + public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + Parameters( ModelType modelType ); + + Parameters( ModelType modelType, const std::string& cameraName, int w, int h ); + + ModelType& modelType( void ); + std::string& cameraName( void ); + int& imageWidth( void ); + int& imageHeight( void ); + + ModelType modelType( void ) const; + const std::string& cameraName( void ) const; + int imageWidth( void ) const; + int imageHeight( void ) const; + + int nIntrinsics( void ) const; + + virtual bool readFromYamlFile( const std::string& filename ) = 0; + virtual void writeToYamlFile( const std::string& filename ) const = 0; + + protected: + ModelType m_modelType; + int m_nIntrinsics; + std::string m_cameraName; + int m_imageWidth; + int m_imageHeight; + }; + + virtual ModelType modelType( void ) const = 0; + virtual const std::string& cameraName( void ) const = 0; + virtual int imageWidth( void ) const = 0; + virtual int imageHeight( void ) const = 0; + + virtual cv::Mat& mask( void ); + virtual const cv::Mat& mask( void ) const; + + virtual void estimateIntrinsics( const cv::Size& boardSize, + const std::vector< std::vector< cv::Point3f > >& objectPoints, + const std::vector< 
std::vector< cv::Point2f > >& imagePoints ) + = 0; + virtual void estimateExtrinsics( const std::vector< cv::Point3f >& objectPoints, + const std::vector< cv::Point2f >& imagePoints, + cv::Mat& rvec, + cv::Mat& tvec ) const; + + // Lift points from the image plane to the sphere + virtual void liftSphere( const Eigen::Vector2d& p, Eigen::Vector3d& P ) const = 0; + //%output P + + // Lift points from the image plane to the projective space + virtual void liftProjective( const Eigen::Vector2d& p, Eigen::Vector3d& P ) const = 0; + //%output P + + // Projects 3D points to the image plane (Pi function) + virtual void spaceToPlane( const Eigen::Vector3d& P, Eigen::Vector2d& p ) const = 0; + //%output p + + // Projects 3D points to the image plane (Pi function) + // and calculates jacobian + // virtual void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p, + // Eigen::Matrix& J) const = 0; + //%output p + //%output J + + virtual void undistToPlane( const Eigen::Vector2d& p_u, Eigen::Vector2d& p ) const = 0; + //%output p + + // virtual void initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale = 1.0) + // const = 0; + virtual cv::Mat initUndistortRectifyMap( cv::Mat& map1, + cv::Mat& map2, + float fx = -1.0f, + float fy = -1.0f, + cv::Size imageSize = cv::Size( 0, 0 ), + float cx = -1.0f, + float cy = -1.0f, + cv::Mat rmat = cv::Mat::eye( 3, 3, CV_32F ) ) const = 0; + + virtual int parameterCount( void ) const = 0; + + virtual void readParameters( const std::vector< double >& parameters ) = 0; + virtual void writeParameters( std::vector< double >& parameters ) const = 0; + + virtual void writeParametersToYamlFile( const std::string& filename ) const = 0; + + virtual std::string parametersToString( void ) const = 0; + + /** + * \brief Calculates the reprojection distance between points + * + * \param P1 first 3D point coordinates + * \param P2 second 3D point coordinates + * \return euclidean distance in the plane + */ + double reprojectionDist( const 
Eigen::Vector3d& P1, const Eigen::Vector3d& P2 ) const; + + double reprojectionError( const std::vector< std::vector< cv::Point3f > >& objectPoints, + const std::vector< std::vector< cv::Point2f > >& imagePoints, + const std::vector< cv::Mat >& rvecs, + const std::vector< cv::Mat >& tvecs, + cv::OutputArray perViewErrors = cv::noArray( ) ) const; + + double reprojectionError( const Eigen::Vector3d& P, + const Eigen::Quaterniond& camera_q, + const Eigen::Vector3d& camera_t, + const Eigen::Vector2d& observed_p ) const; + + void projectPoints( const std::vector< cv::Point3f >& objectPoints, + const cv::Mat& rvec, + const cv::Mat& tvec, + std::vector< cv::Point2f >& imagePoints ) const; + + protected: + cv::Mat m_mask; +}; + +typedef boost::shared_ptr< Camera > CameraPtr; +typedef boost::shared_ptr< const Camera > CameraConstPtr; +} + +#endif diff --git a/camera_models/include/camodocal/camera_models/CameraFactory.h b/camera_models/include/camodocal/camera_models/CameraFactory.h new file mode 100644 index 0000000..e3c2f28 --- /dev/null +++ b/camera_models/include/camodocal/camera_models/CameraFactory.h @@ -0,0 +1,32 @@ +#ifndef CAMERAFACTORY_H +#define CAMERAFACTORY_H + +#include +#include + +#include "camodocal/camera_models/Camera.h" + +namespace camodocal +{ + +class CameraFactory +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + CameraFactory(); + + static boost::shared_ptr instance(void); + + CameraPtr generateCamera(Camera::ModelType modelType, + const std::string& cameraName, + cv::Size imageSize) const; + + CameraPtr generateCameraFromYamlFile(const std::string& filename); + +private: + static boost::shared_ptr m_instance; +}; + +} + +#endif diff --git a/camera_models/include/camodocal/camera_models/CataCamera.h b/camera_models/include/camodocal/camera_models/CataCamera.h new file mode 100644 index 0000000..14384a5 --- /dev/null +++ b/camera_models/include/camodocal/camera_models/CataCamera.h @@ -0,0 +1,210 @@ +#ifndef CATACAMERA_H +#define CATACAMERA_H + 
+#include +#include + +#include "ceres/rotation.h" +#include "Camera.h" + +namespace camodocal +{ + +/** + * C. Mei, and P. Rives, Single View Point Omnidirectional Camera Calibration + * from Planar Grids, ICRA 2007 + */ + +class CataCamera: public Camera +{ +public: + class Parameters: public Camera::Parameters + { + public: + Parameters(); + Parameters(const std::string& cameraName, + int w, int h, + double xi, + double k1, double k2, double p1, double p2, + double gamma1, double gamma2, double u0, double v0); + + double& xi(void); + double& k1(void); + double& k2(void); + double& p1(void); + double& p2(void); + double& gamma1(void); + double& gamma2(void); + double& u0(void); + double& v0(void); + + double xi(void) const; + double k1(void) const; + double k2(void) const; + double p1(void) const; + double p2(void) const; + double gamma1(void) const; + double gamma2(void) const; + double u0(void) const; + double v0(void) const; + + bool readFromYamlFile(const std::string& filename); + void writeToYamlFile(const std::string& filename) const; + + Parameters& operator=(const Parameters& other); + friend std::ostream& operator<< (std::ostream& out, const Parameters& params); + + private: + double m_xi; + double m_k1; + double m_k2; + double m_p1; + double m_p2; + double m_gamma1; + double m_gamma2; + double m_u0; + double m_v0; + }; + + CataCamera(); + + /** + * \brief Constructor from the projection model parameters + */ + CataCamera(const std::string& cameraName, + int imageWidth, int imageHeight, + double xi, double k1, double k2, double p1, double p2, + double gamma1, double gamma2, double u0, double v0); + /** + * \brief Constructor from the projection model parameters + */ + CataCamera(const Parameters& params); + + Camera::ModelType modelType(void) const; + const std::string& cameraName(void) const; + int imageWidth(void) const; + int imageHeight(void) const; + + void estimateIntrinsics(const cv::Size& boardSize, + const std::vector< std::vector >& 
objectPoints, + const std::vector< std::vector >& imagePoints); + + // Lift points from the image plane to the sphere + void liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const; + //%output P + + // Lift points from the image plane to the projective space + void liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const; + //%output P + + // Projects 3D points to the image plane (Pi function) + void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const; + //%output p + + // Projects 3D points to the image plane (Pi function) + // and calculates jacobian + void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p, + Eigen::Matrix& J) const; + //%output p + //%output J + + void undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const; + //%output p + + template + static void spaceToPlane(const T* const params, + const T* const q, const T* const t, + const Eigen::Matrix& P, + Eigen::Matrix& p); + + void distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u) const; + void distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u, + Eigen::Matrix2d& J) const; + + void initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale = 1.0) const; + cv::Mat initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2, + float fx = -1.0f, float fy = -1.0f, + cv::Size imageSize = cv::Size(0, 0), + float cx = -1.0f, float cy = -1.0f, + cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const; + + int parameterCount(void) const; + + const Parameters& getParameters(void) const; + void setParameters(const Parameters& parameters); + + void readParameters(const std::vector& parameterVec); + void writeParameters(std::vector& parameterVec) const; + + void writeParametersToYamlFile(const std::string& filename) const; + + std::string parametersToString(void) const; + +private: + Parameters mParameters; + + double m_inv_K11, m_inv_K13, m_inv_K22, m_inv_K23; + bool m_noDistortion; +}; + +typedef boost::shared_ptr CataCameraPtr; +typedef 
boost::shared_ptr CataCameraConstPtr; + +template +void +CataCamera::spaceToPlane(const T* const params, + const T* const q, const T* const t, + const Eigen::Matrix& P, + Eigen::Matrix& p) +{ + T P_w[3]; + P_w[0] = T(P(0)); + P_w[1] = T(P(1)); + P_w[2] = T(P(2)); + + // Convert quaternion from Eigen convention (x, y, z, w) + // to Ceres convention (w, x, y, z) + T q_ceres[4] = {q[3], q[0], q[1], q[2]}; + + T P_c[3]; + ceres::QuaternionRotatePoint(q_ceres, P_w, P_c); + + P_c[0] += t[0]; + P_c[1] += t[1]; + P_c[2] += t[2]; + + // project 3D object point to the image plane + T xi = params[0]; + T k1 = params[1]; + T k2 = params[2]; + T p1 = params[3]; + T p2 = params[4]; + T gamma1 = params[5]; + T gamma2 = params[6]; + T alpha = T(0); //cameraParams.alpha(); + T u0 = params[7]; + T v0 = params[8]; + + // Transform to model plane + T len = sqrt(P_c[0] * P_c[0] + P_c[1] * P_c[1] + P_c[2] * P_c[2]); + P_c[0] /= len; + P_c[1] /= len; + P_c[2] /= len; + + T u = P_c[0] / (P_c[2] + xi); + T v = P_c[1] / (P_c[2] + xi); + + T rho_sqr = u * u + v * v; + T L = T(1.0) + k1 * rho_sqr + k2 * rho_sqr * rho_sqr; + T du = T(2.0) * p1 * u * v + p2 * (rho_sqr + T(2.0) * u * u); + T dv = p1 * (rho_sqr + T(2.0) * v * v) + T(2.0) * p2 * u * v; + + u = L * u + du; + v = L * v + dv; + p(0) = gamma1 * (u + alpha * v) + u0; + p(1) = gamma2 * v + v0; +} + +} + +#endif diff --git a/camera_models/include/camodocal/camera_models/CostFunctionFactory.h b/camera_models/include/camodocal/camera_models/CostFunctionFactory.h new file mode 100644 index 0000000..40260da --- /dev/null +++ b/camera_models/include/camodocal/camera_models/CostFunctionFactory.h @@ -0,0 +1,82 @@ +#ifndef COSTFUNCTIONFACTORY_H +#define COSTFUNCTIONFACTORY_H + +#include +#include + +#include "camodocal/camera_models/Camera.h" + +namespace ceres +{ + class CostFunction; +} + +namespace camodocal +{ + +enum +{ + CAMERA_INTRINSICS = 1 << 0, + CAMERA_POSE = 1 << 1, + POINT_3D = 1 << 2, + ODOMETRY_INTRINSICS = 1 << 3, + 
ODOMETRY_3D_POSE = 1 << 4, + ODOMETRY_6D_POSE = 1 << 5, + CAMERA_ODOMETRY_TRANSFORM = 1 << 6 +}; + +class CostFunctionFactory +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + CostFunctionFactory(); + + static boost::shared_ptr instance(void); + + ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera, + const Eigen::Vector3d& observed_P, + const Eigen::Vector2d& observed_p, + int flags) const; + + ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera, + const Eigen::Vector3d& observed_P, + const Eigen::Vector2d& observed_p, + const Eigen::Matrix2d& sqrtPrecisionMat, + int flags) const; + + ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera, + const Eigen::Vector2d& observed_p, + int flags, bool optimize_cam_odo_z = true) const; + + ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera, + const Eigen::Vector2d& observed_p, + const Eigen::Matrix2d& sqrtPrecisionMat, + int flags, bool optimize_cam_odo_z = true) const; + + ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera, + const Eigen::Vector3d& odo_pos, + const Eigen::Vector3d& odo_att, + const Eigen::Vector2d& observed_p, + int flags, bool optimize_cam_odo_z = true) const; + + ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera, + const Eigen::Quaterniond& cam_odo_q, + const Eigen::Vector3d& cam_odo_t, + const Eigen::Vector3d& odo_pos, + const Eigen::Vector3d& odo_att, + const Eigen::Vector2d& observed_p, + int flags) const; + + ceres::CostFunction* generateCostFunction(const CameraConstPtr& cameraLeft, + const CameraConstPtr& cameraRight, + const Eigen::Vector3d& observed_P, + const Eigen::Vector2d& observed_p_left, + const Eigen::Vector2d& observed_p_right) const; + +private: + static boost::shared_ptr m_instance; +}; + +} + +#endif diff --git a/camera_models/include/camodocal/camera_models/EquidistantCamera.h b/camera_models/include/camodocal/camera_models/EquidistantCamera.h new file mode 100644 index 
0000000..4e172d3 --- /dev/null +++ b/camera_models/include/camodocal/camera_models/EquidistantCamera.h @@ -0,0 +1,215 @@ +#ifndef EQUIDISTANTCAMERA_H +#define EQUIDISTANTCAMERA_H + +#include +#include + +#include "ceres/rotation.h" +#include "Camera.h" + +namespace camodocal +{ + +/** + * J. Kannala, and S. Brandt, A Generic Camera Model and Calibration Method + * for Conventional, Wide-Angle, and Fish-Eye Lenses, PAMI 2006 + */ + +class EquidistantCamera: public Camera +{ +public: + class Parameters: public Camera::Parameters + { + public: + Parameters(); + Parameters(const std::string& cameraName, + int w, int h, + double k2, double k3, double k4, double k5, + double mu, double mv, + double u0, double v0); + + double& k2(void); + double& k3(void); + double& k4(void); + double& k5(void); + double& mu(void); + double& mv(void); + double& u0(void); + double& v0(void); + + double k2(void) const; + double k3(void) const; + double k4(void) const; + double k5(void) const; + double mu(void) const; + double mv(void) const; + double u0(void) const; + double v0(void) const; + + bool readFromYamlFile(const std::string& filename); + void writeToYamlFile(const std::string& filename) const; + + Parameters& operator=(const Parameters& other); + friend std::ostream& operator<< (std::ostream& out, const Parameters& params); + + private: + // projection + double m_k2; + double m_k3; + double m_k4; + double m_k5; + + double m_mu; + double m_mv; + double m_u0; + double m_v0; + }; + + EquidistantCamera(); + + /** + * \brief Constructor from the projection model parameters + */ + EquidistantCamera(const std::string& cameraName, + int imageWidth, int imageHeight, + double k2, double k3, double k4, double k5, + double mu, double mv, + double u0, double v0); + /** + * \brief Constructor from the projection model parameters + */ + EquidistantCamera(const Parameters& params); + + Camera::ModelType modelType(void) const; + const std::string& cameraName(void) const; + int imageWidth(void) 
const; + int imageHeight(void) const; + + void estimateIntrinsics(const cv::Size& boardSize, + const std::vector< std::vector >& objectPoints, + const std::vector< std::vector >& imagePoints); + + // Lift points from the image plane to the sphere + virtual void liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const; + //%output P + + // Lift points from the image plane to the projective space + void liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const; + //%output P + + // Projects 3D points to the image plane (Pi function) + void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const; + //%output p + + // Projects 3D points to the image plane (Pi function) + // and calculates jacobian + void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p, + Eigen::Matrix& J) const; + //%output p + //%output J + + void undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const; + //%output p + + template + static void spaceToPlane(const T* const params, + const T* const q, const T* const t, + const Eigen::Matrix& P, + Eigen::Matrix& p); + + void initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale = 1.0) const; + cv::Mat initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2, + float fx = -1.0f, float fy = -1.0f, + cv::Size imageSize = cv::Size(0, 0), + float cx = -1.0f, float cy = -1.0f, + cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const; + + int parameterCount(void) const; + + const Parameters& getParameters(void) const; + void setParameters(const Parameters& parameters); + + void readParameters(const std::vector& parameterVec); + void writeParameters(std::vector& parameterVec) const; + + void writeParametersToYamlFile(const std::string& filename) const; + + std::string parametersToString(void) const; + +private: + template + static T r(T k2, T k3, T k4, T k5, T theta); + + + void fitOddPoly(const std::vector& x, const std::vector& y, + int n, std::vector& coeffs) const; + + void backprojectSymmetric(const Eigen::Vector2d& 
p_u, + double& theta, double& phi) const; + + Parameters mParameters; + + double m_inv_K11, m_inv_K13, m_inv_K22, m_inv_K23; +}; + +typedef boost::shared_ptr EquidistantCameraPtr; +typedef boost::shared_ptr EquidistantCameraConstPtr; + +template +T +EquidistantCamera::r(T k2, T k3, T k4, T k5, T theta) +{ + // k1 = 1 + return theta + + k2 * theta * theta * theta + + k3 * theta * theta * theta * theta * theta + + k4 * theta * theta * theta * theta * theta * theta * theta + + k5 * theta * theta * theta * theta * theta * theta * theta * theta * theta; +} + +template +void +EquidistantCamera::spaceToPlane(const T* const params, + const T* const q, const T* const t, + const Eigen::Matrix& P, + Eigen::Matrix& p) +{ + T P_w[3]; + P_w[0] = T(P(0)); + P_w[1] = T(P(1)); + P_w[2] = T(P(2)); + + // Convert quaternion from Eigen convention (x, y, z, w) + // to Ceres convention (w, x, y, z) + T q_ceres[4] = {q[3], q[0], q[1], q[2]}; + + T P_c[3]; + ceres::QuaternionRotatePoint(q_ceres, P_w, P_c); + + P_c[0] += t[0]; + P_c[1] += t[1]; + P_c[2] += t[2]; + + // project 3D object point to the image plane; + T k2 = params[0]; + T k3 = params[1]; + T k4 = params[2]; + T k5 = params[3]; + T mu = params[4]; + T mv = params[5]; + T u0 = params[6]; + T v0 = params[7]; + + T len = sqrt(P_c[0] * P_c[0] + P_c[1] * P_c[1] + P_c[2] * P_c[2]); + T theta = acos(P_c[2] / len); + T phi = atan2(P_c[1], P_c[0]); + + Eigen::Matrix p_u = r(k2, k3, k4, k5, theta) * Eigen::Matrix(cos(phi), sin(phi)); + + p(0) = mu * p_u(0) + u0; + p(1) = mv * p_u(1) + v0; +} + +} + +#endif diff --git a/camera_models/include/camodocal/camera_models/PinholeCamera.h b/camera_models/include/camodocal/camera_models/PinholeCamera.h new file mode 100644 index 0000000..f368a0b --- /dev/null +++ b/camera_models/include/camodocal/camera_models/PinholeCamera.h @@ -0,0 +1,196 @@ +#ifndef PINHOLECAMERA_H +#define PINHOLECAMERA_H + +#include +#include + +#include "ceres/rotation.h" +#include "Camera.h" + +namespace camodocal +{ + 
+class PinholeCamera: public Camera +{ +public: + class Parameters: public Camera::Parameters + { + public: + Parameters(); + Parameters(const std::string& cameraName, + int w, int h, + double k1, double k2, double p1, double p2, + double fx, double fy, double cx, double cy); + + double& k1(void); + double& k2(void); + double& p1(void); + double& p2(void); + double& fx(void); + double& fy(void); + double& cx(void); + double& cy(void); + + double xi(void) const; + double k1(void) const; + double k2(void) const; + double p1(void) const; + double p2(void) const; + double fx(void) const; + double fy(void) const; + double cx(void) const; + double cy(void) const; + + bool readFromYamlFile(const std::string& filename); + void writeToYamlFile(const std::string& filename) const; + + Parameters& operator=(const Parameters& other); + friend std::ostream& operator<< (std::ostream& out, const Parameters& params); + + private: + double m_k1; + double m_k2; + double m_p1; + double m_p2; + double m_fx; + double m_fy; + double m_cx; + double m_cy; + }; + + PinholeCamera(); + + /** + * \brief Constructor from the projection model parameters + */ + PinholeCamera(const std::string& cameraName, + int imageWidth, int imageHeight, + double k1, double k2, double p1, double p2, + double fx, double fy, double cx, double cy); + /** + * \brief Constructor from the projection model parameters + */ + PinholeCamera(const Parameters& params); + + Camera::ModelType modelType(void) const; + const std::string& cameraName(void) const; + int imageWidth(void) const; + int imageHeight(void) const; + + void estimateIntrinsics(const cv::Size& boardSize, + const std::vector< std::vector >& objectPoints, + const std::vector< std::vector >& imagePoints); + + // Lift points from the image plane to the sphere + virtual void liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const; + //%output P + + // Lift points from the image plane to the projective space + void liftProjective(const Eigen::Vector2d& p, 
Eigen::Vector3d& P) const; + //%output P + + // Projects 3D points to the image plane (Pi function) + void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const; + //%output p + + // Projects 3D points to the image plane (Pi function) + // and calculates jacobian + void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p, + Eigen::Matrix& J) const; + //%output p + //%output J + + void undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const; + //%output p + + template + static void spaceToPlane(const T* const params, + const T* const q, const T* const t, + const Eigen::Matrix& P, + Eigen::Matrix& p); + + void distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u) const; + void distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u, + Eigen::Matrix2d& J) const; + + void initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale = 1.0) const; + cv::Mat initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2, + float fx = -1.0f, float fy = -1.0f, + cv::Size imageSize = cv::Size(0, 0), + float cx = -1.0f, float cy = -1.0f, + cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const; + + int parameterCount(void) const; + + const Parameters& getParameters(void) const; + void setParameters(const Parameters& parameters); + + void readParameters(const std::vector& parameterVec); + void writeParameters(std::vector& parameterVec) const; + + void writeParametersToYamlFile(const std::string& filename) const; + + std::string parametersToString(void) const; + +private: + Parameters mParameters; + + double m_inv_K11, m_inv_K13, m_inv_K22, m_inv_K23; + bool m_noDistortion; +}; + +typedef boost::shared_ptr PinholeCameraPtr; +typedef boost::shared_ptr PinholeCameraConstPtr; + +template +void +PinholeCamera::spaceToPlane(const T* const params, + const T* const q, const T* const t, + const Eigen::Matrix& P, + Eigen::Matrix& p) +{ + T P_w[3]; + P_w[0] = T(P(0)); + P_w[1] = T(P(1)); + P_w[2] = T(P(2)); + + // Convert quaternion from Eigen convention (x, y, z, 
w) + // to Ceres convention (w, x, y, z) + T q_ceres[4] = {q[3], q[0], q[1], q[2]}; + + T P_c[3]; + ceres::QuaternionRotatePoint(q_ceres, P_w, P_c); + + P_c[0] += t[0]; + P_c[1] += t[1]; + P_c[2] += t[2]; + + // project 3D object point to the image plane + T k1 = params[0]; + T k2 = params[1]; + T p1 = params[2]; + T p2 = params[3]; + T fx = params[4]; + T fy = params[5]; + T alpha = T(0); //cameraParams.alpha(); + T cx = params[6]; + T cy = params[7]; + + // Transform to model plane + T u = P_c[0] / P_c[2]; + T v = P_c[1] / P_c[2]; + + T rho_sqr = u * u + v * v; + T L = T(1.0) + k1 * rho_sqr + k2 * rho_sqr * rho_sqr; + T du = T(2.0) * p1 * u * v + p2 * (rho_sqr + T(2.0) * u * u); + T dv = p1 * (rho_sqr + T(2.0) * v * v) + T(2.0) * p2 * u * v; + + u = L * u + du; + v = L * v + dv; + p(0) = fx * (u + alpha * v) + cx; + p(1) = fy * v + cy; +} + +} + +#endif diff --git a/camera_models/include/camodocal/camera_models/PinholeFullCamera.h b/camera_models/include/camodocal/camera_models/PinholeFullCamera.h new file mode 100644 index 0000000..b391c8a --- /dev/null +++ b/camera_models/include/camodocal/camera_models/PinholeFullCamera.h @@ -0,0 +1,274 @@ +#ifndef PinholeFullCAMERA_H +#define PinholeFullCAMERA_H + +#include +#include + +#include "Camera.h" +#include "ceres/rotation.h" + +namespace camodocal +{ + +class PinholeFullCamera : public Camera +{ + public: + class Parameters : public Camera::Parameters + { + public: + Parameters( ); + Parameters( const std::string& cameraName, + int w, + int h, + double k1, + double k2, + double k3, + double k4, + double k5, + double k6, + double p1, + double p2, + double fx, + double fy, + double cx, + double cy ); + + double& k1( void ); + double& k2( void ); + double& k3( void ); + double& k4( void ); + double& k5( void ); + double& k6( void ); + double& p1( void ); + double& p2( void ); + double& fx( void ); + double& fy( void ); + double& cx( void ); + double& cy( void ); + + double xi( void ) const; + double k1( void ) const; + 
double k2( void ) const; + double k3( void ) const; + double k4( void ) const; + double k5( void ) const; + double k6( void ) const; + double p1( void ) const; + double p2( void ) const; + double fx( void ) const; + double fy( void ) const; + double cx( void ) const; + double cy( void ) const; + + bool readFromYamlFile( const std::string& filename ); + void writeToYamlFile( const std::string& filename ) const; + + Parameters& operator=( const Parameters& other ); + friend std::ostream& operator<<( std::ostream& out, const Parameters& params ); + + private: + double m_k1; + double m_k2; + double m_p1; + double m_p2; + double m_fx; + double m_fy; + double m_cx; + double m_cy; + double m_k3; + double m_k4; + double m_k5; + double m_k6; + }; + + PinholeFullCamera( ); + + /** + * \brief Constructor from the projection model parameters + */ + PinholeFullCamera( const std::string& cameraName, + int imageWidth, + int imageHeight, + double k1, + double k2, + double k3, + double k4, + double k5, + double k6, + double p1, + double p2, + double fx, + double fy, + double cx, + double cy ); + /** + * \brief Constructor from the projection model parameters + */ + PinholeFullCamera( const Parameters& params ); + + Camera::ModelType modelType( void ) const; + const std::string& cameraName( void ) const; + int imageWidth( void ) const; + int imageHeight( void ) const; + cv::Size imageSize( ) const { return cv::Size( imageWidth( ), imageHeight( ) ); } + cv::Point2f getPrinciple( ) const + { + return cv::Point2f( mParameters.cx( ), mParameters.cy( ) ); + } + + void estimateIntrinsics( const cv::Size& boardSize, + const std::vector< std::vector< cv::Point3f > >& objectPoints, + const std::vector< std::vector< cv::Point2f > >& imagePoints ); + + void setInitIntrinsics( const std::vector< std::vector< cv::Point3f > >& objectPoints, + const std::vector< std::vector< cv::Point2f > >& imagePoints ) + { + Parameters params = getParameters( ); + + params.k1( ) = 0.0; + params.k2( ) = 0.0; + 
params.k3( ) = 0.0; + params.k4( ) = 0.0; + params.k5( ) = 0.0; + params.k6( ) = 0.0; + params.p1( ) = 0.0; + params.p2( ) = 0.0; + + double cx = params.imageWidth( ) / 2.0; + double cy = params.imageHeight( ) / 2.0; + params.cx( ) = cx; + params.cy( ) = cy; + params.fx( ) = 1200; + params.fy( ) = 1200; + + setParameters( params ); + } + + // Lift points from the image plane to the sphere + virtual void liftSphere( const Eigen::Vector2d& p, Eigen::Vector3d& P ) const; + //%output P + + // Lift points from the image plane to the projective space + void liftProjective( const Eigen::Vector2d& p, Eigen::Vector3d& P ) const; + //%output P + + void liftProjective( const Eigen::Vector2d& p, Eigen::Vector3d& P, float image_scale ) const; + + // Projects 3D points to the image plane (Pi function) + void spaceToPlane( const Eigen::Vector3d& P, Eigen::Vector2d& p ) const; + //%output p + + void spaceToPlane( const Eigen::Vector3d& P, Eigen::Vector2d& p, float image_scalse ) const; + + // Projects 3D points to the image plane (Pi function) + // and calculates jacobian + void spaceToPlane( const Eigen::Vector3d& P, Eigen::Vector2d& p, Eigen::Matrix< double, 2, 3 >& J ) const; + //%output p + //%output J + + void undistToPlane( const Eigen::Vector2d& p_u, Eigen::Vector2d& p ) const; + //%output p + + template< typename T > + static void spaceToPlane( const T* const params, + const T* const q, + const T* const t, + const Eigen::Matrix< T, 3, 1 >& P, + Eigen::Matrix< T, 2, 1 >& p ); + + void distortion( const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u ) const; + void distortion( const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u, Eigen::Matrix2d& J ) const; + + void initUndistortMap( cv::Mat& map1, cv::Mat& map2, double fScale = 1.0 ) const; + cv::Mat initUndistortRectifyMap( cv::Mat& map1, + cv::Mat& map2, + float fx = -1.0f, + float fy = -1.0f, + cv::Size imageSize = cv::Size( 0, 0 ), + float cx = -1.0f, + float cy = -1.0f, + cv::Mat rmat = cv::Mat::eye( 3, 3, CV_32F ) ) const; + 
+ int parameterCount( void ) const; + + const Parameters& getParameters( void ) const; + void setParameters( const Parameters& parameters ); + + void readParameters( const std::vector< double >& parameterVec ); + void writeParameters( std::vector< double >& parameterVec ) const; + + void writeParametersToYamlFile( const std::string& filename ) const; + + std::string parametersToString( void ) const; + + private: + Parameters mParameters; + + double m_inv_K11, m_inv_K13, m_inv_K22, m_inv_K23; + bool m_noDistortion; +}; + +typedef boost::shared_ptr< PinholeFullCamera > PinholeFullCameraPtr; +typedef boost::shared_ptr< const PinholeFullCamera > PinholeFullCameraConstPtr; + +template< typename T > +void +PinholeFullCamera::spaceToPlane( const T* const params, + const T* const q, + const T* const t, + const Eigen::Matrix< T, 3, 1 >& P, + Eigen::Matrix< T, 2, 1 >& p ) +{ + T P_w[3]; + P_w[0] = T( P( 0 ) ); + P_w[1] = T( P( 1 ) ); + P_w[2] = T( P( 2 ) ); + + // Convert quaternion from Eigen convention (x, y, z, w) + // to Ceres convention (w, x, y, z) + T q_ceres[4] = { q[3], q[0], q[1], q[2] }; + + T P_c[3]; + ceres::QuaternionRotatePoint( q_ceres, P_w, P_c ); + + P_c[0] += t[0]; + P_c[1] += t[1]; + P_c[2] += t[2]; + + // project 3D object point to the image plane + T k1 = params[0]; + T k2 = params[1]; + T k3 = params[2]; + T k4 = params[3]; + T k5 = params[4]; + T k6 = params[5]; + T p1 = params[6]; + T p2 = params[7]; + T fx = params[8]; + T fy = params[9]; + T alpha = T( 0 ); // cameraParams.alpha(); + T cx = params[10]; + T cy = params[11]; + + // Transform to model plane + T x = P_c[0] / P_c[2]; + T y = P_c[1] / P_c[2]; + // T z = P_c[2] / P_c[2]; + + T r2 = x * x + y * y; + T r4 = r2 * r2; + T r6 = r4 * r2; + T a1 = T( 2 ) * x * y; + T a2 = r2 + T( 2 ) * x * x; + T a3 = r2 + T( 2 ) * y * y; + T cdist = T( 1 ) + k1 * r2 + k2 * r4 + k3 * r6; + T icdist2 = T( 1. 
) / ( T( 1 ) + k4 * r2 + k5 * r4 + k6 * r6 ); + T xd0 = x * cdist * icdist2 + p1 * a1 + p2 * a2; // + k[8] * r2 + k[9] * r4; + T yd0 = y * cdist * icdist2 + p1 * a3 + p2 * a1; // + k[10] * r2 + k[11] * r4; + + p( 0 ) = xd0 * fx + cx; + p( 1 ) = yd0 * fy + cy; +} +} + +#endif diff --git a/camera_models/include/camodocal/camera_models/ScaramuzzaCamera.h b/camera_models/include/camodocal/camera_models/ScaramuzzaCamera.h new file mode 100644 index 0000000..3d54aca --- /dev/null +++ b/camera_models/include/camodocal/camera_models/ScaramuzzaCamera.h @@ -0,0 +1,348 @@ +#ifndef SCARAMUZZACAMERA_H +#define SCARAMUZZACAMERA_H + +#include +#include + +#include "ceres/rotation.h" +#include "Camera.h" + +namespace camodocal +{ + +#define SCARAMUZZA_POLY_SIZE 5 +#define SCARAMUZZA_INV_POLY_SIZE 20 + +#define SCARAMUZZA_CAMERA_NUM_PARAMS (SCARAMUZZA_POLY_SIZE + SCARAMUZZA_INV_POLY_SIZE + 2 /*center*/ + 3 /*affine*/) + +/** + * Scaramuzza Camera (Omnidirectional) + * https://sites.google.com/site/scarabotix/ocamcalib-toolbox + */ + +class OCAMCamera: public Camera +{ +public: + class Parameters: public Camera::Parameters + { + public: + Parameters(); + + double& C(void) { return m_C; } + double& D(void) { return m_D; } + double& E(void) { return m_E; } + + double& center_x(void) { return m_center_x; } + double& center_y(void) { return m_center_y; } + + double& poly(int idx) { return m_poly[idx]; } + double& inv_poly(int idx) { return m_inv_poly[idx]; } + + double C(void) const { return m_C; } + double D(void) const { return m_D; } + double E(void) const { return m_E; } + + double center_x(void) const { return m_center_x; } + double center_y(void) const { return m_center_y; } + + double poly(int idx) const { return m_poly[idx]; } + double inv_poly(int idx) const { return m_inv_poly[idx]; } + + bool readFromYamlFile(const std::string& filename); + void writeToYamlFile(const std::string& filename) const; + + Parameters& operator=(const Parameters& other); + friend std::ostream& 
operator<< (std::ostream& out, const Parameters& params); + + private: + double m_poly[SCARAMUZZA_POLY_SIZE]; + double m_inv_poly[SCARAMUZZA_INV_POLY_SIZE]; + double m_C; + double m_D; + double m_E; + double m_center_x; + double m_center_y; + }; + + OCAMCamera(); + + /** + * \brief Constructor from the projection model parameters + */ + OCAMCamera(const Parameters& params); + + Camera::ModelType modelType(void) const; + const std::string& cameraName(void) const; + int imageWidth(void) const; + int imageHeight(void) const; + + void estimateIntrinsics(const cv::Size& boardSize, + const std::vector< std::vector >& objectPoints, + const std::vector< std::vector >& imagePoints); + + // Lift points from the image plane to the sphere + void liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const; + //%output P + + // Lift points from the image plane to the projective space + void liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const; + //%output P + + // Projects 3D points to the image plane (Pi function) + void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const; + //%output p + + // Projects 3D points to the image plane (Pi function) + // and calculates jacobian + //void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p, + // Eigen::Matrix& J) const; + //%output p + //%output J + + void undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const; + //%output p + + template + static void spaceToPlane(const T* const params, + const T* const q, const T* const t, + const Eigen::Matrix& P, + Eigen::Matrix& p); + template + static void spaceToSphere(const T* const params, + const T* const q, const T* const t, + const Eigen::Matrix& P, + Eigen::Matrix& P_s); + template + static void LiftToSphere(const T* const params, + const Eigen::Matrix& p, + Eigen::Matrix& P); + + template + static void SphereToPlane(const T* const params, const Eigen::Matrix& P, + Eigen::Matrix& p); + + + void initUndistortMap(cv::Mat& map1, cv::Mat& map2, 
double fScale = 1.0) const; + cv::Mat initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2, + float fx = -1.0f, float fy = -1.0f, + cv::Size imageSize = cv::Size(0, 0), + float cx = -1.0f, float cy = -1.0f, + cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const; + + int parameterCount(void) const; + + const Parameters& getParameters(void) const; + void setParameters(const Parameters& parameters); + + void readParameters(const std::vector& parameterVec); + void writeParameters(std::vector& parameterVec) const; + + void writeParametersToYamlFile(const std::string& filename) const; + + std::string parametersToString(void) const; + +private: + Parameters mParameters; + + double m_inv_scale; +}; + +typedef boost::shared_ptr OCAMCameraPtr; +typedef boost::shared_ptr OCAMCameraConstPtr; + +template +void +OCAMCamera::spaceToPlane(const T* const params, + const T* const q, const T* const t, + const Eigen::Matrix& P, + Eigen::Matrix& p) +{ + T P_c[3]; + { + T P_w[3]; + P_w[0] = T(P(0)); + P_w[1] = T(P(1)); + P_w[2] = T(P(2)); + + // Convert quaternion from Eigen convention (x, y, z, w) + // to Ceres convention (w, x, y, z) + T q_ceres[4] = {q[3], q[0], q[1], q[2]}; + + ceres::QuaternionRotatePoint(q_ceres, P_w, P_c); + + P_c[0] += t[0]; + P_c[1] += t[1]; + P_c[2] += t[2]; + } + + T c = params[0]; + T d = params[1]; + T e = params[2]; + T xc[2] = { params[3], params[4] }; + + //T poly[SCARAMUZZA_POLY_SIZE]; + //for (int i=0; i < SCARAMUZZA_POLY_SIZE; i++) + // poly[i] = params[5+i]; + + T inv_poly[SCARAMUZZA_INV_POLY_SIZE]; + for (int i=0; i < SCARAMUZZA_INV_POLY_SIZE; i++) + inv_poly[i] = params[5 + SCARAMUZZA_POLY_SIZE + i]; + + T norm_sqr = P_c[0] * P_c[0] + P_c[1] * P_c[1]; + T norm = T(0.0); + if (norm_sqr > T(0.0)) + norm = sqrt(norm_sqr); + + T theta = atan2(-P_c[2], norm); + T rho = T(0.0); + T theta_i = T(1.0); + + for (int i = 0; i < SCARAMUZZA_INV_POLY_SIZE; i++) + { + rho += theta_i * inv_poly[i]; + theta_i *= theta; + } + + T invNorm = T(1.0) / norm; + T xn[2] = { + 
P_c[0] * invNorm * rho, + P_c[1] * invNorm * rho + }; + + p(0) = xn[0] * c + xn[1] * d + xc[0]; + p(1) = xn[0] * e + xn[1] + xc[1]; +} + +template +void +OCAMCamera::spaceToSphere(const T* const params, + const T* const q, const T* const t, + const Eigen::Matrix& P, + Eigen::Matrix& P_s) +{ + T P_c[3]; + { + T P_w[3]; + P_w[0] = T(P(0)); + P_w[1] = T(P(1)); + P_w[2] = T(P(2)); + + // Convert quaternion from Eigen convention (x, y, z, w) + // to Ceres convention (w, x, y, z) + T q_ceres[4] = {q[3], q[0], q[1], q[2]}; + + ceres::QuaternionRotatePoint(q_ceres, P_w, P_c); + + P_c[0] += t[0]; + P_c[1] += t[1]; + P_c[2] += t[2]; + } + + //T poly[SCARAMUZZA_POLY_SIZE]; + //for (int i=0; i < SCARAMUZZA_POLY_SIZE; i++) + // poly[i] = params[5+i]; + + T norm_sqr = P_c[0] * P_c[0] + P_c[1] * P_c[1] + P_c[2] * P_c[2]; + T norm = T(0.0); + if (norm_sqr > T(0.0)) + norm = sqrt(norm_sqr); + + P_s(0) = P_c[0] / norm; + P_s(1) = P_c[1] / norm; + P_s(2) = P_c[2] / norm; +} + +template +void +OCAMCamera::LiftToSphere(const T* const params, + const Eigen::Matrix& p, + Eigen::Matrix& P) +{ + T c = params[0]; + T d = params[1]; + T e = params[2]; + T cc[2] = { params[3], params[4] }; + T poly[SCARAMUZZA_POLY_SIZE]; + for (int i=0; i < SCARAMUZZA_POLY_SIZE; i++) + poly[i] = params[5+i]; + + // Relative to Center + T p_2d[2]; + p_2d[0] = T(p(0)); + p_2d[1] = T(p(1)); + + T xc[2] = { p_2d[0] - cc[0], p_2d[1] - cc[1]}; + + T inv_scale = T(1.0) / (c - d * e); + + // Affine Transformation + T xc_a[2]; + + xc_a[0] = inv_scale * (xc[0] - d * xc[1]); + xc_a[1] = inv_scale * (-e * xc[0] + c * xc[1]); + + T norm_sqr = xc_a[0] * xc_a[0] + xc_a[1] * xc_a[1]; + T phi = sqrt(norm_sqr); + T phi_i = T(1.0); + T z = T(0.0); + + for (int i = 0; i < SCARAMUZZA_POLY_SIZE; i++) + { + if (i!=1) { + z += phi_i * poly[i]; + } + phi_i *= phi; + } + + T p_3d[3]; + p_3d[0] = xc[0]; + p_3d[1] = xc[1]; + p_3d[2] = -z; + + T p_3d_norm_sqr = p_3d[0] * p_3d[0] + p_3d[1] * p_3d[1] + p_3d[2] * p_3d[2]; + T p_3d_norm = 
sqrt(p_3d_norm_sqr); + + P << p_3d[0] / p_3d_norm, p_3d[1] / p_3d_norm, p_3d[2] / p_3d_norm; +} + +template +void OCAMCamera::SphereToPlane(const T* const params, const Eigen::Matrix& P, + Eigen::Matrix& p) { + T P_c[3]; + { + P_c[0] = T(P(0)); + P_c[1] = T(P(1)); + P_c[2] = T(P(2)); + } + + T c = params[0]; + T d = params[1]; + T e = params[2]; + T xc[2] = {params[3], params[4]}; + + T inv_poly[SCARAMUZZA_INV_POLY_SIZE]; + for (int i = 0; i < SCARAMUZZA_INV_POLY_SIZE; i++) + inv_poly[i] = params[5 + SCARAMUZZA_POLY_SIZE + i]; + + T norm_sqr = P_c[0] * P_c[0] + P_c[1] * P_c[1]; + T norm = T(0.0); + if (norm_sqr > T(0.0)) norm = sqrt(norm_sqr); + + T theta = atan2(-P_c[2], norm); + T rho = T(0.0); + T theta_i = T(1.0); + + for (int i = 0; i < SCARAMUZZA_INV_POLY_SIZE; i++) { + rho += theta_i * inv_poly[i]; + theta_i *= theta; + } + + T invNorm = T(1.0) / norm; + T xn[2] = {P_c[0] * invNorm * rho, P_c[1] * invNorm * rho}; + + p(0) = xn[0] * c + xn[1] * d + xc[0]; + p(1) = xn[0] * e + xn[1] + xc[1]; +} +} + +#endif diff --git a/camera_models/include/camodocal/chessboard/Chessboard.h b/camera_models/include/camodocal/chessboard/Chessboard.h new file mode 100644 index 0000000..fb42198 --- /dev/null +++ b/camera_models/include/camodocal/chessboard/Chessboard.h @@ -0,0 +1,86 @@ +#ifndef CHESSBOARD_H +#define CHESSBOARD_H + +#include +#include + +namespace camodocal +{ + +// forward declarations +class ChessboardCorner; +typedef boost::shared_ptr ChessboardCornerPtr; +class ChessboardQuad; +typedef boost::shared_ptr ChessboardQuadPtr; + +class Chessboard +{ +public: + Chessboard(cv::Size boardSize, cv::Mat& image); + + void findCorners(bool useOpenCV = false); + const std::vector& getCorners(void) const; + bool cornersFound(void) const; + + const cv::Mat& getImage(void) const; + const cv::Mat& getSketch(void) const; + +private: + bool findChessboardCorners(const cv::Mat& image, + const cv::Size& patternSize, + std::vector& corners, + int flags, bool useOpenCV); + + bool 
findChessboardCornersImproved(const cv::Mat& image, + const cv::Size& patternSize, + std::vector& corners, + int flags); + + void cleanFoundConnectedQuads(std::vector& quadGroup, cv::Size patternSize); + + void findConnectedQuads(std::vector& quads, + std::vector& group, + int group_idx, int dilation); + +// int checkQuadGroup(std::vector& quadGroup, +// std::vector& outCorners, +// cv::Size patternSize); + + void labelQuadGroup(std::vector& quad_group, + cv::Size patternSize, bool firstRun); + + void findQuadNeighbors(std::vector& quads, int dilation); + + int augmentBestRun(std::vector& candidateQuads, int candidateDilation, + std::vector& existingQuads, int existingDilation); + + void generateQuads(std::vector& quads, + cv::Mat& image, int flags, + int dilation, bool firstRun); + + bool checkQuadGroup(std::vector& quads, + std::vector& corners, + cv::Size patternSize); + + void getQuadrangleHypotheses(const std::vector< std::vector >& contours, + std::vector< std::pair >& quads, + int classId) const; + + bool checkChessboard(const cv::Mat& image, cv::Size patternSize) const; + + bool checkBoardMonotony(std::vector& corners, + cv::Size patternSize); + + bool matchCorners(ChessboardQuadPtr& quad1, int corner1, + ChessboardQuadPtr& quad2, int corner2) const; + + cv::Mat mImage; + cv::Mat mSketch; + std::vector mCorners; + cv::Size mBoardSize; + bool mCornersFound; +}; + +} + +#endif diff --git a/camera_models/include/camodocal/chessboard/ChessboardCorner.h b/camera_models/include/camodocal/chessboard/ChessboardCorner.h new file mode 100644 index 0000000..8b0f2bb --- /dev/null +++ b/camera_models/include/camodocal/chessboard/ChessboardCorner.h @@ -0,0 +1,45 @@ +#ifndef CHESSBOARDCORNER_H +#define CHESSBOARDCORNER_H + +#include +#include + +namespace camodocal +{ + +class ChessboardCorner; +typedef boost::shared_ptr ChessboardCornerPtr; + +class ChessboardCorner +{ +public: + ChessboardCorner() : row(0), column(0), needsNeighbor(true), count(0) {} + + float 
meanDist(int &n) const + { + float sum = 0; + n = 0; + for (int i = 0; i < 4; ++i) + { + if (neighbors[i].get()) + { + float dx = neighbors[i]->pt.x - pt.x; + float dy = neighbors[i]->pt.y - pt.y; + sum += sqrt(dx*dx + dy*dy); + n++; + } + } + return sum / std::max(n, 1); + } + + cv::Point2f pt; // X and y coordinates + int row; // Row and column of the corner + int column; // in the found pattern + bool needsNeighbor; // Does the corner require a neighbor? + int count; // number of corner neighbors + ChessboardCornerPtr neighbors[4]; // pointer to all corner neighbors +}; + +} + +#endif diff --git a/camera_models/include/camodocal/chessboard/ChessboardQuad.h b/camera_models/include/camodocal/chessboard/ChessboardQuad.h new file mode 100644 index 0000000..9bb7784 --- /dev/null +++ b/camera_models/include/camodocal/chessboard/ChessboardQuad.h @@ -0,0 +1,29 @@ +#ifndef CHESSBOARDQUAD_H +#define CHESSBOARDQUAD_H + +#include + +#include "camodocal/chessboard/ChessboardCorner.h" + +namespace camodocal +{ + +class ChessboardQuad; +typedef boost::shared_ptr ChessboardQuadPtr; + +class ChessboardQuad +{ +public: + ChessboardQuad() : count(0), group_idx(-1), edge_len(FLT_MAX), labeled(false) {} + + int count; // Number of quad neighbors + int group_idx; // Quad group ID + float edge_len; // Smallest side length^2 + ChessboardCornerPtr corners[4]; // Coordinates of quad corners + ChessboardQuadPtr neighbors[4]; // Pointers of quad neighbors + bool labeled; // Has this corner been labeled? 
+}; + +} + +#endif diff --git a/camera_models/include/camodocal/chessboard/Spline.h b/camera_models/include/camodocal/chessboard/Spline.h new file mode 100644 index 0000000..5ac5809 --- /dev/null +++ b/camera_models/include/camodocal/chessboard/Spline.h @@ -0,0 +1,319 @@ +/* dynamo:- Event driven molecular dynamics simulator + http://www.marcusbannerman.co.uk/dynamo + Copyright (C) 2011 Marcus N Campbell Bannerman + + This program is free software: you can redistribute it and/or + modify it under the terms of the GNU General Public License + version 3 as published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . +*/ + +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace ublas = boost::numeric::ublas; + +class Spline : private std::vector > +{ +public: + //The boundary conditions available + enum BC_type { + FIXED_1ST_DERIV_BC, + FIXED_2ND_DERIV_BC, + PARABOLIC_RUNOUT_BC + }; + + enum Spline_type { + LINEAR, + CUBIC + }; + + //Constructor takes the boundary conditions as arguments, this + //sets the first derivative (gradient) at the lower and upper + //end points + Spline(): + _valid(false), + _BCLow(FIXED_2ND_DERIV_BC), _BCHigh(FIXED_2ND_DERIV_BC), + _BCLowVal(0), _BCHighVal(0), + _type(CUBIC) + {} + + typedef std::vector > base; + typedef base::const_iterator const_iterator; + + //Standard STL read-only container stuff + const_iterator begin() const { return base::begin(); } + const_iterator end() const { return base::end(); } + void clear() { _valid = false; base::clear(); _data.clear(); } + size_t size() const { return base::size(); } + size_t max_size() const { return base::max_size(); } + 
size_t capacity() const { return base::capacity(); } + bool empty() const { return base::empty(); } + + //Add a point to the spline, and invalidate it so its + //recalculated on the next access + inline void addPoint(double x, double y) + { + _valid = false; + base::push_back(std::pair(x,y)); + } + + //Reset the boundary conditions + inline void setLowBC(BC_type BC, double val = 0) + { _BCLow = BC; _BCLowVal = val; _valid = false; } + + inline void setHighBC(BC_type BC, double val = 0) + { _BCHigh = BC; _BCHighVal = val; _valid = false; } + + void setType(Spline_type type) { _type = type; _valid = false; } + + //Check if the spline has been calculated, then generate the + //spline interpolated value + double operator()(double xval) + { + if (!_valid) generate(); + + //Special cases when we're outside the range of the spline points + if (xval <= x(0)) return lowCalc(xval); + if (xval >= x(size()-1)) return highCalc(xval); + + //Check all intervals except the last one + for (std::vector::const_iterator iPtr = _data.begin(); + iPtr != _data.end()-1; ++iPtr) + if ((xval >= iPtr->x) && (xval <= (iPtr+1)->x)) + return splineCalc(iPtr, xval); + + return splineCalc(_data.end() - 1, xval); + } + +private: + + ///////PRIVATE DATA MEMBERS + struct SplineData { double x,a,b,c,d; }; + //vector of calculated spline data + std::vector _data; + //Second derivative at each point + ublas::vector _ddy; + //Tracks whether the spline parameters have been calculated for + //the current set of points + bool _valid; + //The boundary conditions + BC_type _BCLow, _BCHigh; + //The values of the boundary conditions + double _BCLowVal, _BCHighVal; + + Spline_type _type; + + ///////PRIVATE FUNCTIONS + //Function to calculate the value of a given spline at a point xval + inline double splineCalc(std::vector::const_iterator i, double xval) + { + const double lx = xval - i->x; + return ((i->a * lx + i->b) * lx + i->c) * lx + i->d; + } + + inline double lowCalc(double xval) + { + const double lx = 
xval - x(0); + + if (_type == LINEAR) + return lx * _BCHighVal + y(0); + + const double firstDeriv = (y(1) - y(0)) / h(0) - 2 * h(0) * (_data[0].b + 2 * _data[1].b) / 6; + + switch(_BCLow) + { + case FIXED_1ST_DERIV_BC: + return lx * _BCLowVal + y(0); + case FIXED_2ND_DERIV_BC: + return lx * lx * _BCLowVal + firstDeriv * lx + y(0); + case PARABOLIC_RUNOUT_BC: + return lx * lx * _ddy[0] + lx * firstDeriv + y(0); + } + throw std::runtime_error("Unknown BC"); + } + + inline double highCalc(double xval) + { + const double lx = xval - x(size() - 1); + + if (_type == LINEAR) + return lx * _BCHighVal + y(size() - 1); + + const double firstDeriv = 2 * h(size() - 2) * (_ddy[size() - 2] + 2 * _ddy[size() - 1]) / 6 + (y(size() - 1) - y(size() - 2)) / h(size() - 2); + + switch(_BCHigh) + { + case FIXED_1ST_DERIV_BC: + return lx * _BCHighVal + y(size() - 1); + case FIXED_2ND_DERIV_BC: + return lx * lx * _BCHighVal + firstDeriv * lx + y(size() - 1); + case PARABOLIC_RUNOUT_BC: + return lx * lx * _ddy[size()-1] + lx * firstDeriv + y(size() - 1); + } + throw std::runtime_error("Unknown BC"); + } + + //These just provide access to the point data in a clean way + inline double x(size_t i) const { return operator[](i).first; } + inline double y(size_t i) const { return operator[](i).second; } + inline double h(size_t i) const { return x(i+1) - x(i); } + + //Invert a arbitrary matrix using the boost ublas library + template + bool InvertMatrix(ublas::matrix A, + ublas::matrix& inverse) + { + using namespace ublas; + + // create a permutation matrix for the LU-factorization + permutation_matrix pm(A.size1()); + + // perform LU-factorization + int res = lu_factorize(A,pm); + if( res != 0 ) return false; + + // create identity matrix of "inverse" + inverse.assign(ublas::identity_matrix(A.size1())); + + // backsubstitute to get the inverse + lu_substitute(A, pm, inverse); + + return true; + } + + //This function will recalculate the spline parameters and store + //them in _data, ready for 
spline interpolation + void generate() + { + if (size() < 2) + throw std::runtime_error("Spline requires at least 2 points"); + + //If any spline points are at the same x location, we have to + //just slightly seperate them + { + bool testPassed(false); + while (!testPassed) + { + testPassed = true; + std::sort(base::begin(), base::end()); + + for (base::iterator iPtr = base::begin(); iPtr != base::end() - 1; ++iPtr) + if (iPtr->first == (iPtr+1)->first) + { + if ((iPtr+1)->first != 0) + (iPtr+1)->first += (iPtr+1)->first + * std::numeric_limits::epsilon() * 10; + else + (iPtr+1)->first = std::numeric_limits::epsilon() * 10; + testPassed = false; + break; + } + } + } + + const size_t e = size() - 1; + + switch (_type) + { + case LINEAR: + { + _data.resize(e); + for (size_t i(0); i < e; ++i) + { + _data[i].x = x(i); + _data[i].a = 0; + _data[i].b = 0; + _data[i].c = (y(i+1) - y(i)) / (x(i+1) - x(i)); + _data[i].d = y(i); + } + break; + } + case CUBIC: + { + ublas::matrix A(size(), size()); + for (size_t yv(0); yv <= e; ++yv) + for (size_t xv(0); xv <= e; ++xv) + A(xv,yv) = 0; + + for (size_t i(1); i < e; ++i) + { + A(i-1,i) = h(i-1); + A(i,i) = 2 * (h(i-1) + h(i)); + A(i+1,i) = h(i); + } + + ublas::vector C(size()); + for (size_t xv(0); xv <= e; ++xv) + C(xv) = 0; + + for (size_t i(1); i < e; ++i) + C(i) = 6 * + ((y(i+1) - y(i)) / h(i) + - (y(i) - y(i-1)) / h(i-1)); + + //Boundary conditions + switch(_BCLow) + { + case FIXED_1ST_DERIV_BC: + C(0) = 6 * ((y(1) - y(0)) / h(0) - _BCLowVal); + A(0,0) = 2 * h(0); + A(1,0) = h(0); + break; + case FIXED_2ND_DERIV_BC: + C(0) = _BCLowVal; + A(0,0) = 1; + break; + case PARABOLIC_RUNOUT_BC: + C(0) = 0; A(0,0) = 1; A(1,0) = -1; + break; + } + + switch(_BCHigh) + { + case FIXED_1ST_DERIV_BC: + C(e) = 6 * (_BCHighVal - (y(e) - y(e-1)) / h(e-1)); + A(e,e) = 2 * h(e - 1); + A(e-1,e) = h(e - 1); + break; + case FIXED_2ND_DERIV_BC: + C(e) = _BCHighVal; + A(e,e) = 1; + break; + case PARABOLIC_RUNOUT_BC: + C(e) = 0; A(e,e) = 1; A(e-1,e) 
= -1; + break; + } + + ublas::matrix AInv(size(), size()); + InvertMatrix(A,AInv); + + _ddy = ublas::prod(C, AInv); + + _data.resize(size()-1); + for (size_t i(0); i < e; ++i) + { + _data[i].x = x(i); + _data[i].a = (_ddy(i+1) - _ddy(i)) / (6 * h(i)); + _data[i].b = _ddy(i) / 2; + _data[i].c = (y(i+1) - y(i)) / h(i) - _ddy(i+1) * h(i) / 6 - _ddy(i) * h(i) / 3; + _data[i].d = y(i); + } + } + } + _valid = true; + } +}; diff --git a/camera_models/include/camodocal/gpl/EigenQuaternionParameterization.h b/camera_models/include/camodocal/gpl/EigenQuaternionParameterization.h new file mode 100644 index 0000000..3d08f0e --- /dev/null +++ b/camera_models/include/camodocal/gpl/EigenQuaternionParameterization.h @@ -0,0 +1,40 @@ +#ifndef EIGENQUATERNIONPARAMETERIZATION_H +#define EIGENQUATERNIONPARAMETERIZATION_H + +#include "ceres/local_parameterization.h" + +namespace camodocal +{ + +class EigenQuaternionParameterization : public ceres::LocalParameterization +{ +public: + virtual ~EigenQuaternionParameterization() {} + virtual bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const; + virtual bool ComputeJacobian(const double* x, + double* jacobian) const; + virtual int GlobalSize() const { return 4; } + virtual int LocalSize() const { return 3; } + +private: + template + void EigenQuaternionProduct(const T z[4], const T w[4], T zw[4]) const; +}; + + +template +void +EigenQuaternionParameterization::EigenQuaternionProduct(const T z[4], const T w[4], T zw[4]) const +{ + zw[0] = z[3] * w[0] + z[0] * w[3] + z[1] * w[2] - z[2] * w[1]; + zw[1] = z[3] * w[1] - z[0] * w[2] + z[1] * w[3] + z[2] * w[0]; + zw[2] = z[3] * w[2] + z[0] * w[1] - z[1] * w[0] + z[2] * w[3]; + zw[3] = z[3] * w[3] - z[0] * w[0] - z[1] * w[1] - z[2] * w[2]; +} + +} + +#endif + diff --git a/camera_models/include/camodocal/gpl/EigenUtils.h b/camera_models/include/camodocal/gpl/EigenUtils.h new file mode 100644 index 0000000..72b46c7 --- /dev/null +++ 
b/camera_models/include/camodocal/gpl/EigenUtils.h @@ -0,0 +1,418 @@ +#ifndef EIGENUTILS_H +#define EIGENUTILS_H + +#include + +#include "ceres/rotation.h" +#include "camodocal/gpl/gpl.h" + +namespace camodocal +{ + +// Returns the 3D cross product skew symmetric matrix of a given 3D vector +template +Eigen::Matrix skew(const Eigen::Matrix& vec) +{ + return (Eigen::Matrix() << T(0), -vec(2), vec(1), + vec(2), T(0), -vec(0), + -vec(1), vec(0), T(0)).finished(); +} + +template +typename Eigen::MatrixBase::PlainObject sqrtm(const Eigen::MatrixBase& A) +{ + Eigen::SelfAdjointEigenSolver es(A); + + return es.operatorSqrt(); +} + +template +Eigen::Matrix AngleAxisToRotationMatrix(const Eigen::Matrix& rvec) +{ + T angle = rvec.norm(); + if (angle == T(0)) + { + return Eigen::Matrix::Identity(); + } + + Eigen::Matrix axis; + axis = rvec.normalized(); + + Eigen::Matrix rmat; + rmat = Eigen::AngleAxis(angle, axis); + + return rmat; +} + +template +Eigen::Quaternion AngleAxisToQuaternion(const Eigen::Matrix& rvec) +{ + Eigen::Matrix rmat = AngleAxisToRotationMatrix(rvec); + + return Eigen::Quaternion(rmat); +} + +template +void AngleAxisToQuaternion(const Eigen::Matrix& rvec, T* q) +{ + Eigen::Quaternion quat = AngleAxisToQuaternion(rvec); + + q[0] = quat.x(); + q[1] = quat.y(); + q[2] = quat.z(); + q[3] = quat.w(); +} + +template +Eigen::Matrix RotationToAngleAxis(const Eigen::Matrix & rmat) +{ + Eigen::AngleAxis angleaxis; + angleaxis.fromRotationMatrix(rmat); + return angleaxis.angle() * angleaxis.axis(); + +} + +template +void QuaternionToAngleAxis(const T* const q, Eigen::Matrix& rvec) +{ + Eigen::Quaternion quat(q[3], q[0], q[1], q[2]); + + Eigen::Matrix rmat = quat.toRotationMatrix(); + + Eigen::AngleAxis angleaxis; + angleaxis.fromRotationMatrix(rmat); + + rvec = angleaxis.angle() * angleaxis.axis(); +} + +template +Eigen::Matrix QuaternionToRotation(const T* const q) +{ + T R[9]; + ceres::QuaternionToRotation(q, R); + + Eigen::Matrix rmat; + for (int i = 0; i < 3; 
++i) + { + for (int j = 0; j < 3; ++j) + { + rmat(i,j) = R[i * 3 + j]; + } + } + + return rmat; +} + +template +void QuaternionToRotation(const T* const q, T* rot) +{ + ceres::QuaternionToRotation(q, rot); +} + +template +Eigen::Matrix QuaternionMultMatLeft(const Eigen::Quaternion& q) +{ + return (Eigen::Matrix() << q.w(), -q.z(), q.y(), q.x(), + q.z(), q.w(), -q.x(), q.y(), + -q.y(), q.x(), q.w(), q.z(), + -q.x(), -q.y(), -q.z(), q.w()).finished(); +} + +template +Eigen::Matrix QuaternionMultMatRight(const Eigen::Quaternion& q) +{ + return (Eigen::Matrix() << q.w(), q.z(), -q.y(), q.x(), + -q.z(), q.w(), q.x(), q.y(), + q.y(), -q.x(), q.w(), q.z(), + -q.x(), -q.y(), -q.z(), q.w()).finished(); +} + +/// @param theta - rotation about screw axis +/// @param d - projection of tvec on the rotation axis +/// @param l - screw axis direction +/// @param m - screw axis moment +template +void AngleAxisAndTranslationToScrew(const Eigen::Matrix& rvec, + const Eigen::Matrix& tvec, + T& theta, T& d, + Eigen::Matrix& l, + Eigen::Matrix& m) +{ + + theta = rvec.norm(); + if (theta == 0) + { + l.setZero(); + m.setZero(); + std::cout << "Warning: Undefined screw! Returned 0. 
" << std::endl; + } + + l = rvec.normalized(); + + Eigen::Matrix t = tvec; + + d = t.transpose() * l; + + // point on screw axis - projection of origin on screw axis + Eigen::Matrix c; + c = 0.5 * (t - d * l + (1.0 / tan(theta / 2.0) * l).cross(t)); + + // c and hence the screw axis is not defined if theta is either 0 or M_PI + m = c.cross(l); +} + +template +Eigen::Matrix RPY2mat(T roll, T pitch, T yaw) +{ + Eigen::Matrix m; + + T cr = cos(roll); + T sr = sin(roll); + T cp = cos(pitch); + T sp = sin(pitch); + T cy = cos(yaw); + T sy = sin(yaw); + + m(0,0) = cy * cp; + m(0,1) = cy * sp * sr - sy * cr; + m(0,2) = cy * sp * cr + sy * sr; + m(1,0) = sy * cp; + m(1,1) = sy * sp * sr + cy * cr; + m(1,2) = sy * sp * cr - cy * sr; + m(2,0) = - sp; + m(2,1) = cp * sr; + m(2,2) = cp * cr; + return m; +} + +template +void mat2RPY(const Eigen::Matrix& m, T& roll, T& pitch, T& yaw) +{ + roll = atan2(m(2,1), m(2,2)); + pitch = atan2(-m(2,0), sqrt(m(2,1) * m(2,1) + m(2,2) * m(2,2))); + yaw = atan2(m(1,0), m(0,0)); +} + +template +Eigen::Matrix homogeneousTransform(const Eigen::Matrix& R, const Eigen::Matrix& t) +{ + Eigen::Matrix H; + H.setIdentity(); + + H.block(0,0,3,3) = R; + H.block(0,3,3,1) = t; + + return H; +} + +template +Eigen::Matrix poseWithCartesianTranslation(const T* const q, const T* const p) +{ + Eigen::Matrix pose = Eigen::Matrix::Identity(); + + T rotation[9]; + ceres::QuaternionToRotation(q, rotation); + for (int i = 0; i < 3; ++i) + { + for (int j = 0; j < 3; ++j) + { + pose(i,j) = rotation[i * 3 + j]; + } + } + + pose(0,3) = p[0]; + pose(1,3) = p[1]; + pose(2,3) = p[2]; + + return pose; +} + +template +Eigen::Matrix poseWithSphericalTranslation(const T* const q, const T* const p, const T scale = T(1.0)) +{ + Eigen::Matrix pose = Eigen::Matrix::Identity(); + + T rotation[9]; + ceres::QuaternionToRotation(q, rotation); + for (int i = 0; i < 3; ++i) + { + for (int j = 0; j < 3; ++j) + { + pose(i,j) = rotation[i * 3 + j]; + } + } + + T theta = p[0]; + T phi = 
p[1]; + pose(0,3) = sin(theta) * cos(phi) * scale; + pose(1,3) = sin(theta) * sin(phi) * scale; + pose(2,3) = cos(theta) * scale; + + return pose; +} + +// Returns the Sampson error of a given essential matrix and 2 image points +template +T sampsonError(const Eigen::Matrix& E, + const Eigen::Matrix& p1, + const Eigen::Matrix& p2) +{ + Eigen::Matrix Ex1 = E * p1; + Eigen::Matrix Etx2 = E.transpose() * p2; + + T x2tEx1 = p2.dot(Ex1); + + // compute Sampson error + T err = square(x2tEx1) / (square(Ex1(0,0)) + square(Ex1(1,0)) + square(Etx2(0,0)) + square(Etx2(1,0))); + + return err; +} + +// Returns the Sampson error of a given rotation/translation and 2 image points +template +T sampsonError(const Eigen::Matrix& R, + const Eigen::Matrix& t, + const Eigen::Matrix& p1, + const Eigen::Matrix& p2) +{ + // construct essential matrix + Eigen::Matrix E = skew(t) * R; + + Eigen::Matrix Ex1 = E * p1; + Eigen::Matrix Etx2 = E.transpose() * p2; + + T x2tEx1 = p2.dot(Ex1); + + // compute Sampson error + T err = square(x2tEx1) / (square(Ex1(0,0)) + square(Ex1(1,0)) + square(Etx2(0,0)) + square(Etx2(1,0))); + + return err; +} + +// Returns the Sampson error of a given rotation/translation and 2 image points +template +T sampsonError(const Eigen::Matrix& H, + const Eigen::Matrix& p1, + const Eigen::Matrix& p2) +{ + Eigen::Matrix R = H.block(0, 0, 3, 3); + Eigen::Matrix t = H.block(0, 3, 3, 1); + + return sampsonError(R, t, p1, p2); +} + +template +Eigen::Matrix +transformPoint(const Eigen::Matrix& H, const Eigen::Matrix& P) +{ + Eigen::Matrix P_trans = H.block(0, 0, 3, 3) * P + H.block(0, 3, 3, 1); + + return P_trans; +} + +template +Eigen::Matrix +estimate3DRigidTransform(const std::vector, Eigen::aligned_allocator > >& points1, + const std::vector, Eigen::aligned_allocator > >& points2) +{ + // compute centroids + Eigen::Matrix c1, c2; + c1.setZero(); c2.setZero(); + + for (size_t i = 0; i < points1.size(); ++i) + { + c1 += points1.at(i); + c2 += points2.at(i); + } + + c1 /= 
points1.size(); + c2 /= points1.size(); + + Eigen::Matrix X(3, points1.size()); + Eigen::Matrix Y(3, points1.size()); + for (size_t i = 0; i < points1.size(); ++i) + { + X.col(i) = points1.at(i) - c1; + Y.col(i) = points2.at(i) - c2; + } + + Eigen::Matrix H = X * Y.transpose(); + + Eigen::JacobiSVD< Eigen::Matrix > svd(H, Eigen::ComputeFullU | Eigen::ComputeFullV); + + Eigen::Matrix U = svd.matrixU(); + Eigen::Matrix V = svd.matrixV(); + if (U.determinant() * V.determinant() < 0.0) + { + V.col(2) *= -1.0; + } + + Eigen::Matrix R = V * U.transpose(); + Eigen::Matrix t = c2 - R * c1; + + return homogeneousTransform(R, t); +} + +template +Eigen::Matrix +estimate3DRigidSimilarityTransform(const std::vector, Eigen::aligned_allocator > >& points1, + const std::vector, Eigen::aligned_allocator > >& points2) +{ + // compute centroids + Eigen::Matrix c1, c2; + c1.setZero(); c2.setZero(); + + for (size_t i = 0; i < points1.size(); ++i) + { + c1 += points1.at(i); + c2 += points2.at(i); + } + + c1 /= points1.size(); + c2 /= points1.size(); + + Eigen::Matrix X(3, points1.size()); + Eigen::Matrix Y(3, points1.size()); + for (size_t i = 0; i < points1.size(); ++i) + { + X.col(i) = points1.at(i) - c1; + Y.col(i) = points2.at(i) - c2; + } + + Eigen::Matrix H = X * Y.transpose(); + + Eigen::JacobiSVD< Eigen::Matrix > svd(H, Eigen::ComputeFullU | Eigen::ComputeFullV); + + Eigen::Matrix U = svd.matrixU(); + Eigen::Matrix V = svd.matrixV(); + if (U.determinant() * V.determinant() < 0.0) + { + V.col(2) *= -1.0; + } + + Eigen::Matrix R = V * U.transpose(); + + std::vector, Eigen::aligned_allocator > > rotatedPoints1(points1.size()); + for (size_t i = 0; i < points1.size(); ++i) + { + rotatedPoints1.at(i) = R * (points1.at(i) - c1); + } + + double sum_ss = 0.0, sum_tt = 0.0; + for (size_t i = 0; i < points1.size(); ++i) + { + sum_ss += (points1.at(i) - c1).squaredNorm(); + sum_tt += (points2.at(i) - c2).dot(rotatedPoints1.at(i)); + } + + double scale = sum_tt / sum_ss; + + Eigen::Matrix 
sR = scale * R; + Eigen::Matrix t = c2 - sR * c1; + + return homogeneousTransform(sR, t); +} + +} + +#endif diff --git a/camera_models/include/camodocal/gpl/gpl.h b/camera_models/include/camodocal/gpl/gpl.h new file mode 100644 index 0000000..9f511f6 --- /dev/null +++ b/camera_models/include/camodocal/gpl/gpl.h @@ -0,0 +1,111 @@ +#ifndef GPL_H +#define GPL_H + +#include +#include +#include + +namespace camodocal +{ + +template +const T clamp(const T& v, const T& a, const T& b) +{ + return std::min(b, std::max(a, v)); +} + +double hypot3(double x, double y, double z); +float hypot3f(float x, float y, float z); + +template +const T normalizeTheta(const T& theta) +{ + T normTheta = theta; + + while (normTheta < - M_PI) + { + normTheta += 2.0 * M_PI; + } + while (normTheta > M_PI) + { + normTheta -= 2.0 * M_PI; + } + + return normTheta; +} + +double d2r(double deg); +float d2r(float deg); +double r2d(double rad); +float r2d(float rad); + +double sinc(double theta); + +template +const T square(const T& x) +{ + return x * x; +} + +template +const T cube(const T& x) +{ + return x * x * x; +} + +template +const T random(const T& a, const T& b) +{ + return static_cast(rand()) / RAND_MAX * (b - a) + a; +} + +template +const T randomNormal(const T& sigma) +{ + T x1, x2, w; + + do + { + x1 = 2.0 * random(0.0, 1.0) - 1.0; + x2 = 2.0 * random(0.0, 1.0) - 1.0; + w = x1 * x1 + x2 * x2; + } + while (w >= 1.0 || w == 0.0); + + w = sqrt((-2.0 * log(w)) / w); + + return x1 * w * sigma; +} + +unsigned long long timeInMicroseconds(void); + +double timeInSeconds(void); + +void colorDepthImage(cv::Mat& imgDepth, + cv::Mat& imgColoredDepth, + float minRange, float maxRange); + +bool colormap(const std::string& name, unsigned char idx, + float& r, float& g, float& b); + +std::vector bresLine(int x0, int y0, int x1, int y1); +std::vector bresCircle(int x0, int y0, int r); + +void fitCircle(const std::vector& points, + double& centerX, double& centerY, double& radius); + +std::vector 
intersectCircles(double x1, double y1, double r1, + double x2, double y2, double r2); + +void LLtoUTM(double latitude, double longitude, + double& utmNorthing, double& utmEasting, + std::string& utmZone); +void UTMtoLL(double utmNorthing, double utmEasting, + const std::string& utmZone, + double& latitude, double& longitude); + +long int timestampDiff(uint64_t t1, uint64_t t2); + +} + +#endif diff --git a/camera_models/include/camodocal/sparse_graph/Transform.h b/camera_models/include/camodocal/sparse_graph/Transform.h new file mode 100644 index 0000000..c164b0a --- /dev/null +++ b/camera_models/include/camodocal/sparse_graph/Transform.h @@ -0,0 +1,38 @@ +#ifndef TRANSFORM_H +#define TRANSFORM_H + +#include +#include +#include + +namespace camodocal +{ + +class Transform +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + Transform(); + Transform(const Eigen::Matrix4d& H); + + Eigen::Quaterniond& rotation(void); + const Eigen::Quaterniond& rotation(void) const; + double* rotationData(void); + const double* const rotationData(void) const; + + Eigen::Vector3d& translation(void); + const Eigen::Vector3d& translation(void) const; + double* translationData(void); + const double* const translationData(void) const; + + Eigen::Matrix4d toMatrix(void) const; + +private: + Eigen::Quaterniond m_q; + Eigen::Vector3d m_t; +}; + +} + +#endif diff --git a/camera_models/package.xml b/camera_models/package.xml new file mode 100644 index 0000000..7c28ebb --- /dev/null +++ b/camera_models/package.xml @@ -0,0 +1,54 @@ + + + camera_models + 0.0.0 + The camera_models package + + + + + dvorak + + + + + + TODO + + + + + + + + + + + + + + + + + + + + + + + + + + catkin + roscpp + std_msgs + roscpp + std_msgs + + + + + + + + \ No newline at end of file diff --git a/camera_models/readme.md b/camera_models/readme.md new file mode 100644 index 0000000..a986a9f --- /dev/null +++ b/camera_models/readme.md @@ -0,0 +1,15 @@ +part of [camodocal](https://github.com/hengli/camodocal) + +[Google 
Ceres](http://ceres-solver.org) is needed. + +# Calibration: + +Use [intrinsic_calib.cc](https://github.com/dvorak0/camera_model/blob/master/src/intrinsic_calib.cc) to calibrate your camera. + +# Undistortion: + +See [Camera.h](https://github.com/dvorak0/camera_model/blob/master/include/camodocal/camera_models/Camera.h) for general interface: + + - liftProjective: Lift points from the image plane to the projective space. + - spaceToPlane: Projects 3D points to the image plane (Pi function) + diff --git a/camera_models/src/calib/CameraCalibration.cc b/camera_models/src/calib/CameraCalibration.cc new file mode 100644 index 0000000..10543e9 --- /dev/null +++ b/camera_models/src/calib/CameraCalibration.cc @@ -0,0 +1,571 @@ +#include "camodocal/calib/CameraCalibration.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "camodocal/camera_models/CameraFactory.h" +#include "camodocal/sparse_graph/Transform.h" +#include "camodocal/gpl/EigenQuaternionParameterization.h" +#include "camodocal/gpl/EigenUtils.h" +#include "camodocal/camera_models/CostFunctionFactory.h" + +#include "ceres/ceres.h" +namespace camodocal +{ + +CameraCalibration::CameraCalibration() + : m_boardSize(cv::Size(0,0)) + , m_squareSize(0.0f) + , m_verbose(false) +{ + +} + +CameraCalibration::CameraCalibration(const Camera::ModelType modelType, + const std::string& cameraName, + const cv::Size& imageSize, + const cv::Size& boardSize, + float squareSize) + : m_boardSize(boardSize) + , m_squareSize(squareSize) + , m_verbose(false) +{ + m_camera = CameraFactory::instance()->generateCamera(modelType, cameraName, imageSize); +} + +void +CameraCalibration::clear(void) +{ + m_imagePoints.clear(); + m_scenePoints.clear(); +} + +void +CameraCalibration::addChessboardData(const std::vector& corners) +{ + m_imagePoints.push_back(corners); + + std::vector scenePointsInView; + for (int i = 0; i < m_boardSize.height; ++i) + { + for (int j = 0; j < 
m_boardSize.width; ++j) + { + scenePointsInView.push_back(cv::Point3f(i * m_squareSize, j * m_squareSize, 0.0)); + } + } + m_scenePoints.push_back(scenePointsInView); +} + +bool +CameraCalibration::calibrate(void) +{ + int imageCount = m_imagePoints.size(); + + // compute intrinsic camera parameters and extrinsic parameters for each of the views + std::vector rvecs; + std::vector tvecs; + bool ret = calibrateHelper(m_camera, rvecs, tvecs); + + m_cameraPoses = cv::Mat(imageCount, 6, CV_64F); + for (int i = 0; i < imageCount; ++i) + { + m_cameraPoses.at(i,0) = rvecs.at(i).at(0); + m_cameraPoses.at(i,1) = rvecs.at(i).at(1); + m_cameraPoses.at(i,2) = rvecs.at(i).at(2); + m_cameraPoses.at(i,3) = tvecs.at(i).at(0); + m_cameraPoses.at(i,4) = tvecs.at(i).at(1); + m_cameraPoses.at(i,5) = tvecs.at(i).at(2); + } + + // Compute measurement covariance. + std::vector > errVec(m_imagePoints.size()); + Eigen::Vector2d errSum = Eigen::Vector2d::Zero(); + size_t errCount = 0; + for (size_t i = 0; i < m_imagePoints.size(); ++i) + { + std::vector estImagePoints; + m_camera->projectPoints(m_scenePoints.at(i), rvecs.at(i), tvecs.at(i), + estImagePoints); + + for (size_t j = 0; j < m_imagePoints.at(i).size(); ++j) + { + cv::Point2f pObs = m_imagePoints.at(i).at(j); + cv::Point2f pEst = estImagePoints.at(j); + + cv::Point2f err = pObs - pEst; + + errVec.at(i).push_back(err); + + errSum += Eigen::Vector2d(err.x, err.y); + } + + errCount += m_imagePoints.at(i).size(); + } + + Eigen::Vector2d errMean = errSum / static_cast(errCount); + + Eigen::Matrix2d measurementCovariance = Eigen::Matrix2d::Zero(); + for (size_t i = 0; i < errVec.size(); ++i) + { + for (size_t j = 0; j < errVec.at(i).size(); ++j) + { + cv::Point2f err = errVec.at(i).at(j); + double d0 = err.x - errMean(0); + double d1 = err.y - errMean(1); + + measurementCovariance(0,0) += d0 * d0; + measurementCovariance(0,1) += d0 * d1; + measurementCovariance(1,1) += d1 * d1; + } + } + measurementCovariance /= static_cast(errCount); + 
measurementCovariance(1,0) = measurementCovariance(0,1); + + m_measurementCovariance = measurementCovariance; + + return ret; +} + +int +CameraCalibration::sampleCount(void) const +{ + return m_imagePoints.size(); +} + +std::vector >& +CameraCalibration::imagePoints(void) +{ + return m_imagePoints; +} + +const std::vector >& +CameraCalibration::imagePoints(void) const +{ + return m_imagePoints; +} + +std::vector >& +CameraCalibration::scenePoints(void) +{ + return m_scenePoints; +} + +const std::vector >& +CameraCalibration::scenePoints(void) const +{ + return m_scenePoints; +} + +CameraPtr& +CameraCalibration::camera(void) +{ + return m_camera; +} + +const CameraConstPtr +CameraCalibration::camera(void) const +{ + return m_camera; +} + +Eigen::Matrix2d& +CameraCalibration::measurementCovariance(void) +{ + return m_measurementCovariance; +} + +const Eigen::Matrix2d& +CameraCalibration::measurementCovariance(void) const +{ + return m_measurementCovariance; +} + +cv::Mat& +CameraCalibration::cameraPoses(void) +{ + return m_cameraPoses; +} + +const cv::Mat& +CameraCalibration::cameraPoses(void) const +{ + return m_cameraPoses; +} + +void +CameraCalibration::drawResults(std::vector& images) const +{ + std::vector rvecs, tvecs; + + for (size_t i = 0; i < images.size(); ++i) + { + cv::Mat rvec(3, 1, CV_64F); + rvec.at(0) = m_cameraPoses.at(i,0); + rvec.at(1) = m_cameraPoses.at(i,1); + rvec.at(2) = m_cameraPoses.at(i,2); + + cv::Mat tvec(3, 1, CV_64F); + tvec.at(0) = m_cameraPoses.at(i,3); + tvec.at(1) = m_cameraPoses.at(i,4); + tvec.at(2) = m_cameraPoses.at(i,5); + + rvecs.push_back(rvec); + tvecs.push_back(tvec); + } + + int drawShiftBits = 4; + int drawMultiplier = 1 << drawShiftBits; + + cv::Scalar green(0, 255, 0); + cv::Scalar red(0, 0, 255); + + for (size_t i = 0; i < images.size(); ++i) + { + cv::Mat& image = images.at(i); + if (image.channels() == 1) + { + cv::cvtColor(image, image, CV_GRAY2RGB); + } + + std::vector estImagePoints; + 
m_camera->projectPoints(m_scenePoints.at(i), rvecs.at(i), tvecs.at(i), + estImagePoints); + + float errorSum = 0.0f; + float errorMax = std::numeric_limits::min(); + + for (size_t j = 0; j < m_imagePoints.at(i).size(); ++j) + { + cv::Point2f pObs = m_imagePoints.at(i).at(j); + cv::Point2f pEst = estImagePoints.at(j); + + cv::circle(image, + cv::Point(cvRound(pObs.x * drawMultiplier), + cvRound(pObs.y * drawMultiplier)), + 5, green, 2, CV_AA, drawShiftBits); + + cv::circle(image, + cv::Point(cvRound(pEst.x * drawMultiplier), + cvRound(pEst.y * drawMultiplier)), + 5, red, 2, CV_AA, drawShiftBits); + + float error = cv::norm(pObs - pEst); + + errorSum += error; + if (error > errorMax) + { + errorMax = error; + } + } + + std::ostringstream oss; + oss << "Reprojection error: avg = " << errorSum / m_imagePoints.at(i).size() + << " max = " << errorMax; + + cv::putText(image, oss.str(), cv::Point(10, image.rows - 10), + cv::FONT_HERSHEY_COMPLEX, 0.5, cv::Scalar(255, 255, 255), + 1, CV_AA); + } +} + +void +CameraCalibration::writeParams(const std::string& filename) const +{ + m_camera->writeParametersToYamlFile(filename); +} + +bool +CameraCalibration::writeChessboardData(const std::string& filename) const +{ + std::ofstream ofs(filename.c_str(), std::ios::out | std::ios::binary); + if (!ofs.is_open()) + { + return false; + } + + writeData(ofs, m_boardSize.width); + writeData(ofs, m_boardSize.height); + writeData(ofs, m_squareSize); + + writeData(ofs, m_measurementCovariance(0,0)); + writeData(ofs, m_measurementCovariance(0,1)); + writeData(ofs, m_measurementCovariance(1,0)); + writeData(ofs, m_measurementCovariance(1,1)); + + writeData(ofs, m_cameraPoses.rows); + writeData(ofs, m_cameraPoses.cols); + writeData(ofs, m_cameraPoses.type()); + for (int i = 0; i < m_cameraPoses.rows; ++i) + { + for (int j = 0; j < m_cameraPoses.cols; ++j) + { + writeData(ofs, m_cameraPoses.at(i,j)); + } + } + + writeData(ofs, m_imagePoints.size()); + for (size_t i = 0; i < m_imagePoints.size(); 
++i) + { + writeData(ofs, m_imagePoints.at(i).size()); + for (size_t j = 0; j < m_imagePoints.at(i).size(); ++j) + { + const cv::Point2f& ipt = m_imagePoints.at(i).at(j); + + writeData(ofs, ipt.x); + writeData(ofs, ipt.y); + } + } + + writeData(ofs, m_scenePoints.size()); + for (size_t i = 0; i < m_scenePoints.size(); ++i) + { + writeData(ofs, m_scenePoints.at(i).size()); + for (size_t j = 0; j < m_scenePoints.at(i).size(); ++j) + { + const cv::Point3f& spt = m_scenePoints.at(i).at(j); + + writeData(ofs, spt.x); + writeData(ofs, spt.y); + writeData(ofs, spt.z); + } + } + + return true; +} + +bool +CameraCalibration::readChessboardData(const std::string& filename) +{ + std::ifstream ifs(filename.c_str(), std::ios::in | std::ios::binary); + if (!ifs.is_open()) + { + return false; + } + + readData(ifs, m_boardSize.width); + readData(ifs, m_boardSize.height); + readData(ifs, m_squareSize); + + readData(ifs, m_measurementCovariance(0,0)); + readData(ifs, m_measurementCovariance(0,1)); + readData(ifs, m_measurementCovariance(1,0)); + readData(ifs, m_measurementCovariance(1,1)); + + int rows, cols, type; + readData(ifs, rows); + readData(ifs, cols); + readData(ifs, type); + m_cameraPoses = cv::Mat(rows, cols, type); + + for (int i = 0; i < m_cameraPoses.rows; ++i) + { + for (int j = 0; j < m_cameraPoses.cols; ++j) + { + readData(ifs, m_cameraPoses.at(i,j)); + } + } + + size_t nImagePointSets; + readData(ifs, nImagePointSets); + + m_imagePoints.clear(); + m_imagePoints.resize(nImagePointSets); + for (size_t i = 0; i < m_imagePoints.size(); ++i) + { + size_t nImagePoints; + readData(ifs, nImagePoints); + m_imagePoints.at(i).resize(nImagePoints); + + for (size_t j = 0; j < m_imagePoints.at(i).size(); ++j) + { + cv::Point2f& ipt = m_imagePoints.at(i).at(j); + readData(ifs, ipt.x); + readData(ifs, ipt.y); + } + } + + size_t nScenePointSets; + readData(ifs, nScenePointSets); + + m_scenePoints.clear(); + m_scenePoints.resize(nScenePointSets); + for (size_t i = 0; i < 
m_scenePoints.size(); ++i) + { + size_t nScenePoints; + readData(ifs, nScenePoints); + m_scenePoints.at(i).resize(nScenePoints); + + for (size_t j = 0; j < m_scenePoints.at(i).size(); ++j) + { + cv::Point3f& spt = m_scenePoints.at(i).at(j); + readData(ifs, spt.x); + readData(ifs, spt.y); + readData(ifs, spt.z); + } + } + + return true; +} + +void +CameraCalibration::setVerbose(bool verbose) +{ + m_verbose = verbose; +} + +bool +CameraCalibration::calibrateHelper(CameraPtr& camera, + std::vector& rvecs, std::vector& tvecs) const +{ + rvecs.assign(m_scenePoints.size(), cv::Mat()); + tvecs.assign(m_scenePoints.size(), cv::Mat()); + + // STEP 1: Estimate intrinsics + camera->estimateIntrinsics(m_boardSize, m_scenePoints, m_imagePoints); + + // STEP 2: Estimate extrinsics + for (size_t i = 0; i < m_scenePoints.size(); ++i) + { + camera->estimateExtrinsics(m_scenePoints.at(i), m_imagePoints.at(i), rvecs.at(i), tvecs.at(i)); + } + + if (m_verbose) + { + std::cout << "[" << camera->cameraName() << "] " + << "# INFO: " << "Initial reprojection error: " + << std::fixed << std::setprecision(3) + << camera->reprojectionError(m_scenePoints, m_imagePoints, rvecs, tvecs) + << " pixels" << std::endl; + } + + // STEP 3: optimization using ceres + optimize(camera, rvecs, tvecs); + + if (m_verbose) + { + double err = camera->reprojectionError(m_scenePoints, m_imagePoints, rvecs, tvecs); + std::cout << "[" << camera->cameraName() << "] " << "# INFO: Final reprojection error: " + << err << " pixels" << std::endl; + std::cout << "[" << camera->cameraName() << "] " << "# INFO: " + << camera->parametersToString() << std::endl; + } + + return true; +} + +void +CameraCalibration::optimize(CameraPtr& camera, + std::vector& rvecs, std::vector& tvecs) const +{ + // Use ceres to do optimization + ceres::Problem problem; + + std::vector > transformVec(rvecs.size()); + for (size_t i = 0; i < rvecs.size(); ++i) + { + Eigen::Vector3d rvec; + cv::cv2eigen(rvecs.at(i), rvec); + + 
transformVec.at(i).rotation() = Eigen::AngleAxisd(rvec.norm(), rvec.normalized()); + transformVec.at(i).translation() << tvecs[i].at(0), + tvecs[i].at(1), + tvecs[i].at(2); + } + + std::vector intrinsicCameraParams; + m_camera->writeParameters(intrinsicCameraParams); + + // create residuals for each observation + for (size_t i = 0; i < m_imagePoints.size(); ++i) + { + for (size_t j = 0; j < m_imagePoints.at(i).size(); ++j) + { + const cv::Point3f& spt = m_scenePoints.at(i).at(j); + const cv::Point2f& ipt = m_imagePoints.at(i).at(j); + + ceres::CostFunction* costFunction = + CostFunctionFactory::instance()->generateCostFunction(camera, + Eigen::Vector3d(spt.x, spt.y, spt.z), + Eigen::Vector2d(ipt.x, ipt.y), + CAMERA_INTRINSICS | CAMERA_POSE); + + ceres::LossFunction* lossFunction = new ceres::CauchyLoss(1.0); + problem.AddResidualBlock(costFunction, lossFunction, + intrinsicCameraParams.data(), + transformVec.at(i).rotationData(), + transformVec.at(i).translationData()); + } + + ceres::LocalParameterization* quaternionParameterization = + new EigenQuaternionParameterization; + + problem.SetParameterization(transformVec.at(i).rotationData(), + quaternionParameterization); + } + + std::cout << "begin ceres" << std::endl; + ceres::Solver::Options options; + options.max_num_iterations = 1000; + options.num_threads = 1; + + if (m_verbose) + { + options.minimizer_progress_to_stdout = true; + } + + ceres::Solver::Summary summary; + ceres::Solve(options, &problem, &summary); + std::cout << "end ceres" << std::endl; + + if (m_verbose) + { + std::cout << summary.FullReport() << std::endl; + } + + camera->readParameters(intrinsicCameraParams); + + for (size_t i = 0; i < rvecs.size(); ++i) + { + Eigen::AngleAxisd aa(transformVec.at(i).rotation()); + + Eigen::Vector3d rvec = aa.angle() * aa.axis(); + cv::eigen2cv(rvec, rvecs.at(i)); + + cv::Mat& tvec = tvecs.at(i); + tvec.at(0) = transformVec.at(i).translation()(0); + tvec.at(1) = transformVec.at(i).translation()(1); + 
tvec.at(2) = transformVec.at(i).translation()(2); + } +} + +template +void +CameraCalibration::readData(std::ifstream& ifs, T& data) const +{ + char* buffer = new char[sizeof(T)]; + + ifs.read(buffer, sizeof(T)); + + data = *(reinterpret_cast(buffer)); + + delete[] buffer; +} + +template +void +CameraCalibration::writeData(std::ofstream& ofs, T data) const +{ + char* pData = reinterpret_cast(&data); + + ofs.write(pData, sizeof(T)); +} + +} diff --git a/camera_models/src/camera_models/Camera.cc b/camera_models/src/camera_models/Camera.cc new file mode 100644 index 0000000..0ba8dc3 --- /dev/null +++ b/camera_models/src/camera_models/Camera.cc @@ -0,0 +1,252 @@ +#include "camodocal/camera_models/Camera.h" +#include "camodocal/camera_models/ScaramuzzaCamera.h" + +#include + +namespace camodocal +{ + +Camera::Parameters::Parameters(ModelType modelType) + : m_modelType(modelType) + , m_imageWidth(0) + , m_imageHeight(0) +{ + switch (modelType) + { + case KANNALA_BRANDT: + m_nIntrinsics = 8; + break; + case PINHOLE: + m_nIntrinsics = 8; + break; + case SCARAMUZZA: + m_nIntrinsics = SCARAMUZZA_CAMERA_NUM_PARAMS; + break; + case MEI: + default: + m_nIntrinsics = 9; + } +} + +Camera::Parameters::Parameters(ModelType modelType, + const std::string& cameraName, + int w, int h) + : m_modelType(modelType) + , m_cameraName(cameraName) + , m_imageWidth(w) + , m_imageHeight(h) +{ + switch (modelType) + { + case KANNALA_BRANDT: + m_nIntrinsics = 8; + break; + case PINHOLE: + m_nIntrinsics = 8; + break; + case SCARAMUZZA: + m_nIntrinsics = SCARAMUZZA_CAMERA_NUM_PARAMS; + break; + case MEI: + default: + m_nIntrinsics = 9; + } +} + +Camera::ModelType& +Camera::Parameters::modelType(void) +{ + return m_modelType; +} + +std::string& +Camera::Parameters::cameraName(void) +{ + return m_cameraName; +} + +int& +Camera::Parameters::imageWidth(void) +{ + return m_imageWidth; +} + +int& +Camera::Parameters::imageHeight(void) +{ + return m_imageHeight; +} + +Camera::ModelType 
+Camera::Parameters::modelType(void) const +{ + return m_modelType; +} + +const std::string& +Camera::Parameters::cameraName(void) const +{ + return m_cameraName; +} + +int +Camera::Parameters::imageWidth(void) const +{ + return m_imageWidth; +} + +int +Camera::Parameters::imageHeight(void) const +{ + return m_imageHeight; +} + +int +Camera::Parameters::nIntrinsics(void) const +{ + return m_nIntrinsics; +} + +cv::Mat& +Camera::mask(void) +{ + return m_mask; +} + +const cv::Mat& +Camera::mask(void) const +{ + return m_mask; +} + +void +Camera::estimateExtrinsics(const std::vector& objectPoints, + const std::vector& imagePoints, + cv::Mat& rvec, cv::Mat& tvec) const +{ + std::vector Ms(imagePoints.size()); + for (size_t i = 0; i < Ms.size(); ++i) + { + Eigen::Vector3d P; + liftProjective(Eigen::Vector2d(imagePoints.at(i).x, imagePoints.at(i).y), P); + + P /= P(2); + + Ms.at(i).x = P(0); + Ms.at(i).y = P(1); + } + + // assume unit focal length, zero principal point, and zero distortion + cv::solvePnP(objectPoints, Ms, cv::Mat::eye(3, 3, CV_64F), cv::noArray(), rvec, tvec); +} + +double +Camera::reprojectionDist(const Eigen::Vector3d& P1, const Eigen::Vector3d& P2) const +{ + Eigen::Vector2d p1, p2; + + spaceToPlane(P1, p1); + spaceToPlane(P2, p2); + + return (p1 - p2).norm(); +} + +double +Camera::reprojectionError(const std::vector< std::vector >& objectPoints, + const std::vector< std::vector >& imagePoints, + const std::vector& rvecs, + const std::vector& tvecs, + cv::OutputArray _perViewErrors) const +{ + int imageCount = objectPoints.size(); + size_t pointsSoFar = 0; + double totalErr = 0.0; + + bool computePerViewErrors = _perViewErrors.needed(); + cv::Mat perViewErrors; + if (computePerViewErrors) + { + _perViewErrors.create(imageCount, 1, CV_64F); + perViewErrors = _perViewErrors.getMat(); + } + + for (int i = 0; i < imageCount; ++i) + { + size_t pointCount = imagePoints.at(i).size(); + + pointsSoFar += pointCount; + + std::vector estImagePoints; + 
projectPoints(objectPoints.at(i), rvecs.at(i), tvecs.at(i), + estImagePoints); + + double err = 0.0; + for (size_t j = 0; j < imagePoints.at(i).size(); ++j) + { + err += cv::norm(imagePoints.at(i).at(j) - estImagePoints.at(j)); + } + + if (computePerViewErrors) + { + perViewErrors.at(i) = err / pointCount; + } + + totalErr += err; + } + + return totalErr / pointsSoFar; +} + +double +Camera::reprojectionError(const Eigen::Vector3d& P, + const Eigen::Quaterniond& camera_q, + const Eigen::Vector3d& camera_t, + const Eigen::Vector2d& observed_p) const +{ + Eigen::Vector3d P_cam = camera_q.toRotationMatrix() * P + camera_t; + + Eigen::Vector2d p; + spaceToPlane(P_cam, p); + + return (p - observed_p).norm(); +} + +void +Camera::projectPoints(const std::vector& objectPoints, + const cv::Mat& rvec, + const cv::Mat& tvec, + std::vector& imagePoints) const +{ + // project 3D object points to the image plane + imagePoints.reserve(objectPoints.size()); + + //double + cv::Mat R0; + cv::Rodrigues(rvec, R0); + + Eigen::MatrixXd R(3,3); + R << R0.at(0,0), R0.at(0,1), R0.at(0,2), + R0.at(1,0), R0.at(1,1), R0.at(1,2), + R0.at(2,0), R0.at(2,1), R0.at(2,2); + + Eigen::Vector3d t; + t << tvec.at(0), tvec.at(1), tvec.at(2); + + for (size_t i = 0; i < objectPoints.size(); ++i) + { + const cv::Point3f& objectPoint = objectPoints.at(i); + + // Rotate and translate + Eigen::Vector3d P; + P << objectPoint.x, objectPoint.y, objectPoint.z; + + P = R * P + t; + + Eigen::Vector2d p; + spaceToPlane(P, p); + + imagePoints.push_back(cv::Point2f(p(0), p(1))); + } +} + +} diff --git a/camera_models/src/camera_models/CameraFactory.cc b/camera_models/src/camera_models/CameraFactory.cc new file mode 100644 index 0000000..42e9ad1 --- /dev/null +++ b/camera_models/src/camera_models/CameraFactory.cc @@ -0,0 +1,190 @@ +#include "camodocal/camera_models/CameraFactory.h" + +#include + +#include "camodocal/camera_models/CataCamera.h" +#include "camodocal/camera_models/EquidistantCamera.h" +#include 
"camodocal/camera_models/PinholeCamera.h" +#include "camodocal/camera_models/PinholeFullCamera.h" +#include "camodocal/camera_models/ScaramuzzaCamera.h" + +#include "ceres/ceres.h" + +namespace camodocal +{ + +boost::shared_ptr< CameraFactory > CameraFactory::m_instance; + +CameraFactory::CameraFactory( ) {} + +boost::shared_ptr< CameraFactory > +CameraFactory::instance( void ) +{ + if ( m_instance.get( ) == 0 ) + { + m_instance.reset( new CameraFactory ); + } + + return m_instance; +} + +CameraPtr +CameraFactory::generateCamera( Camera::ModelType modelType, const std::string& cameraName, cv::Size imageSize ) const +{ + switch ( modelType ) + { + case Camera::KANNALA_BRANDT: + { + EquidistantCameraPtr camera( new EquidistantCamera ); + + EquidistantCamera::Parameters params = camera->getParameters( ); + params.cameraName( ) = cameraName; + params.imageWidth( ) = imageSize.width; + params.imageHeight( ) = imageSize.height; + camera->setParameters( params ); + return camera; + } + case Camera::PINHOLE: + { + PinholeCameraPtr camera( new PinholeCamera ); + + PinholeCamera::Parameters params = camera->getParameters( ); + params.cameraName( ) = cameraName; + params.imageWidth( ) = imageSize.width; + params.imageHeight( ) = imageSize.height; + camera->setParameters( params ); + return camera; + } + case Camera::PINHOLE_FULL: + { + PinholeFullCameraPtr camera( new PinholeFullCamera ); + + PinholeFullCamera::Parameters params = camera->getParameters( ); + params.cameraName( ) = cameraName; + params.imageWidth( ) = imageSize.width; + params.imageHeight( ) = imageSize.height; + camera->setParameters( params ); + return camera; + } + case Camera::SCARAMUZZA: + { + OCAMCameraPtr camera( new OCAMCamera ); + + OCAMCamera::Parameters params = camera->getParameters( ); + params.cameraName( ) = cameraName; + params.imageWidth( ) = imageSize.width; + params.imageHeight( ) = imageSize.height; + camera->setParameters( params ); + return camera; + } + case Camera::MEI: + default: + { + 
CataCameraPtr camera( new CataCamera ); + + CataCamera::Parameters params = camera->getParameters( ); + params.cameraName( ) = cameraName; + params.imageWidth( ) = imageSize.width; + params.imageHeight( ) = imageSize.height; + camera->setParameters( params ); + return camera; + } + } +} + +CameraPtr +CameraFactory::generateCameraFromYamlFile( const std::string& filename ) +{ + cv::FileStorage fs( filename, cv::FileStorage::READ ); + + if ( !fs.isOpened( ) ) + { + return CameraPtr( ); + } + + Camera::ModelType modelType = Camera::MEI; + if ( !fs["model_type"].isNone( ) ) + { + std::string sModelType; + fs["model_type"] >> sModelType; + + if ( boost::iequals( sModelType, "kannala_brandt" ) ) + { + modelType = Camera::KANNALA_BRANDT; + } + else if ( boost::iequals( sModelType, "mei" ) ) + { + modelType = Camera::MEI; + } + else if ( boost::iequals( sModelType, "scaramuzza" ) ) + { + modelType = Camera::SCARAMUZZA; + } + else if ( boost::iequals( sModelType, "pinhole" ) ) + { + modelType = Camera::PINHOLE; + } + else if ( boost::iequals( sModelType, "PINHOLE_FULL" ) ) + { + modelType = Camera::PINHOLE_FULL; + } + else + { + std::cerr << "# ERROR: Unknown camera model: " << sModelType << std::endl; + return CameraPtr( ); + } + } + + switch ( modelType ) + { + case Camera::KANNALA_BRANDT: + { + EquidistantCameraPtr camera( new EquidistantCamera ); + + EquidistantCamera::Parameters params = camera->getParameters( ); + params.readFromYamlFile( filename ); + camera->setParameters( params ); + return camera; + } + case Camera::PINHOLE: + { + PinholeCameraPtr camera( new PinholeCamera ); + + PinholeCamera::Parameters params = camera->getParameters( ); + params.readFromYamlFile( filename ); + camera->setParameters( params ); + return camera; + } + case Camera::PINHOLE_FULL: + { + PinholeFullCameraPtr camera( new PinholeFullCamera ); + + PinholeFullCamera::Parameters params = camera->getParameters( ); + params.readFromYamlFile( filename ); + camera->setParameters( params ); + 
return camera; + } + case Camera::SCARAMUZZA: + { + OCAMCameraPtr camera( new OCAMCamera ); + + OCAMCamera::Parameters params = camera->getParameters( ); + params.readFromYamlFile( filename ); + camera->setParameters( params ); + return camera; + } + case Camera::MEI: + default: + { + CataCameraPtr camera( new CataCamera ); + + CataCamera::Parameters params = camera->getParameters( ); + params.readFromYamlFile( filename ); + camera->setParameters( params ); + return camera; + } + } + + return CameraPtr( ); +} +} diff --git a/camera_models/src/camera_models/CataCamera.cc b/camera_models/src/camera_models/CataCamera.cc new file mode 100644 index 0000000..7eacaa5 --- /dev/null +++ b/camera_models/src/camera_models/CataCamera.cc @@ -0,0 +1,1005 @@ +#include "camodocal/camera_models/CataCamera.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "camodocal/gpl/gpl.h" + +namespace camodocal +{ + +CataCamera::Parameters::Parameters() + : Camera::Parameters(MEI) + , m_xi(0.0) + , m_k1(0.0) + , m_k2(0.0) + , m_p1(0.0) + , m_p2(0.0) + , m_gamma1(0.0) + , m_gamma2(0.0) + , m_u0(0.0) + , m_v0(0.0) +{ + +} + +CataCamera::Parameters::Parameters(const std::string& cameraName, + int w, int h, + double xi, + double k1, double k2, + double p1, double p2, + double gamma1, double gamma2, + double u0, double v0) + : Camera::Parameters(MEI, cameraName, w, h) + , m_xi(xi) + , m_k1(k1) + , m_k2(k2) + , m_p1(p1) + , m_p2(p2) + , m_gamma1(gamma1) + , m_gamma2(gamma2) + , m_u0(u0) + , m_v0(v0) +{ +} + +double& +CataCamera::Parameters::xi(void) +{ + return m_xi; +} + +double& +CataCamera::Parameters::k1(void) +{ + return m_k1; +} + +double& +CataCamera::Parameters::k2(void) +{ + return m_k2; +} + +double& +CataCamera::Parameters::p1(void) +{ + return m_p1; +} + +double& +CataCamera::Parameters::p2(void) +{ + return m_p2; +} + +double& +CataCamera::Parameters::gamma1(void) +{ + return m_gamma1; +} + +double& +CataCamera::Parameters::gamma2(void) +{ + 
return m_gamma2; +} + +double& +CataCamera::Parameters::u0(void) +{ + return m_u0; +} + +double& +CataCamera::Parameters::v0(void) +{ + return m_v0; +} + +double +CataCamera::Parameters::xi(void) const +{ + return m_xi; +} + +double +CataCamera::Parameters::k1(void) const +{ + return m_k1; +} + +double +CataCamera::Parameters::k2(void) const +{ + return m_k2; +} + +double +CataCamera::Parameters::p1(void) const +{ + return m_p1; +} + +double +CataCamera::Parameters::p2(void) const +{ + return m_p2; +} + +double +CataCamera::Parameters::gamma1(void) const +{ + return m_gamma1; +} + +double +CataCamera::Parameters::gamma2(void) const +{ + return m_gamma2; +} + +double +CataCamera::Parameters::u0(void) const +{ + return m_u0; +} + +double +CataCamera::Parameters::v0(void) const +{ + return m_v0; +} + +bool +CataCamera::Parameters::readFromYamlFile(const std::string& filename) +{ + cv::FileStorage fs(filename, cv::FileStorage::READ); + + if (!fs.isOpened()) + { + return false; + } + + if (!fs["model_type"].isNone()) + { + std::string sModelType; + fs["model_type"] >> sModelType; + + if (sModelType.compare("MEI") != 0) + { + return false; + } + } + + m_modelType = MEI; + fs["camera_name"] >> m_cameraName; + m_imageWidth = static_cast(fs["image_width"]); + m_imageHeight = static_cast(fs["image_height"]); + + cv::FileNode n = fs["mirror_parameters"]; + m_xi = static_cast(n["xi"]); + + n = fs["distortion_parameters"]; + m_k1 = static_cast(n["k1"]); + m_k2 = static_cast(n["k2"]); + m_p1 = static_cast(n["p1"]); + m_p2 = static_cast(n["p2"]); + + n = fs["projection_parameters"]; + m_gamma1 = static_cast(n["gamma1"]); + m_gamma2 = static_cast(n["gamma2"]); + m_u0 = static_cast(n["u0"]); + m_v0 = static_cast(n["v0"]); + + return true; +} + +void +CataCamera::Parameters::writeToYamlFile(const std::string& filename) const +{ + cv::FileStorage fs(filename, cv::FileStorage::WRITE); + + fs << "model_type" << "MEI"; + fs << "camera_name" << m_cameraName; + fs << "image_width" << 
m_imageWidth; + fs << "image_height" << m_imageHeight; + + // mirror: xi + fs << "mirror_parameters"; + fs << "{" << "xi" << m_xi << "}"; + + // radial distortion: k1, k2 + // tangential distortion: p1, p2 + fs << "distortion_parameters"; + fs << "{" << "k1" << m_k1 + << "k2" << m_k2 + << "p1" << m_p1 + << "p2" << m_p2 << "}"; + + // projection: gamma1, gamma2, u0, v0 + fs << "projection_parameters"; + fs << "{" << "gamma1" << m_gamma1 + << "gamma2" << m_gamma2 + << "u0" << m_u0 + << "v0" << m_v0 << "}"; + + fs.release(); +} + +CataCamera::Parameters& +CataCamera::Parameters::operator=(const CataCamera::Parameters& other) +{ + if (this != &other) + { + m_modelType = other.m_modelType; + m_cameraName = other.m_cameraName; + m_imageWidth = other.m_imageWidth; + m_imageHeight = other.m_imageHeight; + m_xi = other.m_xi; + m_k1 = other.m_k1; + m_k2 = other.m_k2; + m_p1 = other.m_p1; + m_p2 = other.m_p2; + m_gamma1 = other.m_gamma1; + m_gamma2 = other.m_gamma2; + m_u0 = other.m_u0; + m_v0 = other.m_v0; + } + + return *this; +} + +std::ostream& +operator<< (std::ostream& out, const CataCamera::Parameters& params) +{ + out << "Camera Parameters:" << std::endl; + out << " model_type " << "MEI" << std::endl; + out << " camera_name " << params.m_cameraName << std::endl; + out << " image_width " << params.m_imageWidth << std::endl; + out << " image_height " << params.m_imageHeight << std::endl; + + out << "Mirror Parameters" << std::endl; + out << std::fixed << std::setprecision(10); + out << " xi " << params.m_xi << std::endl; + + // radial distortion: k1, k2 + // tangential distortion: p1, p2 + out << "Distortion Parameters" << std::endl; + out << " k1 " << params.m_k1 << std::endl + << " k2 " << params.m_k2 << std::endl + << " p1 " << params.m_p1 << std::endl + << " p2 " << params.m_p2 << std::endl; + + // projection: gamma1, gamma2, u0, v0 + out << "Projection Parameters" << std::endl; + out << " gamma1 " << params.m_gamma1 << std::endl + << " gamma2 " << params.m_gamma2 
<< std::endl + << " u0 " << params.m_u0 << std::endl + << " v0 " << params.m_v0 << std::endl; + + return out; +} + +CataCamera::CataCamera() + : m_inv_K11(1.0) + , m_inv_K13(0.0) + , m_inv_K22(1.0) + , m_inv_K23(0.0) + , m_noDistortion(true) +{ + +} + +CataCamera::CataCamera(const std::string& cameraName, + int imageWidth, int imageHeight, + double xi, double k1, double k2, double p1, double p2, + double gamma1, double gamma2, double u0, double v0) + : mParameters(cameraName, imageWidth, imageHeight, + xi, k1, k2, p1, p2, gamma1, gamma2, u0, v0) +{ + if ((mParameters.k1() == 0.0) && + (mParameters.k2() == 0.0) && + (mParameters.p1() == 0.0) && + (mParameters.p2() == 0.0)) + { + m_noDistortion = true; + } + else + { + m_noDistortion = false; + } + + // Inverse camera projection matrix parameters + m_inv_K11 = 1.0 / mParameters.gamma1(); + m_inv_K13 = -mParameters.u0() / mParameters.gamma1(); + m_inv_K22 = 1.0 / mParameters.gamma2(); + m_inv_K23 = -mParameters.v0() / mParameters.gamma2(); +} + +CataCamera::CataCamera(const CataCamera::Parameters& params) + : mParameters(params) +{ + if ((mParameters.k1() == 0.0) && + (mParameters.k2() == 0.0) && + (mParameters.p1() == 0.0) && + (mParameters.p2() == 0.0)) + { + m_noDistortion = true; + } + else + { + m_noDistortion = false; + } + + // Inverse camera projection matrix parameters + m_inv_K11 = 1.0 / mParameters.gamma1(); + m_inv_K13 = -mParameters.u0() / mParameters.gamma1(); + m_inv_K22 = 1.0 / mParameters.gamma2(); + m_inv_K23 = -mParameters.v0() / mParameters.gamma2(); +} + +Camera::ModelType +CataCamera::modelType(void) const +{ + return mParameters.modelType(); +} + +const std::string& +CataCamera::cameraName(void) const +{ + return mParameters.cameraName(); +} + +int +CataCamera::imageWidth(void) const +{ + return mParameters.imageWidth(); +} + +int +CataCamera::imageHeight(void) const +{ + return mParameters.imageHeight(); +} + +void +CataCamera::estimateIntrinsics(const cv::Size& boardSize, + const std::vector< 
std::vector >& objectPoints, + const std::vector< std::vector >& imagePoints) +{ + Parameters params = getParameters(); + + double u0 = params.imageWidth() / 2.0; + double v0 = params.imageHeight() / 2.0; + + double gamma0 = 0.0; + double minReprojErr = std::numeric_limits::max(); + + std::vector rvecs, tvecs; + rvecs.assign(objectPoints.size(), cv::Mat()); + tvecs.assign(objectPoints.size(), cv::Mat()); + + params.xi() = 1.0; + params.k1() = 0.0; + params.k2() = 0.0; + params.p1() = 0.0; + params.p2() = 0.0; + params.u0() = u0; + params.v0() = v0; + + // Initialize gamma (focal length) + // Use non-radial line image and xi = 1 + for (size_t i = 0; i < imagePoints.size(); ++i) + { + for (int r = 0; r < boardSize.height; ++r) + { + cv::Mat P(boardSize.width, 4, CV_64F); + for (int c = 0; c < boardSize.width; ++c) + { + const cv::Point2f& imagePoint = imagePoints.at(i).at(r * boardSize.width + c); + + double u = imagePoint.x - u0; + double v = imagePoint.y - v0; + + P.at(c, 0) = u; + P.at(c, 1) = v; + P.at(c, 2) = 0.5; + P.at(c, 3) = -0.5 * (square(u) + square(v)); + } + + cv::Mat C; + cv::SVD::solveZ(P, C); + + double t = square(C.at(0)) + square(C.at(1)) + C.at(2) * C.at(3); + if (t < 0.0) + { + continue; + } + + // check that line image is not radial + double d = sqrt(1.0 / t); + double nx = C.at(0) * d; + double ny = C.at(1) * d; + if (hypot(nx, ny) > 0.95) + { + continue; + } + + double gamma = sqrt(C.at(2) / C.at(3)); + + params.gamma1() = gamma; + params.gamma2() = gamma; + setParameters(params); + + for (size_t j = 0; j < objectPoints.size(); ++j) + { + estimateExtrinsics(objectPoints.at(j), imagePoints.at(j), rvecs.at(j), tvecs.at(j)); + } + + double reprojErr = reprojectionError(objectPoints, imagePoints, rvecs, tvecs, cv::noArray()); + + if (reprojErr < minReprojErr) + { + minReprojErr = reprojErr; + gamma0 = gamma; + } + } + } + + if (gamma0 <= 0.0 && minReprojErr >= std::numeric_limits::max()) + { + std::cout << "[" << params.cameraName() << "] " + << "# 
INFO: CataCamera model fails with given data. " << std::endl; + + return; + } + + params.gamma1() = gamma0; + params.gamma2() = gamma0; + setParameters(params); +} + +/** + * \brief Lifts a point from the image plane to the unit sphere + * + * \param p image coordinates + * \param P coordinates of the point on the sphere + */ +void +CataCamera::liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const +{ + double mx_d, my_d,mx2_d, mxy_d, my2_d, mx_u, my_u; + double rho2_d, rho4_d, radDist_d, Dx_d, Dy_d, inv_denom_d; + double lambda; + + // Lift points to normalised plane + mx_d = m_inv_K11 * p(0) + m_inv_K13; + my_d = m_inv_K22 * p(1) + m_inv_K23; + + if (m_noDistortion) + { + mx_u = mx_d; + my_u = my_d; + } + else + { + // Apply inverse distortion model + if (0) + { + double k1 = mParameters.k1(); + double k2 = mParameters.k2(); + double p1 = mParameters.p1(); + double p2 = mParameters.p2(); + + // Inverse distortion model + // proposed by Heikkila + mx2_d = mx_d*mx_d; + my2_d = my_d*my_d; + mxy_d = mx_d*my_d; + rho2_d = mx2_d+my2_d; + rho4_d = rho2_d*rho2_d; + radDist_d = k1*rho2_d+k2*rho4_d; + Dx_d = mx_d*radDist_d + p2*(rho2_d+2*mx2_d) + 2*p1*mxy_d; + Dy_d = my_d*radDist_d + p1*(rho2_d+2*my2_d) + 2*p2*mxy_d; + inv_denom_d = 1/(1+4*k1*rho2_d+6*k2*rho4_d+8*p1*my_d+8*p2*mx_d); + + mx_u = mx_d - inv_denom_d*Dx_d; + my_u = my_d - inv_denom_d*Dy_d; + } + else + { + // Recursive distortion model + int n = 6; + Eigen::Vector2d d_u; + distortion(Eigen::Vector2d(mx_d, my_d), d_u); + // Approximate value + mx_u = mx_d - d_u(0); + my_u = my_d - d_u(1); + + for (int i = 1; i < n; ++i) + { + distortion(Eigen::Vector2d(mx_u, my_u), d_u); + mx_u = mx_d - d_u(0); + my_u = my_d - d_u(1); + } + } + } + + // Lift normalised points to the sphere (inv_hslash) + double xi = mParameters.xi(); + if (xi == 1.0) + { + lambda = 2.0 / (mx_u * mx_u + my_u * my_u + 1.0); + P << lambda * mx_u, lambda * my_u, lambda - 1.0; + } + else + { + lambda = (xi + sqrt(1.0 + (1.0 - xi * xi) * (mx_u 
* mx_u + my_u * my_u))) / (1.0 + mx_u * mx_u + my_u * my_u); + P << lambda * mx_u, lambda * my_u, lambda - xi; + } +} + +/** + * \brief Lifts a point from the image plane to its projective ray + * + * \param p image coordinates + * \param P coordinates of the projective ray + */ +void +CataCamera::liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const +{ + double mx_d, my_d,mx2_d, mxy_d, my2_d, mx_u, my_u; + double rho2_d, rho4_d, radDist_d, Dx_d, Dy_d, inv_denom_d; + //double lambda; + + // Lift points to normalised plane + mx_d = m_inv_K11 * p(0) + m_inv_K13; + my_d = m_inv_K22 * p(1) + m_inv_K23; + + if (m_noDistortion) + { + mx_u = mx_d; + my_u = my_d; + } + else + { + if (0) + { + double k1 = mParameters.k1(); + double k2 = mParameters.k2(); + double p1 = mParameters.p1(); + double p2 = mParameters.p2(); + + // Apply inverse distortion model + // proposed by Heikkila + mx2_d = mx_d*mx_d; + my2_d = my_d*my_d; + mxy_d = mx_d*my_d; + rho2_d = mx2_d+my2_d; + rho4_d = rho2_d*rho2_d; + radDist_d = k1*rho2_d+k2*rho4_d; + Dx_d = mx_d*radDist_d + p2*(rho2_d+2*mx2_d) + 2*p1*mxy_d; + Dy_d = my_d*radDist_d + p1*(rho2_d+2*my2_d) + 2*p2*mxy_d; + inv_denom_d = 1/(1+4*k1*rho2_d+6*k2*rho4_d+8*p1*my_d+8*p2*mx_d); + + mx_u = mx_d - inv_denom_d*Dx_d; + my_u = my_d - inv_denom_d*Dy_d; + } + else + { + // Recursive distortion model + int n = 8; + Eigen::Vector2d d_u; + distortion(Eigen::Vector2d(mx_d, my_d), d_u); + // Approximate value + mx_u = mx_d - d_u(0); + my_u = my_d - d_u(1); + + for (int i = 1; i < n; ++i) + { + distortion(Eigen::Vector2d(mx_u, my_u), d_u); + mx_u = mx_d - d_u(0); + my_u = my_d - d_u(1); + } + } + } + + // Obtain a projective ray + double xi = mParameters.xi(); + if (xi == 1.0) + { + P << mx_u, my_u, (1.0 - mx_u * mx_u - my_u * my_u) / 2.0; + } + else + { + // Reuse variable + rho2_d = mx_u * mx_u + my_u * my_u; + P << mx_u, my_u, 1.0 - xi * (rho2_d + 1.0) / (xi + sqrt(1.0 + (1.0 - xi * xi) * rho2_d)); + } +} + + +/** + * \brief Project a 3D 
point (\a x,\a y,\a z) to the image plane in (\a u,\a v) + * + * \param P 3D point coordinates + * \param p return value, contains the image point coordinates + */ +void +CataCamera::spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const +{ + Eigen::Vector2d p_u, p_d; + + // Project points to the normalised plane + double z = P(2) + mParameters.xi() * P.norm(); + p_u << P(0) / z, P(1) / z; + + if (m_noDistortion) + { + p_d = p_u; + } + else + { + // Apply distortion + Eigen::Vector2d d_u; + distortion(p_u, d_u); + p_d = p_u + d_u; + } + + // Apply generalised projection matrix + p << mParameters.gamma1() * p_d(0) + mParameters.u0(), + mParameters.gamma2() * p_d(1) + mParameters.v0(); +} + +#if 0 +/** + * \brief Project a 3D point to the image plane and calculate Jacobian + * + * \param P 3D point coordinates + * \param p return value, contains the image point coordinates + */ +void +CataCamera::spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p, + Eigen::Matrix& J) const +{ + double xi = mParameters.xi(); + + Eigen::Vector2d p_u, p_d; + double norm, inv_denom; + double dxdmx, dydmx, dxdmy, dydmy; + + norm = P.norm(); + // Project points to the normalised plane + inv_denom = 1.0 / (P(2) + xi * norm); + p_u << inv_denom * P(0), inv_denom * P(1); + + // Calculate jacobian + inv_denom = inv_denom * inv_denom / norm; + double dudx = inv_denom * (norm * P(2) + xi * (P(1) * P(1) + P(2) * P(2))); + double dvdx = -inv_denom * xi * P(0) * P(1); + double dudy = dvdx; + double dvdy = inv_denom * (norm * P(2) + xi * (P(0) * P(0) + P(2) * P(2))); + inv_denom = inv_denom * (-xi * P(2) - norm); // reuse variable + double dudz = P(0) * inv_denom; + double dvdz = P(1) * inv_denom; + + if (m_noDistortion) + { + p_d = p_u; + } + else + { + // Apply distortion + Eigen::Vector2d d_u; + distortion(p_u, d_u); + p_d = p_u + d_u; + } + + double gamma1 = mParameters.gamma1(); + double gamma2 = mParameters.gamma2(); + + // Make the product of the jacobians + // and add 
projection matrix jacobian + inv_denom = gamma1 * (dudx * dxdmx + dvdx * dxdmy); // reuse + dvdx = gamma2 * (dudx * dydmx + dvdx * dydmy); + dudx = inv_denom; + + inv_denom = gamma1 * (dudy * dxdmx + dvdy * dxdmy); // reuse + dvdy = gamma2 * (dudy * dydmx + dvdy * dydmy); + dudy = inv_denom; + + inv_denom = gamma1 * (dudz * dxdmx + dvdz * dxdmy); // reuse + dvdz = gamma2 * (dudz * dydmx + dvdz * dydmy); + dudz = inv_denom; + + // Apply generalised projection matrix + p << gamma1 * p_d(0) + mParameters.u0(), + gamma2 * p_d(1) + mParameters.v0(); + + J << dudx, dudy, dudz, + dvdx, dvdy, dvdz; +} +#endif + +/** + * \brief Projects an undistorted 2D point p_u to the image plane + * + * \param p_u 2D point coordinates + * \return image point coordinates + */ +void +CataCamera::undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const +{ + Eigen::Vector2d p_d; + + if (m_noDistortion) + { + p_d = p_u; + } + else + { + // Apply distortion + Eigen::Vector2d d_u; + distortion(p_u, d_u); + p_d = p_u + d_u; + } + + // Apply generalised projection matrix + p << mParameters.gamma1() * p_d(0) + mParameters.u0(), + mParameters.gamma2() * p_d(1) + mParameters.v0(); +} + +/** + * \brief Apply distortion to input point (from the normalised plane) + * + * \param p_u undistorted coordinates of point on the normalised plane + * \return to obtain the distorted point: p_d = p_u + d_u + */ +void +CataCamera::distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u) const +{ + double k1 = mParameters.k1(); + double k2 = mParameters.k2(); + double p1 = mParameters.p1(); + double p2 = mParameters.p2(); + + double mx2_u, my2_u, mxy_u, rho2_u, rad_dist_u; + + mx2_u = p_u(0) * p_u(0); + my2_u = p_u(1) * p_u(1); + mxy_u = p_u(0) * p_u(1); + rho2_u = mx2_u + my2_u; + rad_dist_u = k1 * rho2_u + k2 * rho2_u * rho2_u; + d_u << p_u(0) * rad_dist_u + 2.0 * p1 * mxy_u + p2 * (rho2_u + 2.0 * mx2_u), + p_u(1) * rad_dist_u + 2.0 * p2 * mxy_u + p1 * (rho2_u + 2.0 * my2_u); +} + +/** + * \brief 
Apply distortion to input point (from the normalised plane) + * and calculate Jacobian + * + * \param p_u undistorted coordinates of point on the normalised plane + * \return to obtain the distorted point: p_d = p_u + d_u + */ +void +CataCamera::distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u, + Eigen::Matrix2d& J) const +{ + double k1 = mParameters.k1(); + double k2 = mParameters.k2(); + double p1 = mParameters.p1(); + double p2 = mParameters.p2(); + + double mx2_u, my2_u, mxy_u, rho2_u, rad_dist_u; + + mx2_u = p_u(0) * p_u(0); + my2_u = p_u(1) * p_u(1); + mxy_u = p_u(0) * p_u(1); + rho2_u = mx2_u + my2_u; + rad_dist_u = k1 * rho2_u + k2 * rho2_u * rho2_u; + d_u << p_u(0) * rad_dist_u + 2.0 * p1 * mxy_u + p2 * (rho2_u + 2.0 * mx2_u), + p_u(1) * rad_dist_u + 2.0 * p2 * mxy_u + p1 * (rho2_u + 2.0 * my2_u); + + double dxdmx = 1.0 + rad_dist_u + k1 * 2.0 * mx2_u + k2 * rho2_u * 4.0 * mx2_u + 2.0 * p1 * p_u(1) + 6.0 * p2 * p_u(0); + double dydmx = k1 * 2.0 * p_u(0) * p_u(1) + k2 * 4.0 * rho2_u * p_u(0) * p_u(1) + p1 * 2.0 * p_u(0) + 2.0 * p2 * p_u(1); + double dxdmy = dydmx; + double dydmy = 1.0 + rad_dist_u + k1 * 2.0 * my2_u + k2 * rho2_u * 4.0 * my2_u + 6.0 * p1 * p_u(1) + 2.0 * p2 * p_u(0); + + J << dxdmx, dxdmy, + dydmx, dydmy; +} + +void +CataCamera::initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale) const +{ + cv::Size imageSize(mParameters.imageWidth(), mParameters.imageHeight()); + + cv::Mat mapX = cv::Mat::zeros(imageSize, CV_32F); + cv::Mat mapY = cv::Mat::zeros(imageSize, CV_32F); + + for (int v = 0; v < imageSize.height; ++v) + { + for (int u = 0; u < imageSize.width; ++u) + { + double mx_u = m_inv_K11 / fScale * u + m_inv_K13 / fScale; + double my_u = m_inv_K22 / fScale * v + m_inv_K23 / fScale; + + double xi = mParameters.xi(); + double d2 = mx_u * mx_u + my_u * my_u; + + Eigen::Vector3d P; + P << mx_u, my_u, 1.0 - xi * (d2 + 1.0) / (xi + sqrt(1.0 + (1.0 - xi * xi) * d2)); + + Eigen::Vector2d p; + spaceToPlane(P, p); + + 
mapX.at(v,u) = p(0); + mapY.at(v,u) = p(1); + } + } + + cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false); +} + +cv::Mat +CataCamera::initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2, + float fx, float fy, + cv::Size imageSize, + float cx, float cy, + cv::Mat rmat) const +{ + if (imageSize == cv::Size(0, 0)) + { + imageSize = cv::Size(mParameters.imageWidth(), mParameters.imageHeight()); + } + + cv::Mat mapX = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F); + cv::Mat mapY = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F); + + Eigen::Matrix3f K_rect; + + if (cx == -1.0f && cy == -1.0f) + { + K_rect << fx, 0, imageSize.width / 2, + 0, fy, imageSize.height / 2, + 0, 0, 1; + } + else + { + K_rect << fx, 0, cx, + 0, fy, cy, + 0, 0, 1; + } + + if (fx == -1.0f || fy == -1.0f) + { + K_rect(0,0) = mParameters.gamma1(); + K_rect(1,1) = mParameters.gamma2(); + } + + Eigen::Matrix3f K_rect_inv = K_rect.inverse(); + + Eigen::Matrix3f R, R_inv; + cv::cv2eigen(rmat, R); + R_inv = R.inverse(); + + for (int v = 0; v < imageSize.height; ++v) + { + for (int u = 0; u < imageSize.width; ++u) + { + Eigen::Vector3f xo; + xo << u, v, 1; + + Eigen::Vector3f uo = R_inv * K_rect_inv * xo; + + Eigen::Vector2d p; + spaceToPlane(uo.cast(), p); + + mapX.at(v,u) = p(0); + mapY.at(v,u) = p(1); + } + } + + cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false); + + cv::Mat K_rect_cv; + cv::eigen2cv(K_rect, K_rect_cv); + return K_rect_cv; +} + +int +CataCamera::parameterCount(void) const +{ + return 9; +} + +const CataCamera::Parameters& +CataCamera::getParameters(void) const +{ + return mParameters; +} + +void +CataCamera::setParameters(const CataCamera::Parameters& parameters) +{ + mParameters = parameters; + + if ((mParameters.k1() == 0.0) && + (mParameters.k2() == 0.0) && + (mParameters.p1() == 0.0) && + (mParameters.p2() == 0.0)) + { + m_noDistortion = true; + } + else + { + m_noDistortion = false; + } + + m_inv_K11 = 1.0 / mParameters.gamma1(); + m_inv_K13 = 
-mParameters.u0() / mParameters.gamma1(); + m_inv_K22 = 1.0 / mParameters.gamma2(); + m_inv_K23 = -mParameters.v0() / mParameters.gamma2(); +} + +void +CataCamera::readParameters(const std::vector& parameterVec) +{ + if ((int)parameterVec.size() != parameterCount()) + { + return; + } + + Parameters params = getParameters(); + + params.xi() = parameterVec.at(0); + params.k1() = parameterVec.at(1); + params.k2() = parameterVec.at(2); + params.p1() = parameterVec.at(3); + params.p2() = parameterVec.at(4); + params.gamma1() = parameterVec.at(5); + params.gamma2() = parameterVec.at(6); + params.u0() = parameterVec.at(7); + params.v0() = parameterVec.at(8); + + setParameters(params); +} + +void +CataCamera::writeParameters(std::vector& parameterVec) const +{ + parameterVec.resize(parameterCount()); + parameterVec.at(0) = mParameters.xi(); + parameterVec.at(1) = mParameters.k1(); + parameterVec.at(2) = mParameters.k2(); + parameterVec.at(3) = mParameters.p1(); + parameterVec.at(4) = mParameters.p2(); + parameterVec.at(5) = mParameters.gamma1(); + parameterVec.at(6) = mParameters.gamma2(); + parameterVec.at(7) = mParameters.u0(); + parameterVec.at(8) = mParameters.v0(); +} + +void +CataCamera::writeParametersToYamlFile(const std::string& filename) const +{ + mParameters.writeToYamlFile(filename); +} + +std::string +CataCamera::parametersToString(void) const +{ + std::ostringstream oss; + oss << mParameters; + + return oss.str(); +} + +} diff --git a/camera_models/src/camera_models/CostFunctionFactory.cc b/camera_models/src/camera_models/CostFunctionFactory.cc new file mode 100644 index 0000000..2bcdb14 --- /dev/null +++ b/camera_models/src/camera_models/CostFunctionFactory.cc @@ -0,0 +1,1384 @@ +#include "camodocal/camera_models/CostFunctionFactory.h" + +#include "camodocal/camera_models/CataCamera.h" +#include "camodocal/camera_models/EquidistantCamera.h" +#include "camodocal/camera_models/PinholeCamera.h" +#include "camodocal/camera_models/PinholeFullCamera.h" +#include 
"camodocal/camera_models/ScaramuzzaCamera.h" +#include "ceres/ceres.h" + +namespace camodocal +{ + +template< typename T > +void +worldToCameraTransform( const T* const q_cam_odo, + const T* const t_cam_odo, + const T* const p_odo, + const T* const att_odo, + T* q, + T* t, + bool optimize_cam_odo_z = true ) +{ + Eigen::Quaternion< T > q_z_inv( cos( att_odo[0] / T( 2 ) ), T( 0 ), T( 0 ), -sin( att_odo[0] / T( 2 ) ) ); + Eigen::Quaternion< T > q_y_inv( cos( att_odo[1] / T( 2 ) ), T( 0 ), -sin( att_odo[1] / T( 2 ) ), T( 0 ) ); + Eigen::Quaternion< T > q_x_inv( cos( att_odo[2] / T( 2 ) ), -sin( att_odo[2] / T( 2 ) ), T( 0 ), T( 0 ) ); + + Eigen::Quaternion< T > q_zyx_inv = q_x_inv * q_y_inv * q_z_inv; + + T q_odo[4] = { q_zyx_inv.w( ), q_zyx_inv.x( ), q_zyx_inv.y( ), q_zyx_inv.z( ) }; + + T q_odo_cam[4] = { q_cam_odo[3], -q_cam_odo[0], -q_cam_odo[1], -q_cam_odo[2] }; + + T q0[4]; + ceres::QuaternionProduct( q_odo_cam, q_odo, q0 ); + + T t0[3]; + T t_odo[3] = { p_odo[0], p_odo[1], p_odo[2] }; + + ceres::QuaternionRotatePoint( q_odo, t_odo, t0 ); + + t0[0] += t_cam_odo[0]; + t0[1] += t_cam_odo[1]; + + if ( optimize_cam_odo_z ) + { + t0[2] += t_cam_odo[2]; + } + + ceres::QuaternionRotatePoint( q_odo_cam, t0, t ); + t[0] = -t[0]; + t[1] = -t[1]; + t[2] = -t[2]; + + // Convert quaternion from Ceres convention (w, x, y, z) + // to Eigen convention (x, y, z, w) + q[0] = q0[1]; + q[1] = q0[2]; + q[2] = q0[3]; + q[3] = q0[0]; +} + +template< class CameraT > +class ReprojectionError1 +{ + public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + ReprojectionError1( const Eigen::Vector3d& observed_P, const Eigen::Vector2d& observed_p ) + : m_observed_P( observed_P ) + , m_observed_p( observed_p ) + , m_sqrtPrecisionMat( Eigen::Matrix2d::Identity( ) ) + { + } + + ReprojectionError1( const Eigen::Vector3d& observed_P, + const Eigen::Vector2d& observed_p, + const Eigen::Matrix2d& sqrtPrecisionMat ) + : m_observed_P( observed_P ) + , m_observed_p( observed_p ) + , m_sqrtPrecisionMat( 
sqrtPrecisionMat ) + { + } + + ReprojectionError1( const std::vector< double >& intrinsic_params, + const Eigen::Vector3d& observed_P, + const Eigen::Vector2d& observed_p ) + : m_intrinsic_params( intrinsic_params ) + , m_observed_P( observed_P ) + , m_observed_p( observed_p ) + { + } + + // variables: camera intrinsics and camera extrinsics + template< typename T > + bool operator( )( const T* const intrinsic_params, const T* const q, const T* const t, T* residuals ) const + { + Eigen::Matrix< T, 3, 1 > P = m_observed_P.cast< T >( ); + + Eigen::Matrix< T, 2, 1 > predicted_p; + CameraT::spaceToPlane( intrinsic_params, q, t, P, predicted_p ); + + Eigen::Matrix< T, 2, 1 > e = predicted_p - m_observed_p.cast< T >( ); + + Eigen::Matrix< T, 2, 1 > e_weighted = m_sqrtPrecisionMat.cast< T >( ) * e; + + residuals[0] = e_weighted( 0 ); + residuals[1] = e_weighted( 1 ); + + return true; + } + + // variables: camera-odometry transforms and odometry poses + template< typename T > + bool operator( )( const T* const q_cam_odo, + const T* const t_cam_odo, + const T* const p_odo, + const T* const att_odo, + T* residuals ) const + { + T q[4], t[3]; + worldToCameraTransform( q_cam_odo, t_cam_odo, p_odo, att_odo, q, t ); + + Eigen::Matrix< T, 3, 1 > P = m_observed_P.cast< T >( ); + + std::vector< T > intrinsic_params( m_intrinsic_params.begin( ), m_intrinsic_params.end( ) ); + + // project 3D object point to the image plane + Eigen::Matrix< T, 2, 1 > predicted_p; + CameraT::spaceToPlane( intrinsic_params.data( ), q, t, P, predicted_p ); + + residuals[0] = predicted_p( 0 ) - T( m_observed_p( 0 ) ); + residuals[1] = predicted_p( 1 ) - T( m_observed_p( 1 ) ); + + return true; + } + + // private: + // camera intrinsics + std::vector< double > m_intrinsic_params; + + // observed 3D point + Eigen::Vector3d m_observed_P; + + // observed 2D point + Eigen::Vector2d m_observed_p; + + // square root of precision matrix + Eigen::Matrix2d m_sqrtPrecisionMat; +}; + +// variables: camera 
extrinsics, 3D point +template< class CameraT > +class ReprojectionError2 +{ + public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + ReprojectionError2( const std::vector< double >& intrinsic_params, const Eigen::Vector2d& observed_p ) + : m_intrinsic_params( intrinsic_params ) + , m_observed_p( observed_p ) + { + } + + template< typename T > + bool operator( )( const T* const q, const T* const t, const T* const point, T* residuals ) const + { + Eigen::Matrix< T, 3, 1 > P; + P( 0 ) = T( point[0] ); + P( 1 ) = T( point[1] ); + P( 2 ) = T( point[2] ); + + std::vector< T > intrinsic_params( m_intrinsic_params.begin( ), m_intrinsic_params.end( ) ); + + // project 3D object point to the image plane + Eigen::Matrix< T, 2, 1 > predicted_p; + CameraT::spaceToPlane( intrinsic_params.data( ), q, t, P, predicted_p ); + + residuals[0] = predicted_p( 0 ) - T( m_observed_p( 0 ) ); + residuals[1] = predicted_p( 1 ) - T( m_observed_p( 1 ) ); + + return true; + } + + private: + // camera intrinsics + std::vector< double > m_intrinsic_params; + + // observed 2D point + Eigen::Vector2d m_observed_p; +}; + +template< class CameraT > +class ReprojectionError3 +{ + public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + ReprojectionError3( const Eigen::Vector2d& observed_p ) + : m_observed_p( observed_p ) + , m_sqrtPrecisionMat( Eigen::Matrix2d::Identity( ) ) + , m_optimize_cam_odo_z( true ) + { + } + + ReprojectionError3( const Eigen::Vector2d& observed_p, const Eigen::Matrix2d& sqrtPrecisionMat ) + : m_observed_p( observed_p ) + , m_sqrtPrecisionMat( sqrtPrecisionMat ) + , m_optimize_cam_odo_z( true ) + { + } + + ReprojectionError3( const std::vector< double >& intrinsic_params, const Eigen::Vector2d& observed_p ) + : m_intrinsic_params( intrinsic_params ) + , m_observed_p( observed_p ) + , m_sqrtPrecisionMat( Eigen::Matrix2d::Identity( ) ) + , m_optimize_cam_odo_z( true ) + { + } + + ReprojectionError3( const std::vector< double >& intrinsic_params, + const Eigen::Vector2d& observed_p, + const 
Eigen::Matrix2d& sqrtPrecisionMat ) + : m_intrinsic_params( intrinsic_params ) + , m_observed_p( observed_p ) + , m_sqrtPrecisionMat( sqrtPrecisionMat ) + , m_optimize_cam_odo_z( true ) + { + } + + ReprojectionError3( const std::vector< double >& intrinsic_params, + const Eigen::Vector3d& odo_pos, + const Eigen::Vector3d& odo_att, + const Eigen::Vector2d& observed_p, + bool optimize_cam_odo_z ) + : m_intrinsic_params( intrinsic_params ) + , m_odo_pos( odo_pos ) + , m_odo_att( odo_att ) + , m_observed_p( observed_p ) + , m_optimize_cam_odo_z( optimize_cam_odo_z ) + { + } + + ReprojectionError3( const std::vector< double >& intrinsic_params, + const Eigen::Quaterniond& cam_odo_q, + const Eigen::Vector3d& cam_odo_t, + const Eigen::Vector3d& odo_pos, + const Eigen::Vector3d& odo_att, + const Eigen::Vector2d& observed_p ) + : m_intrinsic_params( intrinsic_params ) + , m_cam_odo_q( cam_odo_q ) + , m_cam_odo_t( cam_odo_t ) + , m_odo_pos( odo_pos ) + , m_odo_att( odo_att ) + , m_observed_p( observed_p ) + , m_optimize_cam_odo_z( true ) + { + } + + // variables: camera intrinsics, camera-to-odometry transform, + // odometry extrinsics, 3D point + template< typename T > + bool operator( )( const T* const intrinsic_params, + const T* const q_cam_odo, + const T* const t_cam_odo, + const T* const p_odo, + const T* const att_odo, + const T* const point, + T* residuals ) const + { + T q[4], t[3]; + worldToCameraTransform( q_cam_odo, t_cam_odo, p_odo, att_odo, q, t, m_optimize_cam_odo_z ); + + Eigen::Matrix< T, 3, 1 > P( point[0], point[1], point[2] ); + + // project 3D object point to the image plane + Eigen::Matrix< T, 2, 1 > predicted_p; + CameraT::spaceToPlane( intrinsic_params, q, t, P, predicted_p ); + + Eigen::Matrix< T, 2, 1 > err = predicted_p - m_observed_p.cast< T >( ); + Eigen::Matrix< T, 2, 1 > err_weighted = m_sqrtPrecisionMat.cast< T >( ) * err; + + residuals[0] = err_weighted( 0 ); + residuals[1] = err_weighted( 1 ); + + return true; + } + + // variables: 
camera-to-odometry transform, 3D point + template< typename T > + bool operator( )( const T* const q_cam_odo, const T* const t_cam_odo, const T* const point, T* residuals ) const + { + T p_odo[3] = { T( m_odo_pos( 0 ) ), T( m_odo_pos( 1 ) ), T( m_odo_pos( 2 ) ) }; + T att_odo[3] = { T( m_odo_att( 0 ) ), T( m_odo_att( 1 ) ), T( m_odo_att( 2 ) ) }; + T q[4], t[3]; + + worldToCameraTransform( q_cam_odo, t_cam_odo, p_odo, att_odo, q, t, m_optimize_cam_odo_z ); + + std::vector< T > intrinsic_params( m_intrinsic_params.begin( ), m_intrinsic_params.end( ) ); + Eigen::Matrix< T, 3, 1 > P( point[0], point[1], point[2] ); + + // project 3D object point to the image plane + Eigen::Matrix< T, 2, 1 > predicted_p; + CameraT::spaceToPlane( intrinsic_params.data( ), q, t, P, predicted_p ); + + residuals[0] = predicted_p( 0 ) - T( m_observed_p( 0 ) ); + residuals[1] = predicted_p( 1 ) - T( m_observed_p( 1 ) ); + + return true; + } + + // variables: camera-to-odometry transform, odometry extrinsics, 3D point + template< typename T > + bool operator( )( const T* const q_cam_odo, + const T* const t_cam_odo, + const T* const p_odo, + const T* const att_odo, + const T* const point, + T* residuals ) const + { + T q[4], t[3]; + worldToCameraTransform( q_cam_odo, t_cam_odo, p_odo, att_odo, q, t, m_optimize_cam_odo_z ); + + std::vector< T > intrinsic_params( m_intrinsic_params.begin( ), m_intrinsic_params.end( ) ); + Eigen::Matrix< T, 3, 1 > P( point[0], point[1], point[2] ); + + // project 3D object point to the image plane + Eigen::Matrix< T, 2, 1 > predicted_p; + CameraT::spaceToPlane( intrinsic_params.data( ), q, t, P, predicted_p ); + + Eigen::Matrix< T, 2, 1 > err = predicted_p - m_observed_p.cast< T >( ); + Eigen::Matrix< T, 2, 1 > err_weighted = m_sqrtPrecisionMat.cast< T >( ) * err; + + residuals[0] = err_weighted( 0 ); + residuals[1] = err_weighted( 1 ); + + return true; + } + + // variables: 3D point + template< typename T > + bool operator( )( const T* const point, T* residuals 
) const + { + T q_cam_odo[4] = { T( m_cam_odo_q.coeffs( )( 0 ) ), + T( m_cam_odo_q.coeffs( )( 1 ) ), + T( m_cam_odo_q.coeffs( )( 2 ) ), + T( m_cam_odo_q.coeffs( )( 3 ) ) }; + T t_cam_odo[3] = { T( m_cam_odo_t( 0 ) ), T( m_cam_odo_t( 1 ) ), T( m_cam_odo_t( 2 ) ) }; + T p_odo[3] = { T( m_odo_pos( 0 ) ), T( m_odo_pos( 1 ) ), T( m_odo_pos( 2 ) ) }; + T att_odo[3] = { T( m_odo_att( 0 ) ), T( m_odo_att( 1 ) ), T( m_odo_att( 2 ) ) }; + T q[4], t[3]; + + worldToCameraTransform( q_cam_odo, t_cam_odo, p_odo, att_odo, q, t, m_optimize_cam_odo_z ); + + std::vector< T > intrinsic_params( m_intrinsic_params.begin( ), m_intrinsic_params.end( ) ); + Eigen::Matrix< T, 3, 1 > P( point[0], point[1], point[2] ); + + // project 3D object point to the image plane + Eigen::Matrix< T, 2, 1 > predicted_p; + CameraT::spaceToPlane( intrinsic_params.data( ), q, t, P, predicted_p ); + + residuals[0] = predicted_p( 0 ) - T( m_observed_p( 0 ) ); + residuals[1] = predicted_p( 1 ) - T( m_observed_p( 1 ) ); + + return true; + } + + private: + // camera intrinsics + std::vector< double > m_intrinsic_params; + + // observed camera-odometry transform + Eigen::Quaterniond m_cam_odo_q; + Eigen::Vector3d m_cam_odo_t; + + // observed odometry + Eigen::Vector3d m_odo_pos; + Eigen::Vector3d m_odo_att; + + // observed 2D point + Eigen::Vector2d m_observed_p; + + Eigen::Matrix2d m_sqrtPrecisionMat; + + bool m_optimize_cam_odo_z; +}; + +// variables: camera intrinsics and camera extrinsics +template< class CameraT > +class StereoReprojectionError +{ + public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + StereoReprojectionError( const Eigen::Vector3d& observed_P, + const Eigen::Vector2d& observed_p_l, + const Eigen::Vector2d& observed_p_r ) + : m_observed_P( observed_P ) + , m_observed_p_l( observed_p_l ) + , m_observed_p_r( observed_p_r ) + { + } + + template< typename T > + bool operator( )( const T* const intrinsic_params_l, + const T* const intrinsic_params_r, + const T* const q_l, + const T* const t_l, + const T* 
const q_l_r, + const T* const t_l_r, + T* residuals ) const + { + Eigen::Matrix< T, 3, 1 > P; + P( 0 ) = T( m_observed_P( 0 ) ); + P( 1 ) = T( m_observed_P( 1 ) ); + P( 2 ) = T( m_observed_P( 2 ) ); + + Eigen::Matrix< T, 2, 1 > predicted_p_l; + CameraT::spaceToPlane( intrinsic_params_l, q_l, t_l, P, predicted_p_l ); + + Eigen::Quaternion< T > q_r = Eigen::Quaternion< T >( q_l_r ) * Eigen::Quaternion< T >( q_l ); + + Eigen::Matrix< T, 3, 1 > t_r; + t_r( 0 ) = t_l[0]; + t_r( 1 ) = t_l[1]; + t_r( 2 ) = t_l[2]; + + t_r = Eigen::Quaternion< T >( q_l_r ) * t_r; + t_r( 0 ) += t_l_r[0]; + t_r( 1 ) += t_l_r[1]; + t_r( 2 ) += t_l_r[2]; + + Eigen::Matrix< T, 2, 1 > predicted_p_r; + CameraT::spaceToPlane( intrinsic_params_r, q_r.coeffs( ).data( ), t_r.data( ), P, predicted_p_r ); + + residuals[0] = predicted_p_l( 0 ) - T( m_observed_p_l( 0 ) ); + residuals[1] = predicted_p_l( 1 ) - T( m_observed_p_l( 1 ) ); + residuals[2] = predicted_p_r( 0 ) - T( m_observed_p_r( 0 ) ); + residuals[3] = predicted_p_r( 1 ) - T( m_observed_p_r( 1 ) ); + + return true; + } + + private: + // observed 3D point + Eigen::Vector3d m_observed_P; + + // observed 2D point + Eigen::Vector2d m_observed_p_l; + Eigen::Vector2d m_observed_p_r; +}; + +template< class CameraT > +class ComprehensionError +{ + public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + ComprehensionError( const Eigen::Vector3d& observed_P, const Eigen::Vector2d& observed_p ) + : m_observed_P( observed_P ) + , m_observed_p( observed_p ) + , m_sqrtPrecisionMat( Eigen::Matrix2d::Identity( ) ) + { + } + + template< typename T > + bool operator( )( const T* const intrinsic_params, const T* const q, const T* const t, T* residuals ) const + { + { + Eigen::Matrix< T, 2, 1 > p = m_observed_p.cast< T >( ); + + Eigen::Matrix< T, 3, 1 > predicted_img_P; + CameraT::LiftToSphere( intrinsic_params, p, predicted_img_P ); + + Eigen::Matrix< T, 2, 1 > predicted_p; + CameraT::SphereToPlane( intrinsic_params, predicted_img_P, predicted_p ); + + Eigen::Matrix< T, 
2, 1 > e = predicted_p - m_observed_p.cast< T >( ); + + Eigen::Matrix< T, 2, 1 > e_weighted = m_sqrtPrecisionMat.cast< T >( ) * e; + + residuals[0] = e_weighted( 0 ); + residuals[1] = e_weighted( 1 ); + } + + { + Eigen::Matrix< T, 3, 1 > P = m_observed_P.cast< T >( ); + + Eigen::Matrix< T, 2, 1 > predicted_p; + CameraT::spaceToPlane( intrinsic_params, q, t, P, predicted_p ); + + Eigen::Matrix< T, 2, 1 > e = predicted_p - m_observed_p.cast< T >( ); + + Eigen::Matrix< T, 2, 1 > e_weighted = m_sqrtPrecisionMat.cast< T >( ) * e; + + residuals[2] = e_weighted( 0 ); + residuals[3] = e_weighted( 1 ); + } + + return true; + } + + // private: + // camera intrinsics + // std::vector m_intrinsic_params; + + // observed 3D point + Eigen::Vector3d m_observed_P; + + // observed 2D point + Eigen::Vector2d m_observed_p; + + // square root of precision matrix + Eigen::Matrix2d m_sqrtPrecisionMat; +}; + +boost::shared_ptr< CostFunctionFactory > CostFunctionFactory::m_instance; + +CostFunctionFactory::CostFunctionFactory( ) {} + +boost::shared_ptr< CostFunctionFactory > +CostFunctionFactory::instance( void ) +{ + if ( m_instance.get( ) == 0 ) + { + m_instance.reset( new CostFunctionFactory ); + } + + return m_instance; +} + +ceres::CostFunction* +CostFunctionFactory::generateCostFunction( const CameraConstPtr& camera, + const Eigen::Vector3d& observed_P, + const Eigen::Vector2d& observed_p, + int flags ) const +{ + ceres::CostFunction* costFunction = 0; + + std::vector< double > intrinsic_params; + camera->writeParameters( intrinsic_params ); + + switch ( flags ) + { + case CAMERA_INTRINSICS | CAMERA_POSE: + switch ( camera->modelType( ) ) + { + case Camera::KANNALA_BRANDT: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError1< EquidistantCamera >, 2, 8, 4, 3 >( + new ReprojectionError1< EquidistantCamera >( observed_P, observed_p ) ); + break; + case Camera::PINHOLE: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError1< PinholeCamera >, 2, 8, 4, 3 
>( + new ReprojectionError1< PinholeCamera >( observed_P, observed_p ) ); + break; + case Camera::PINHOLE_FULL: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError1< PinholeFullCamera >, 2, 12, 4, 3 >( + new ReprojectionError1< PinholeFullCamera >( observed_P, observed_p ) ); + break; + case Camera::MEI: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError1< CataCamera >, 2, 9, 4, 3 >( + new ReprojectionError1< CataCamera >( observed_P, observed_p ) ); + break; + case Camera::SCARAMUZZA: + costFunction + = new ceres::AutoDiffCostFunction< ComprehensionError< OCAMCamera >, 4, SCARAMUZZA_CAMERA_NUM_PARAMS, 4, 3 >( + new ComprehensionError< OCAMCamera >( observed_P, observed_p ) ); + // new ceres::AutoDiffCostFunction, 2, + // SCARAMUZZA_CAMERA_NUM_PARAMS, 4, 3>( new + // ReprojectionError1(observed_P, observed_p)); + break; + } + break; + case CAMERA_ODOMETRY_TRANSFORM | ODOMETRY_6D_POSE: + switch ( camera->modelType( ) ) + { + case Camera::KANNALA_BRANDT: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError1< EquidistantCamera >, 2, 4, 3, 3, 3 >( + new ReprojectionError1< EquidistantCamera >( intrinsic_params, observed_P, observed_p ) ); + break; + case Camera::PINHOLE: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError1< PinholeCamera >, 2, 4, 3, 3, 3 >( + new ReprojectionError1< PinholeCamera >( intrinsic_params, observed_P, observed_p ) ); + break; + case Camera::PINHOLE_FULL: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError1< PinholeFullCamera >, 2, 4, 3, 3, 3 >( + new ReprojectionError1< PinholeFullCamera >( intrinsic_params, observed_P, observed_p ) ); + break; + case Camera::MEI: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError1< CataCamera >, 2, 4, 3, 3, 3 >( + new ReprojectionError1< CataCamera >( intrinsic_params, observed_P, observed_p ) ); + break; + case Camera::SCARAMUZZA: + costFunction + = new ceres::AutoDiffCostFunction< 
ReprojectionError1< OCAMCamera >, 2, 4, 3, 3, 3 >( + new ReprojectionError1< OCAMCamera >( intrinsic_params, observed_P, observed_p ) ); + break; + } + break; + } + + return costFunction; +} + +ceres::CostFunction* +CostFunctionFactory::generateCostFunction( const CameraConstPtr& camera, + const Eigen::Vector3d& observed_P, + const Eigen::Vector2d& observed_p, + const Eigen::Matrix2d& sqrtPrecisionMat, + int flags ) const +{ + ceres::CostFunction* costFunction = 0; + + std::vector< double > intrinsic_params; + camera->writeParameters( intrinsic_params ); + + switch ( flags ) + { + case CAMERA_INTRINSICS | CAMERA_POSE: + switch ( camera->modelType( ) ) + { + case Camera::KANNALA_BRANDT: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError1< EquidistantCamera >, 2, 8, 4, 3 >( + new ReprojectionError1< EquidistantCamera >( observed_P, observed_p, sqrtPrecisionMat ) ); + break; + case Camera::PINHOLE: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError1< PinholeCamera >, 2, 8, 4, 3 >( + new ReprojectionError1< PinholeCamera >( observed_P, observed_p, sqrtPrecisionMat ) ); + break; + case Camera::PINHOLE_FULL: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError1< PinholeFullCamera >, 2, 12, 4, 3 >( + new ReprojectionError1< PinholeFullCamera >( observed_P, observed_p, sqrtPrecisionMat ) ); + break; + case Camera::MEI: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError1< CataCamera >, 2, 9, 4, 3 >( + new ReprojectionError1< CataCamera >( observed_P, observed_p, sqrtPrecisionMat ) ); + break; + case Camera::SCARAMUZZA: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError1< OCAMCamera >, 2, SCARAMUZZA_CAMERA_NUM_PARAMS, 4, 3 >( + new ReprojectionError1< OCAMCamera >( observed_P, observed_p, sqrtPrecisionMat ) ); + break; + } + break; + } + + return costFunction; +} + +ceres::CostFunction* +CostFunctionFactory::generateCostFunction( const CameraConstPtr& camera, + const Eigen::Vector2d& 
observed_p, + int flags, + bool optimize_cam_odo_z ) const +{ + ceres::CostFunction* costFunction = 0; + + std::vector< double > intrinsic_params; + camera->writeParameters( intrinsic_params ); + + switch ( flags ) + { + case CAMERA_POSE | POINT_3D: + switch ( camera->modelType( ) ) + { + case Camera::KANNALA_BRANDT: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError2< EquidistantCamera >, 2, 4, 3, 3 >( + new ReprojectionError2< EquidistantCamera >( intrinsic_params, observed_p ) ); + break; + case Camera::PINHOLE: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError2< PinholeCamera >, 2, 4, 3, 3 >( + new ReprojectionError2< PinholeCamera >( intrinsic_params, observed_p ) ); + break; + case Camera::PINHOLE_FULL: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError2< PinholeFullCamera >, 2, 4, 3, 3 >( + new ReprojectionError2< PinholeFullCamera >( intrinsic_params, observed_p ) ); + break; + case Camera::MEI: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError2< CataCamera >, 2, 4, 3, 3 >( + new ReprojectionError2< CataCamera >( intrinsic_params, observed_p ) ); + break; + case Camera::SCARAMUZZA: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError2< OCAMCamera >, 2, 4, 3, 3 >( + new ReprojectionError2< OCAMCamera >( intrinsic_params, observed_p ) ); + break; + } + break; + case CAMERA_ODOMETRY_TRANSFORM | ODOMETRY_3D_POSE | POINT_3D: + switch ( camera->modelType( ) ) + { + case Camera::KANNALA_BRANDT: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< EquidistantCamera >, 2, 4, 3, 2, 1, 3 >( + new ReprojectionError3< EquidistantCamera >( intrinsic_params, observed_p ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeCamera >, 2, 4, 2, 2, 1, 3 >( + new ReprojectionError3< PinholeCamera >( intrinsic_params, observed_p ) ); + } + break; + case Camera::PINHOLE: + if ( 
optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeCamera >, 2, 4, 3, 2, 1, 3 >( + new ReprojectionError3< PinholeCamera >( intrinsic_params, observed_p ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeCamera >, 2, 4, 2, 2, 1, 3 >( + new ReprojectionError3< PinholeCamera >( intrinsic_params, observed_p ) ); + } + break; + case Camera::PINHOLE_FULL: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeFullCamera >, 2, 4, 3, 2, 1, 3 >( + new ReprojectionError3< PinholeFullCamera >( intrinsic_params, observed_p ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeFullCamera >, 2, 4, 2, 2, 1, 3 >( + new ReprojectionError3< PinholeFullCamera >( intrinsic_params, observed_p ) ); + } + break; + case Camera::MEI: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< CataCamera >, 2, 4, 3, 2, 1, 3 >( + new ReprojectionError3< CataCamera >( intrinsic_params, observed_p ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< CataCamera >, 2, 4, 2, 2, 1, 3 >( + new ReprojectionError3< CataCamera >( intrinsic_params, observed_p ) ); + } + break; + case Camera::SCARAMUZZA: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< OCAMCamera >, 2, 4, 3, 2, 1, 3 >( + new ReprojectionError3< OCAMCamera >( intrinsic_params, observed_p ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< OCAMCamera >, 2, 4, 2, 2, 1, 3 >( + new ReprojectionError3< OCAMCamera >( intrinsic_params, observed_p ) ); + } + break; + } + break; + case CAMERA_ODOMETRY_TRANSFORM | ODOMETRY_6D_POSE | POINT_3D: + switch ( camera->modelType( ) ) + { + case Camera::KANNALA_BRANDT: + if ( optimize_cam_odo_z ) + { + costFunction + = new 
ceres::AutoDiffCostFunction< ReprojectionError3< EquidistantCamera >, 2, 4, 3, 3, 3, 3 >( + new ReprojectionError3< EquidistantCamera >( intrinsic_params, observed_p ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeCamera >, 2, 4, 2, 3, 3, 3 >( + new ReprojectionError3< PinholeCamera >( intrinsic_params, observed_p ) ); + } + break; + case Camera::PINHOLE: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeCamera >, 2, 4, 3, 3, 3, 3 >( + new ReprojectionError3< PinholeCamera >( intrinsic_params, observed_p ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeCamera >, 2, 4, 2, 3, 3, 3 >( + new ReprojectionError3< PinholeCamera >( intrinsic_params, observed_p ) ); + } + break; + case Camera::PINHOLE_FULL: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeFullCamera >, 2, 4, 3, 3, 3, 3 >( + new ReprojectionError3< PinholeFullCamera >( intrinsic_params, observed_p ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeFullCamera >, 2, 4, 2, 3, 3, 3 >( + new ReprojectionError3< PinholeFullCamera >( intrinsic_params, observed_p ) ); + } + break; + case Camera::MEI: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< CataCamera >, 2, 4, 3, 3, 3, 3 >( + new ReprojectionError3< CataCamera >( intrinsic_params, observed_p ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< CataCamera >, 2, 4, 2, 3, 3, 3 >( + new ReprojectionError3< CataCamera >( intrinsic_params, observed_p ) ); + } + break; + case Camera::SCARAMUZZA: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< OCAMCamera >, 2, 4, 3, 3, 3, 3 >( + new ReprojectionError3< OCAMCamera >( intrinsic_params, observed_p ) ); + 
} + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< OCAMCamera >, 2, 4, 2, 3, 3, 3 >( + new ReprojectionError3< OCAMCamera >( intrinsic_params, observed_p ) ); + } + break; + } + break; + case CAMERA_INTRINSICS | CAMERA_ODOMETRY_TRANSFORM | ODOMETRY_3D_POSE | POINT_3D: + switch ( camera->modelType( ) ) + { + case Camera::KANNALA_BRANDT: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< EquidistantCamera >, 2, 8, 4, 3, 2, 1, 3 >( + new ReprojectionError3< EquidistantCamera >( observed_p ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeCamera >, 2, 8, 4, 2, 2, 1, 3 >( + new ReprojectionError3< PinholeCamera >( observed_p ) ); + } + break; + case Camera::PINHOLE: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeCamera >, 2, 8, 4, 3, 2, 1, 3 >( + new ReprojectionError3< PinholeCamera >( observed_p ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeCamera >, 2, 8, 4, 2, 2, 1, 3 >( + new ReprojectionError3< PinholeCamera >( observed_p ) ); + } + break; + case Camera::PINHOLE_FULL: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeFullCamera >, 2, 12, 4, 3, 2, 1, 3 >( + new ReprojectionError3< PinholeFullCamera >( observed_p ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeFullCamera >, 2, 12, 4, 2, 2, 1, 3 >( + new ReprojectionError3< PinholeFullCamera >( observed_p ) ); + } + break; + case Camera::MEI: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< CataCamera >, 2, 9, 4, 3, 2, 1, 3 >( + new ReprojectionError3< CataCamera >( observed_p ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< CataCamera >, 2, 9, 4, 2, 2, 1, 3 >( + 
new ReprojectionError3< CataCamera >( observed_p ) ); + } + break; + case Camera::SCARAMUZZA: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< OCAMCamera >, 2, SCARAMUZZA_CAMERA_NUM_PARAMS, 4, 3, 2, 1, 3 >( + new ReprojectionError3< OCAMCamera >( observed_p ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< OCAMCamera >, 2, SCARAMUZZA_CAMERA_NUM_PARAMS, 4, 2, 2, 1, 3 >( + new ReprojectionError3< OCAMCamera >( observed_p ) ); + } + break; + } + break; + case CAMERA_INTRINSICS | CAMERA_ODOMETRY_TRANSFORM | ODOMETRY_6D_POSE | POINT_3D: + switch ( camera->modelType( ) ) + { + case Camera::KANNALA_BRANDT: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< EquidistantCamera >, 2, 8, 4, 3, 3, 3, 3 >( + new ReprojectionError3< EquidistantCamera >( observed_p ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeCamera >, 2, 8, 4, 2, 3, 3, 3 >( + new ReprojectionError3< PinholeCamera >( observed_p ) ); + } + break; + case Camera::PINHOLE: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeCamera >, 2, 8, 4, 3, 3, 3, 3 >( + new ReprojectionError3< PinholeCamera >( observed_p ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeCamera >, 2, 8, 4, 2, 3, 3, 3 >( + new ReprojectionError3< PinholeCamera >( observed_p ) ); + } + break; + case Camera::PINHOLE_FULL: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeFullCamera >, 2, 12, 4, 3, 3, 3, 3 >( + new ReprojectionError3< PinholeFullCamera >( observed_p ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeFullCamera >, 2, 12, 4, 2, 3, 3, 3 >( + new ReprojectionError3< PinholeFullCamera >( observed_p ) ); + } + break; + case 
Camera::MEI: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< CataCamera >, 2, 9, 4, 3, 3, 3, 3 >( + new ReprojectionError3< CataCamera >( observed_p ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< CataCamera >, 2, 9, 4, 2, 3, 3, 3 >( + new ReprojectionError3< CataCamera >( observed_p ) ); + } + break; + case Camera::SCARAMUZZA: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< OCAMCamera >, 2, SCARAMUZZA_CAMERA_NUM_PARAMS, 4, 3, 3, 3, 3 >( + new ReprojectionError3< OCAMCamera >( observed_p ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< OCAMCamera >, 2, SCARAMUZZA_CAMERA_NUM_PARAMS, 4, 2, 3, 3, 3 >( + new ReprojectionError3< OCAMCamera >( observed_p ) ); + } + break; + } + break; + } + + return costFunction; +} + +ceres::CostFunction* +CostFunctionFactory::generateCostFunction( const CameraConstPtr& camera, + const Eigen::Vector2d& observed_p, + const Eigen::Matrix2d& sqrtPrecisionMat, + int flags, + bool optimize_cam_odo_z ) const +{ + ceres::CostFunction* costFunction = 0; + + std::vector< double > intrinsic_params; + camera->writeParameters( intrinsic_params ); + + switch ( flags ) + { + case CAMERA_ODOMETRY_TRANSFORM | ODOMETRY_6D_POSE | POINT_3D: + switch ( camera->modelType( ) ) + { + case Camera::KANNALA_BRANDT: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< EquidistantCamera >, 2, 4, 3, 3, 3, 3 >( + new ReprojectionError3< EquidistantCamera >( intrinsic_params, observed_p, sqrtPrecisionMat ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeCamera >, 2, 4, 2, 3, 3, 3 >( + new ReprojectionError3< PinholeCamera >( intrinsic_params, observed_p, sqrtPrecisionMat ) ); + } + break; + case Camera::PINHOLE: + if ( optimize_cam_odo_z ) + { + costFunction + = new 
ceres::AutoDiffCostFunction< ReprojectionError3< PinholeCamera >, 2, 4, 3, 3, 3, 3 >( + new ReprojectionError3< PinholeCamera >( intrinsic_params, observed_p, sqrtPrecisionMat ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeCamera >, 2, 4, 2, 3, 3, 3 >( + new ReprojectionError3< PinholeCamera >( intrinsic_params, observed_p, sqrtPrecisionMat ) ); + } + break; + case Camera::PINHOLE_FULL: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeFullCamera >, 2, 4, 3, 3, 3, 3 >( + new ReprojectionError3< PinholeFullCamera >( intrinsic_params, observed_p, sqrtPrecisionMat ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeFullCamera >, 2, 4, 2, 3, 3, 3 >( + new ReprojectionError3< PinholeFullCamera >( intrinsic_params, observed_p, sqrtPrecisionMat ) ); + } + break; + case Camera::MEI: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< CataCamera >, 2, 4, 3, 3, 3, 3 >( + new ReprojectionError3< CataCamera >( intrinsic_params, observed_p, sqrtPrecisionMat ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< CataCamera >, 2, 4, 2, 3, 3, 3 >( + new ReprojectionError3< CataCamera >( intrinsic_params, observed_p, sqrtPrecisionMat ) ); + } + break; + case Camera::SCARAMUZZA: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< OCAMCamera >, 2, 4, 3, 3, 3, 3 >( + new ReprojectionError3< OCAMCamera >( intrinsic_params, observed_p, sqrtPrecisionMat ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< OCAMCamera >, 2, 4, 2, 3, 3, 3 >( + new ReprojectionError3< OCAMCamera >( intrinsic_params, observed_p, sqrtPrecisionMat ) ); + } + break; + } + break; + case CAMERA_INTRINSICS | CAMERA_ODOMETRY_TRANSFORM | ODOMETRY_6D_POSE | POINT_3D: + switch ( 
camera->modelType( ) )
            {
                case Camera::KANNALA_BRANDT:
                    if ( optimize_cam_odo_z )
                    {
                        costFunction
                            = new ceres::AutoDiffCostFunction< ReprojectionError3< EquidistantCamera >, 2, 8, 4, 3, 3, 3, 3 >(
                                new ReprojectionError3< EquidistantCamera >( observed_p, sqrtPrecisionMat ) );
                    }
                    else
                    {
                        // BUGFIX: this branch previously instantiated
                        // ReprojectionError3< PinholeCamera > for a KANNALA_BRANDT camera.
                        costFunction
                            = new ceres::AutoDiffCostFunction< ReprojectionError3< EquidistantCamera >, 2, 8, 4, 2, 3, 3, 3 >(
                                new ReprojectionError3< EquidistantCamera >( observed_p, sqrtPrecisionMat ) );
                    }
                    break;
                case Camera::PINHOLE:
                    if ( optimize_cam_odo_z )
                    {
                        costFunction
                            = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeCamera >, 2, 8, 4, 3, 3, 3, 3 >(
                                new ReprojectionError3< PinholeCamera >( observed_p, sqrtPrecisionMat ) );
                    }
                    else
                    {
                        costFunction
                            = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeCamera >, 2, 8, 4, 2, 3, 3, 3 >(
                                new ReprojectionError3< PinholeCamera >( observed_p, sqrtPrecisionMat ) );
                    }
                    break;
                case Camera::PINHOLE_FULL:
                    if ( optimize_cam_odo_z )
                    {
                        costFunction
                            = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeFullCamera >, 2, 12, 4, 3, 3, 3, 3 >(
                                new ReprojectionError3< PinholeFullCamera >( observed_p, sqrtPrecisionMat ) );
                    }
                    else
                    {
                        costFunction
                            = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeFullCamera >, 2, 12, 4, 2, 3, 3, 3 >(
                                new ReprojectionError3< PinholeFullCamera >( observed_p, sqrtPrecisionMat ) );
                    }
                    break;
                case Camera::MEI:
                    if ( optimize_cam_odo_z )
                    {
                        costFunction
                            = new ceres::AutoDiffCostFunction< ReprojectionError3< CataCamera >, 2, 9, 4, 3, 3, 3, 3 >(
                                new ReprojectionError3< CataCamera >( observed_p, sqrtPrecisionMat ) );
                    }
                    else
                    {
                        costFunction
                            = new ceres::AutoDiffCostFunction< ReprojectionError3< CataCamera >, 2, 9, 4, 2, 3, 3, 3 >(
                                new ReprojectionError3< CataCamera >( observed_p, sqrtPrecisionMat ) );
                    }
                    break;
                case Camera::SCARAMUZZA:
                    if ( optimize_cam_odo_z )
                    {
                        costFunction
                            = new
ceres::AutoDiffCostFunction< ReprojectionError3< OCAMCamera >, 2, SCARAMUZZA_CAMERA_NUM_PARAMS, 4, 3, 3, 3, 3 >( + new ReprojectionError3< OCAMCamera >( observed_p, sqrtPrecisionMat ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< OCAMCamera >, 2, SCARAMUZZA_CAMERA_NUM_PARAMS, 4, 2, 3, 3, 3 >( + new ReprojectionError3< OCAMCamera >( observed_p, sqrtPrecisionMat ) ); + } + break; + } + break; + } + + return costFunction; +} + +ceres::CostFunction* +CostFunctionFactory::generateCostFunction( const CameraConstPtr& camera, + const Eigen::Vector3d& odo_pos, + const Eigen::Vector3d& odo_att, + const Eigen::Vector2d& observed_p, + int flags, + bool optimize_cam_odo_z ) const +{ + ceres::CostFunction* costFunction = 0; + + std::vector< double > intrinsic_params; + camera->writeParameters( intrinsic_params ); + + switch ( flags ) + { + case CAMERA_ODOMETRY_TRANSFORM | POINT_3D: + switch ( camera->modelType( ) ) + { + case Camera::KANNALA_BRANDT: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< EquidistantCamera >, 2, 4, 3, 3 >( + new ReprojectionError3< EquidistantCamera >( + intrinsic_params, odo_pos, odo_att, observed_p, optimize_cam_odo_z ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< EquidistantCamera >, 2, 4, 2, 3 >( + new ReprojectionError3< EquidistantCamera >( + intrinsic_params, odo_pos, odo_att, observed_p, optimize_cam_odo_z ) ); + } + break; + case Camera::PINHOLE: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeCamera >, 2, 4, 3, 3 >( + new ReprojectionError3< PinholeCamera >( + intrinsic_params, odo_pos, odo_att, observed_p, optimize_cam_odo_z ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeCamera >, 2, 4, 2, 3 >( + new ReprojectionError3< PinholeCamera >( + intrinsic_params, odo_pos, odo_att, observed_p, 
optimize_cam_odo_z ) ); + } + break; + case Camera::PINHOLE_FULL: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeFullCamera >, 2, 4, 3, 3 >( + new ReprojectionError3< PinholeFullCamera >( + intrinsic_params, odo_pos, odo_att, observed_p, optimize_cam_odo_z ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeFullCamera >, 2, 4, 2, 3 >( + new ReprojectionError3< PinholeFullCamera >( + intrinsic_params, odo_pos, odo_att, observed_p, optimize_cam_odo_z ) ); + } + break; + case Camera::MEI: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< CataCamera >, 2, 4, 3, 3 >( + new ReprojectionError3< CataCamera >( + intrinsic_params, odo_pos, odo_att, observed_p, optimize_cam_odo_z ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< CataCamera >, 2, 4, 2, 3 >( + new ReprojectionError3< CataCamera >( + intrinsic_params, odo_pos, odo_att, observed_p, optimize_cam_odo_z ) ); + } + break; + case Camera::SCARAMUZZA: + if ( optimize_cam_odo_z ) + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< OCAMCamera >, 2, 4, 3, 3 >( + new ReprojectionError3< OCAMCamera >( + intrinsic_params, odo_pos, odo_att, observed_p, optimize_cam_odo_z ) ); + } + else + { + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< OCAMCamera >, 2, 4, 2, 3 >( + new ReprojectionError3< OCAMCamera >( + intrinsic_params, odo_pos, odo_att, observed_p, optimize_cam_odo_z ) ); + } + break; + } + break; + } + + return costFunction; +} + +ceres::CostFunction* +CostFunctionFactory::generateCostFunction( const CameraConstPtr& camera, + const Eigen::Quaterniond& cam_odo_q, + const Eigen::Vector3d& cam_odo_t, + const Eigen::Vector3d& odo_pos, + const Eigen::Vector3d& odo_att, + const Eigen::Vector2d& observed_p, + int flags ) const +{ + ceres::CostFunction* costFunction = 0; + + 
std::vector< double > intrinsic_params; + camera->writeParameters( intrinsic_params ); + + switch ( flags ) + { + case POINT_3D: + switch ( camera->modelType( ) ) + { + case Camera::KANNALA_BRANDT: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< EquidistantCamera >, 2, 3 >( + new ReprojectionError3< EquidistantCamera >( + intrinsic_params, cam_odo_q, cam_odo_t, odo_pos, odo_att, observed_p ) ); + break; + case Camera::PINHOLE: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeCamera >, 2, 3 >( + new ReprojectionError3< PinholeCamera >( + intrinsic_params, cam_odo_q, cam_odo_t, odo_pos, odo_att, observed_p ) ); + break; + case Camera::PINHOLE_FULL: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< PinholeFullCamera >, 2, 3 >( + new ReprojectionError3< PinholeFullCamera >( + intrinsic_params, cam_odo_q, cam_odo_t, odo_pos, odo_att, observed_p ) ); + break; + case Camera::MEI: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< CataCamera >, 2, 3 >( + new ReprojectionError3< CataCamera >( + intrinsic_params, cam_odo_q, cam_odo_t, odo_pos, odo_att, observed_p ) ); + break; + case Camera::SCARAMUZZA: + costFunction + = new ceres::AutoDiffCostFunction< ReprojectionError3< OCAMCamera >, 2, 3 >( + new ReprojectionError3< OCAMCamera >( + intrinsic_params, cam_odo_q, cam_odo_t, odo_pos, odo_att, observed_p ) ); + break; + } + break; + } + + return costFunction; +} + +ceres::CostFunction* +CostFunctionFactory::generateCostFunction( const CameraConstPtr& cameraL, + const CameraConstPtr& cameraR, + const Eigen::Vector3d& observed_P, + const Eigen::Vector2d& observed_p_l, + const Eigen::Vector2d& observed_p_r ) const +{ + ceres::CostFunction* costFunction = 0; + + if ( cameraL->modelType( ) != cameraR->modelType( ) ) + { + return costFunction; + } + + switch ( cameraL->modelType( ) ) + { + case Camera::KANNALA_BRANDT: + costFunction + = new ceres::AutoDiffCostFunction< 
StereoReprojectionError< EquidistantCamera >, 4, 8, 8, 4, 3, 4, 3 >( + new StereoReprojectionError< EquidistantCamera >( observed_P, observed_p_l, observed_p_r ) ); + break; + case Camera::PINHOLE: + costFunction + = new ceres::AutoDiffCostFunction< StereoReprojectionError< PinholeCamera >, 4, 8, 8, 4, 3, 4, 3 >( + new StereoReprojectionError< PinholeCamera >( observed_P, observed_p_l, observed_p_r ) ); + break; + case Camera::PINHOLE_FULL: + costFunction + = new ceres::AutoDiffCostFunction< StereoReprojectionError< PinholeFullCamera >, 4, 12, 12, 4, 3, 4, 3 >( + new StereoReprojectionError< PinholeFullCamera >( observed_P, observed_p_l, observed_p_r ) ); + break; + case Camera::MEI: + costFunction + = new ceres::AutoDiffCostFunction< StereoReprojectionError< CataCamera >, 4, 9, 9, 4, 3, 4, 3 >( + new StereoReprojectionError< CataCamera >( observed_P, observed_p_l, observed_p_r ) ); + break; + case Camera::SCARAMUZZA: + costFunction + = new ceres::AutoDiffCostFunction< StereoReprojectionError< OCAMCamera >, 4, SCARAMUZZA_CAMERA_NUM_PARAMS, SCARAMUZZA_CAMERA_NUM_PARAMS, 4, 3, 4, 3 >( + new StereoReprojectionError< OCAMCamera >( observed_P, observed_p_l, observed_p_r ) ); + break; + } + + return costFunction; +} +} diff --git a/camera_models/src/camera_models/EquidistantCamera.cc b/camera_models/src/camera_models/EquidistantCamera.cc new file mode 100644 index 0000000..480f51a --- /dev/null +++ b/camera_models/src/camera_models/EquidistantCamera.cc @@ -0,0 +1,820 @@ +#include "camodocal/camera_models/EquidistantCamera.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "camodocal/gpl/gpl.h" + +namespace camodocal +{ + +EquidistantCamera::Parameters::Parameters() + : Camera::Parameters(KANNALA_BRANDT) + , m_k2(0.0) + , m_k3(0.0) + , m_k4(0.0) + , m_k5(0.0) + , m_mu(0.0) + , m_mv(0.0) + , m_u0(0.0) + , m_v0(0.0) +{ + +} + +EquidistantCamera::Parameters::Parameters(const std::string& cameraName, + int w, int h, + double k2, 
double k3, double k4, double k5, + double mu, double mv, + double u0, double v0) + : Camera::Parameters(KANNALA_BRANDT, cameraName, w, h) + , m_k2(k2) + , m_k3(k3) + , m_k4(k4) + , m_k5(k5) + , m_mu(mu) + , m_mv(mv) + , m_u0(u0) + , m_v0(v0) +{ + +} + +double& +EquidistantCamera::Parameters::k2(void) +{ + return m_k2; +} + +double& +EquidistantCamera::Parameters::k3(void) +{ + return m_k3; +} + +double& +EquidistantCamera::Parameters::k4(void) +{ + return m_k4; +} + +double& +EquidistantCamera::Parameters::k5(void) +{ + return m_k5; +} + +double& +EquidistantCamera::Parameters::mu(void) +{ + return m_mu; +} + +double& +EquidistantCamera::Parameters::mv(void) +{ + return m_mv; +} + +double& +EquidistantCamera::Parameters::u0(void) +{ + return m_u0; +} + +double& +EquidistantCamera::Parameters::v0(void) +{ + return m_v0; +} + +double +EquidistantCamera::Parameters::k2(void) const +{ + return m_k2; +} + +double +EquidistantCamera::Parameters::k3(void) const +{ + return m_k3; +} + +double +EquidistantCamera::Parameters::k4(void) const +{ + return m_k4; +} + +double +EquidistantCamera::Parameters::k5(void) const +{ + return m_k5; +} + +double +EquidistantCamera::Parameters::mu(void) const +{ + return m_mu; +} + +double +EquidistantCamera::Parameters::mv(void) const +{ + return m_mv; +} + +double +EquidistantCamera::Parameters::u0(void) const +{ + return m_u0; +} + +double +EquidistantCamera::Parameters::v0(void) const +{ + return m_v0; +} + +bool +EquidistantCamera::Parameters::readFromYamlFile(const std::string& filename) +{ + cv::FileStorage fs(filename, cv::FileStorage::READ); + + if (!fs.isOpened()) + { + return false; + } + + if (!fs["model_type"].isNone()) + { + std::string sModelType; + fs["model_type"] >> sModelType; + + if (sModelType.compare("KANNALA_BRANDT") != 0) + { + return false; + } + } + + m_modelType = KANNALA_BRANDT; + fs["camera_name"] >> m_cameraName; + m_imageWidth = static_cast(fs["image_width"]); + m_imageHeight = static_cast(fs["image_height"]); 
+ + cv::FileNode n = fs["projection_parameters"]; + m_k2 = static_cast(n["k2"]); + m_k3 = static_cast(n["k3"]); + m_k4 = static_cast(n["k4"]); + m_k5 = static_cast(n["k5"]); + m_mu = static_cast(n["mu"]); + m_mv = static_cast(n["mv"]); + m_u0 = static_cast(n["u0"]); + m_v0 = static_cast(n["v0"]); + + return true; +} + +void +EquidistantCamera::Parameters::writeToYamlFile(const std::string& filename) const +{ + cv::FileStorage fs(filename, cv::FileStorage::WRITE); + + fs << "model_type" << "KANNALA_BRANDT"; + fs << "camera_name" << m_cameraName; + fs << "image_width" << m_imageWidth; + fs << "image_height" << m_imageHeight; + + // projection: k2, k3, k4, k5, mu, mv, u0, v0 + fs << "projection_parameters"; + fs << "{" << "k2" << m_k2 + << "k3" << m_k3 + << "k4" << m_k4 + << "k5" << m_k5 + << "mu" << m_mu + << "mv" << m_mv + << "u0" << m_u0 + << "v0" << m_v0 << "}"; + + fs.release(); +} + +EquidistantCamera::Parameters& +EquidistantCamera::Parameters::operator=(const EquidistantCamera::Parameters& other) +{ + if (this != &other) + { + m_modelType = other.m_modelType; + m_cameraName = other.m_cameraName; + m_imageWidth = other.m_imageWidth; + m_imageHeight = other.m_imageHeight; + m_k2 = other.m_k2; + m_k3 = other.m_k3; + m_k4 = other.m_k4; + m_k5 = other.m_k5; + m_mu = other.m_mu; + m_mv = other.m_mv; + m_u0 = other.m_u0; + m_v0 = other.m_v0; + } + + return *this; +} + +std::ostream& +operator<< (std::ostream& out, const EquidistantCamera::Parameters& params) +{ + out << "Camera Parameters:" << std::endl; + out << " model_type " << "KANNALA_BRANDT" << std::endl; + out << " camera_name " << params.m_cameraName << std::endl; + out << " image_width " << params.m_imageWidth << std::endl; + out << " image_height " << params.m_imageHeight << std::endl; + + // projection: k2, k3, k4, k5, mu, mv, u0, v0 + out << "Projection Parameters" << std::endl; + out << " k2 " << params.m_k2 << std::endl + << " k3 " << params.m_k3 << std::endl + << " k4 " << params.m_k4 << std::endl + << 
" k5 " << params.m_k5 << std::endl + << " mu " << params.m_mu << std::endl + << " mv " << params.m_mv << std::endl + << " u0 " << params.m_u0 << std::endl + << " v0 " << params.m_v0 << std::endl; + + return out; +} + +EquidistantCamera::EquidistantCamera() + : m_inv_K11(1.0) + , m_inv_K13(0.0) + , m_inv_K22(1.0) + , m_inv_K23(0.0) +{ + +} + +EquidistantCamera::EquidistantCamera(const std::string& cameraName, + int imageWidth, int imageHeight, + double k2, double k3, double k4, double k5, + double mu, double mv, + double u0, double v0) + : mParameters(cameraName, imageWidth, imageHeight, + k2, k3, k4, k5, mu, mv, u0, v0) +{ + // Inverse camera projection matrix parameters + m_inv_K11 = 1.0 / mParameters.mu(); + m_inv_K13 = -mParameters.u0() / mParameters.mu(); + m_inv_K22 = 1.0 / mParameters.mv(); + m_inv_K23 = -mParameters.v0() / mParameters.mv(); +} + +EquidistantCamera::EquidistantCamera(const EquidistantCamera::Parameters& params) + : mParameters(params) +{ + // Inverse camera projection matrix parameters + m_inv_K11 = 1.0 / mParameters.mu(); + m_inv_K13 = -mParameters.u0() / mParameters.mu(); + m_inv_K22 = 1.0 / mParameters.mv(); + m_inv_K23 = -mParameters.v0() / mParameters.mv(); +} + +Camera::ModelType +EquidistantCamera::modelType(void) const +{ + return mParameters.modelType(); +} + +const std::string& +EquidistantCamera::cameraName(void) const +{ + return mParameters.cameraName(); +} + +int +EquidistantCamera::imageWidth(void) const +{ + return mParameters.imageWidth(); +} + +int +EquidistantCamera::imageHeight(void) const +{ + return mParameters.imageHeight(); +} + +void +EquidistantCamera::estimateIntrinsics(const cv::Size& boardSize, + const std::vector< std::vector >& objectPoints, + const std::vector< std::vector >& imagePoints) +{ + Parameters params = getParameters(); + + double u0 = params.imageWidth() / 2.0; + double v0 = params.imageHeight() / 2.0; + + double minReprojErr = std::numeric_limits::max(); + + std::vector rvecs, tvecs; + 
rvecs.assign(objectPoints.size(), cv::Mat()); + tvecs.assign(objectPoints.size(), cv::Mat()); + + params.k2() = 0.0; + params.k3() = 0.0; + params.k4() = 0.0; + params.k5() = 0.0; + params.u0() = u0; + params.v0() = v0; + + // Initialize focal length + // C. Hughes, P. Denny, M. Glavin, and E. Jones, + // Equidistant Fish-Eye Calibration and Rectification by Vanishing Point + // Extraction, PAMI 2010 + // Find circles from rows of chessboard corners, and for each pair + // of circles, find vanishing points: v1 and v2. + // f = ||v1 - v2|| / PI; + double f0 = 0.0; + for (size_t i = 0; i < imagePoints.size(); ++i) + { + std::vector center(boardSize.height); + double radius[boardSize.height]; + for (int r = 0; r < boardSize.height; ++r) + { + std::vector circle; + for (int c = 0; c < boardSize.width; ++c) + { + circle.push_back(imagePoints.at(i).at(r * boardSize.width + c)); + } + + fitCircle(circle, center[r](0), center[r](1), radius[r]); + } + + for (int j = 0; j < boardSize.height; ++j) + { + for (int k = j + 1; k < boardSize.height; ++k) + { + // find distance between pair of vanishing points which + // correspond to intersection points of 2 circles + std::vector ipts; + ipts = intersectCircles(center[j](0), center[j](1), radius[j], + center[k](0), center[k](1), radius[k]); + + if (ipts.size() < 2) + { + continue; + } + + double f = cv::norm(ipts.at(0) - ipts.at(1)) / M_PI; + + params.mu() = f; + params.mv() = f; + + setParameters(params); + + for (size_t l = 0; l < objectPoints.size(); ++l) + { + estimateExtrinsics(objectPoints.at(l), imagePoints.at(l), rvecs.at(l), tvecs.at(l)); + } + + double reprojErr = reprojectionError(objectPoints, imagePoints, rvecs, tvecs, cv::noArray()); + + if (reprojErr < minReprojErr) + { + minReprojErr = reprojErr; + f0 = f; + } + } + } + } + + if (f0 <= 0.0 && minReprojErr >= std::numeric_limits::max()) + { + std::cout << "[" << params.cameraName() << "] " + << "# INFO: kannala-Brandt model fails with given data. 
" << std::endl; + + return; + } + + params.mu() = f0; + params.mv() = f0; + + setParameters(params); +} + +/** + * \brief Lifts a point from the image plane to the unit sphere + * + * \param p image coordinates + * \param P coordinates of the point on the sphere + */ +void +EquidistantCamera::liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const +{ + liftProjective(p, P); +} + +/** + * \brief Lifts a point from the image plane to its projective ray + * + * \param p image coordinates + * \param P coordinates of the projective ray + */ +void +EquidistantCamera::liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const +{ + // Lift points to normalised plane + Eigen::Vector2d p_u; + p_u << m_inv_K11 * p(0) + m_inv_K13, + m_inv_K22 * p(1) + m_inv_K23; + + // Obtain a projective ray + double theta, phi; + backprojectSymmetric(p_u, theta, phi); + + P(0) = sin(theta) * cos(phi); + P(1) = sin(theta) * sin(phi); + P(2) = cos(theta); +} + +/** + * \brief Project a 3D point (\a x,\a y,\a z) to the image plane in (\a u,\a v) + * + * \param P 3D point coordinates + * \param p return value, contains the image point coordinates + */ +void +EquidistantCamera::spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const +{ + double theta = acos(P(2) / P.norm()); + double phi = atan2(P(1), P(0)); + + Eigen::Vector2d p_u = r(mParameters.k2(), mParameters.k3(), mParameters.k4(), mParameters.k5(), theta) * Eigen::Vector2d(cos(phi), sin(phi)); + + // Apply generalised projection matrix + p << mParameters.mu() * p_u(0) + mParameters.u0(), + mParameters.mv() * p_u(1) + mParameters.v0(); +} + + +/** + * \brief Project a 3D point to the image plane and calculate Jacobian + * + * \param P 3D point coordinates + * \param p return value, contains the image point coordinates + */ +void +EquidistantCamera::spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p, + Eigen::Matrix& J) const +{ + double theta = acos(P(2) / P.norm()); + double phi = atan2(P(1), P(0)); + + 
Eigen::Vector2d p_u = r(mParameters.k2(), mParameters.k3(), mParameters.k4(), mParameters.k5(), theta) * Eigen::Vector2d(cos(phi), sin(phi)); + + // Apply generalised projection matrix + p << mParameters.mu() * p_u(0) + mParameters.u0(), + mParameters.mv() * p_u(1) + mParameters.v0(); +} + +/** + * \brief Projects an undistorted 2D point p_u to the image plane + * + * \param p_u 2D point coordinates + * \return image point coordinates + */ +void +EquidistantCamera::undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const +{ +// Eigen::Vector2d p_d; +// +// if (m_noDistortion) +// { +// p_d = p_u; +// } +// else +// { +// // Apply distortion +// Eigen::Vector2d d_u; +// distortion(p_u, d_u); +// p_d = p_u + d_u; +// } +// +// // Apply generalised projection matrix +// p << mParameters.gamma1() * p_d(0) + mParameters.u0(), +// mParameters.gamma2() * p_d(1) + mParameters.v0(); +} + +void +EquidistantCamera::initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale) const +{ + cv::Size imageSize(mParameters.imageWidth(), mParameters.imageHeight()); + + cv::Mat mapX = cv::Mat::zeros(imageSize, CV_32F); + cv::Mat mapY = cv::Mat::zeros(imageSize, CV_32F); + + for (int v = 0; v < imageSize.height; ++v) + { + for (int u = 0; u < imageSize.width; ++u) + { + double mx_u = m_inv_K11 / fScale * u + m_inv_K13 / fScale; + double my_u = m_inv_K22 / fScale * v + m_inv_K23 / fScale; + + double theta, phi; + backprojectSymmetric(Eigen::Vector2d(mx_u, my_u), theta, phi); + + Eigen::Vector3d P; + P << sin(theta) * cos(phi), sin(theta) * sin(phi), cos(theta); + + Eigen::Vector2d p; + spaceToPlane(P, p); + + mapX.at(v,u) = p(0); + mapY.at(v,u) = p(1); + } + } + + cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false); +} + +cv::Mat +EquidistantCamera::initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2, + float fx, float fy, + cv::Size imageSize, + float cx, float cy, + cv::Mat rmat) const +{ + if (imageSize == cv::Size(0, 0)) + { + imageSize = 
cv::Size(mParameters.imageWidth(), mParameters.imageHeight()); + } + + cv::Mat mapX = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F); + cv::Mat mapY = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F); + + Eigen::Matrix3f K_rect; + + if (cx == -1.0f && cy == -1.0f) + { + K_rect << fx, 0, imageSize.width / 2, + 0, fy, imageSize.height / 2, + 0, 0, 1; + } + else + { + K_rect << fx, 0, cx, + 0, fy, cy, + 0, 0, 1; + } + + if (fx == -1.0f || fy == -1.0f) + { + K_rect(0,0) = mParameters.mu(); + K_rect(1,1) = mParameters.mv(); + } + + Eigen::Matrix3f K_rect_inv = K_rect.inverse(); + + Eigen::Matrix3f R, R_inv; + cv::cv2eigen(rmat, R); + R_inv = R.inverse(); + + for (int v = 0; v < imageSize.height; ++v) + { + for (int u = 0; u < imageSize.width; ++u) + { + Eigen::Vector3f xo; + xo << u, v, 1; + + Eigen::Vector3f uo = R_inv * K_rect_inv * xo; + + Eigen::Vector2d p; + spaceToPlane(uo.cast(), p); + + mapX.at(v,u) = p(0); + mapY.at(v,u) = p(1); + } + } + + cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false); + + cv::Mat K_rect_cv; + cv::eigen2cv(K_rect, K_rect_cv); + return K_rect_cv; +} + +int +EquidistantCamera::parameterCount(void) const +{ + return 8; +} + +const EquidistantCamera::Parameters& +EquidistantCamera::getParameters(void) const +{ + return mParameters; +} + +void +EquidistantCamera::setParameters(const EquidistantCamera::Parameters& parameters) +{ + mParameters = parameters; + + // Inverse camera projection matrix parameters + m_inv_K11 = 1.0 / mParameters.mu(); + m_inv_K13 = -mParameters.u0() / mParameters.mu(); + m_inv_K22 = 1.0 / mParameters.mv(); + m_inv_K23 = -mParameters.v0() / mParameters.mv(); +} + +void +EquidistantCamera::readParameters(const std::vector& parameterVec) +{ + if (parameterVec.size() != parameterCount()) + { + return; + } + + Parameters params = getParameters(); + + params.k2() = parameterVec.at(0); + params.k3() = parameterVec.at(1); + params.k4() = parameterVec.at(2); + params.k5() = parameterVec.at(3); + 
params.mu() = parameterVec.at(4); + params.mv() = parameterVec.at(5); + params.u0() = parameterVec.at(6); + params.v0() = parameterVec.at(7); + + setParameters(params); +} + +void +EquidistantCamera::writeParameters(std::vector& parameterVec) const +{ + parameterVec.resize(parameterCount()); + parameterVec.at(0) = mParameters.k2(); + parameterVec.at(1) = mParameters.k3(); + parameterVec.at(2) = mParameters.k4(); + parameterVec.at(3) = mParameters.k5(); + parameterVec.at(4) = mParameters.mu(); + parameterVec.at(5) = mParameters.mv(); + parameterVec.at(6) = mParameters.u0(); + parameterVec.at(7) = mParameters.v0(); +} + +void +EquidistantCamera::writeParametersToYamlFile(const std::string& filename) const +{ + mParameters.writeToYamlFile(filename); +} + +std::string +EquidistantCamera::parametersToString(void) const +{ + std::ostringstream oss; + oss << mParameters; + + return oss.str(); +} + +void +EquidistantCamera::fitOddPoly(const std::vector& x, const std::vector& y, + int n, std::vector& coeffs) const +{ + std::vector pows; + for (int i = 1; i <= n; i += 2) + { + pows.push_back(i); + } + + Eigen::MatrixXd X(x.size(), pows.size()); + Eigen::MatrixXd Y(y.size(), 1); + for (size_t i = 0; i < x.size(); ++i) + { + for (size_t j = 0; j < pows.size(); ++j) + { + X(i,j) = pow(x.at(i), pows.at(j)); + } + Y(i,0) = y.at(i); + } + + Eigen::MatrixXd A = (X.transpose() * X).inverse() * X.transpose() * Y; + + coeffs.resize(A.rows()); + for (int i = 0; i < A.rows(); ++i) + { + coeffs.at(i) = A(i,0); + } +} + +void +EquidistantCamera::backprojectSymmetric(const Eigen::Vector2d& p_u, + double& theta, double& phi) const +{ + double tol = 1e-10; + double p_u_norm = p_u.norm(); + + if (p_u_norm < 1e-10) + { + phi = 0.0; + } + else + { + phi = atan2(p_u(1), p_u(0)); + } + + int npow = 9; + if (mParameters.k5() == 0.0) + { + npow -= 2; + } + if (mParameters.k4() == 0.0) + { + npow -= 2; + } + if (mParameters.k3() == 0.0) + { + npow -= 2; + } + if (mParameters.k2() == 0.0) + { + npow 
-= 2; + } + + Eigen::MatrixXd coeffs(npow + 1, 1); + coeffs.setZero(); + coeffs(0) = -p_u_norm; + coeffs(1) = 1.0; + + if (npow >= 3) + { + coeffs(3) = mParameters.k2(); + } + if (npow >= 5) + { + coeffs(5) = mParameters.k3(); + } + if (npow >= 7) + { + coeffs(7) = mParameters.k4(); + } + if (npow >= 9) + { + coeffs(9) = mParameters.k5(); + } + + if (npow == 1) + { + theta = p_u_norm; + } + else + { + // Get eigenvalues of companion matrix corresponding to polynomial. + // Eigenvalues correspond to roots of polynomial. + Eigen::MatrixXd A(npow, npow); + A.setZero(); + A.block(1, 0, npow - 1, npow - 1).setIdentity(); + A.col(npow - 1) = - coeffs.block(0, 0, npow, 1) / coeffs(npow); + + Eigen::EigenSolver es(A); + Eigen::MatrixXcd eigval = es.eigenvalues(); + + std::vector thetas; + for (int i = 0; i < eigval.rows(); ++i) + { + if (fabs(eigval(i).imag()) > tol) + { + continue; + } + + double t = eigval(i).real(); + + if (t < -tol) + { + continue; + } + else if (t < 0.0) + { + t = 0.0; + } + + thetas.push_back(t); + } + + if (thetas.empty()) + { + theta = p_u_norm; + } + else + { + theta = *std::min_element(thetas.begin(), thetas.end()); + } + } +} + +} diff --git a/camera_models/src/camera_models/PinholeCamera.cc b/camera_models/src/camera_models/PinholeCamera.cc new file mode 100644 index 0000000..9839724 --- /dev/null +++ b/camera_models/src/camera_models/PinholeCamera.cc @@ -0,0 +1,881 @@ +#include "camodocal/camera_models/PinholeCamera.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "camodocal/gpl/gpl.h" + +namespace camodocal +{ + +PinholeCamera::Parameters::Parameters() + : Camera::Parameters(PINHOLE) + , m_k1(0.0) + , m_k2(0.0) + , m_p1(0.0) + , m_p2(0.0) + , m_fx(0.0) + , m_fy(0.0) + , m_cx(0.0) + , m_cy(0.0) +{ + +} + +PinholeCamera::Parameters::Parameters(const std::string& cameraName, + int w, int h, + double k1, double k2, + double p1, double p2, + double fx, double fy, + double cx, double cy) + : 
Camera::Parameters(PINHOLE, cameraName, w, h) + , m_k1(k1) + , m_k2(k2) + , m_p1(p1) + , m_p2(p2) + , m_fx(fx) + , m_fy(fy) + , m_cx(cx) + , m_cy(cy) +{ +} + +double& +PinholeCamera::Parameters::k1(void) +{ + return m_k1; +} + +double& +PinholeCamera::Parameters::k2(void) +{ + return m_k2; +} + +double& +PinholeCamera::Parameters::p1(void) +{ + return m_p1; +} + +double& +PinholeCamera::Parameters::p2(void) +{ + return m_p2; +} + +double& +PinholeCamera::Parameters::fx(void) +{ + return m_fx; +} + +double& +PinholeCamera::Parameters::fy(void) +{ + return m_fy; +} + +double& +PinholeCamera::Parameters::cx(void) +{ + return m_cx; +} + +double& +PinholeCamera::Parameters::cy(void) +{ + return m_cy; +} + +double +PinholeCamera::Parameters::k1(void) const +{ + return m_k1; +} + +double +PinholeCamera::Parameters::k2(void) const +{ + return m_k2; +} + +double +PinholeCamera::Parameters::p1(void) const +{ + return m_p1; +} + +double +PinholeCamera::Parameters::p2(void) const +{ + return m_p2; +} + +double +PinholeCamera::Parameters::fx(void) const +{ + return m_fx; +} + +double +PinholeCamera::Parameters::fy(void) const +{ + return m_fy; +} + +double +PinholeCamera::Parameters::cx(void) const +{ + return m_cx; +} + +double +PinholeCamera::Parameters::cy(void) const +{ + return m_cy; +} + +bool +PinholeCamera::Parameters::readFromYamlFile(const std::string& filename) +{ + cv::FileStorage fs(filename, cv::FileStorage::READ); + + if (!fs.isOpened()) + { + return false; + } + + if (!fs["model_type"].isNone()) + { + std::string sModelType; + fs["model_type"] >> sModelType; + + if (sModelType.compare("PINHOLE") != 0) + { + return false; + } + } + + m_modelType = PINHOLE; + fs["camera_name"] >> m_cameraName; + m_imageWidth = static_cast(fs["image_width"]); + m_imageHeight = static_cast(fs["image_height"]); + + cv::FileNode n = fs["distortion_parameters"]; + m_k1 = static_cast(n["k1"]); + m_k2 = static_cast(n["k2"]); + m_p1 = static_cast(n["p1"]); + m_p2 = static_cast(n["p2"]); + 
+ n = fs["projection_parameters"]; + m_fx = static_cast(n["fx"]); + m_fy = static_cast(n["fy"]); + m_cx = static_cast(n["cx"]); + m_cy = static_cast(n["cy"]); + + return true; +} + +void +PinholeCamera::Parameters::writeToYamlFile(const std::string& filename) const +{ + cv::FileStorage fs(filename, cv::FileStorage::WRITE); + + fs << "model_type" << "PINHOLE"; + fs << "camera_name" << m_cameraName; + fs << "image_width" << m_imageWidth; + fs << "image_height" << m_imageHeight; + + // radial distortion: k1, k2 + // tangential distortion: p1, p2 + fs << "distortion_parameters"; + fs << "{" << "k1" << m_k1 + << "k2" << m_k2 + << "p1" << m_p1 + << "p2" << m_p2 << "}"; + + // projection: fx, fy, cx, cy + fs << "projection_parameters"; + fs << "{" << "fx" << m_fx + << "fy" << m_fy + << "cx" << m_cx + << "cy" << m_cy << "}"; + + fs.release(); +} + +PinholeCamera::Parameters& +PinholeCamera::Parameters::operator=(const PinholeCamera::Parameters& other) +{ + if (this != &other) + { + m_modelType = other.m_modelType; + m_cameraName = other.m_cameraName; + m_imageWidth = other.m_imageWidth; + m_imageHeight = other.m_imageHeight; + m_k1 = other.m_k1; + m_k2 = other.m_k2; + m_p1 = other.m_p1; + m_p2 = other.m_p2; + m_fx = other.m_fx; + m_fy = other.m_fy; + m_cx = other.m_cx; + m_cy = other.m_cy; + } + + return *this; +} + +std::ostream& +operator<< (std::ostream& out, const PinholeCamera::Parameters& params) +{ + out << "Camera Parameters:" << std::endl; + out << " model_type " << "PINHOLE" << std::endl; + out << " camera_name " << params.m_cameraName << std::endl; + out << " image_width " << params.m_imageWidth << std::endl; + out << " image_height " << params.m_imageHeight << std::endl; + + // radial distortion: k1, k2 + // tangential distortion: p1, p2 + out << "Distortion Parameters" << std::endl; + out << " k1 " << params.m_k1 << std::endl + << " k2 " << params.m_k2 << std::endl + << " p1 " << params.m_p1 << std::endl + << " p2 " << params.m_p2 << std::endl; + + // 
projection: fx, fy, cx, cy + out << "Projection Parameters" << std::endl; + out << " fx " << params.m_fx << std::endl + << " fy " << params.m_fy << std::endl + << " cx " << params.m_cx << std::endl + << " cy " << params.m_cy << std::endl; + + return out; +} + +PinholeCamera::PinholeCamera() + : m_inv_K11(1.0) + , m_inv_K13(0.0) + , m_inv_K22(1.0) + , m_inv_K23(0.0) + , m_noDistortion(true) +{ + +} + +PinholeCamera::PinholeCamera(const std::string& cameraName, + int imageWidth, int imageHeight, + double k1, double k2, double p1, double p2, + double fx, double fy, double cx, double cy) + : mParameters(cameraName, imageWidth, imageHeight, + k1, k2, p1, p2, fx, fy, cx, cy) +{ + if ((mParameters.k1() == 0.0) && + (mParameters.k2() == 0.0) && + (mParameters.p1() == 0.0) && + (mParameters.p2() == 0.0)) + { + m_noDistortion = true; + } + else + { + m_noDistortion = false; + } + + // Inverse camera projection matrix parameters + m_inv_K11 = 1.0 / mParameters.fx(); + m_inv_K13 = -mParameters.cx() / mParameters.fx(); + m_inv_K22 = 1.0 / mParameters.fy(); + m_inv_K23 = -mParameters.cy() / mParameters.fy(); +} + +PinholeCamera::PinholeCamera(const PinholeCamera::Parameters& params) + : mParameters(params) +{ + if ((mParameters.k1() == 0.0) && + (mParameters.k2() == 0.0) && + (mParameters.p1() == 0.0) && + (mParameters.p2() == 0.0)) + { + m_noDistortion = true; + } + else + { + m_noDistortion = false; + } + + // Inverse camera projection matrix parameters + m_inv_K11 = 1.0 / mParameters.fx(); + m_inv_K13 = -mParameters.cx() / mParameters.fx(); + m_inv_K22 = 1.0 / mParameters.fy(); + m_inv_K23 = -mParameters.cy() / mParameters.fy(); +} + +Camera::ModelType +PinholeCamera::modelType(void) const +{ + return mParameters.modelType(); +} + +const std::string& +PinholeCamera::cameraName(void) const +{ + return mParameters.cameraName(); +} + +int +PinholeCamera::imageWidth(void) const +{ + return mParameters.imageWidth(); +} + +int +PinholeCamera::imageHeight(void) const +{ + return 
mParameters.imageHeight(); +} + +void +PinholeCamera::estimateIntrinsics(const cv::Size& boardSize, + const std::vector< std::vector >& objectPoints, + const std::vector< std::vector >& imagePoints) +{ + // Z. Zhang, A Flexible New Technique for Camera Calibration, PAMI 2000 + + Parameters params = getParameters(); + + params.k1() = 0.0; + params.k2() = 0.0; + params.p1() = 0.0; + params.p2() = 0.0; + + double cx = params.imageWidth() / 2.0; + double cy = params.imageHeight() / 2.0; + params.cx() = cx; + params.cy() = cy; + + size_t nImages = imagePoints.size(); + + cv::Mat A(nImages * 2, 2, CV_64F); + cv::Mat b(nImages * 2, 1, CV_64F); + + for (size_t i = 0; i < nImages; ++i) + { + const std::vector& oPoints = objectPoints.at(i); + + std::vector M(oPoints.size()); + for (size_t j = 0; j < M.size(); ++j) + { + M.at(j) = cv::Point2f(oPoints.at(j).x, oPoints.at(j).y); + } + + cv::Mat H = cv::findHomography(M, imagePoints.at(i)); + + H.at(0,0) -= H.at(2,0) * cx; + H.at(0,1) -= H.at(2,1) * cx; + H.at(0,2) -= H.at(2,2) * cx; + H.at(1,0) -= H.at(2,0) * cy; + H.at(1,1) -= H.at(2,1) * cy; + H.at(1,2) -= H.at(2,2) * cy; + + double h[3], v[3], d1[3], d2[3]; + double n[4] = {0,0,0,0}; + + for (int j = 0; j < 3; ++j) + { + double t0 = H.at(j,0); + double t1 = H.at(j,1); + h[j] = t0; v[j] = t1; + d1[j] = (t0 + t1) * 0.5; + d2[j] = (t0 - t1) * 0.5; + n[0] += t0 * t0; n[1] += t1 * t1; + n[2] += d1[j] * d1[j]; n[3] += d2[j] * d2[j]; + } + + for (int j = 0; j < 4; ++j) + { + n[j] = 1.0 / sqrt(n[j]); + } + + for (int j = 0; j < 3; ++j) + { + h[j] *= n[0]; v[j] *= n[1]; + d1[j] *= n[2]; d2[j] *= n[3]; + } + + A.at(i * 2, 0) = h[0] * v[0]; + A.at(i * 2, 1) = h[1] * v[1]; + A.at(i * 2 + 1, 0) = d1[0] * d2[0]; + A.at(i * 2 + 1, 1) = d1[1] * d2[1]; + b.at(i * 2, 0) = -h[2] * v[2]; + b.at(i * 2 + 1, 0) = -d1[2] * d2[2]; + } + + cv::Mat f(2, 1, CV_64F); + cv::solve(A, b, f, cv::DECOMP_NORMAL | cv::DECOMP_LU); + + params.fx() = sqrt(fabs(1.0 / f.at(0))); + params.fy() = sqrt(fabs(1.0 / 
f.at(1))); + + setParameters(params); +} + +/** + * \brief Lifts a point from the image plane to the unit sphere + * + * \param p image coordinates + * \param P coordinates of the point on the sphere + */ +void +PinholeCamera::liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const +{ + liftProjective(p, P); + + P.normalize(); +} + +/** + * \brief Lifts a point from the image plane to its projective ray + * + * \param p image coordinates + * \param P coordinates of the projective ray + */ +void +PinholeCamera::liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const +{ + double mx_d, my_d,mx2_d, mxy_d, my2_d, mx_u, my_u; + double rho2_d, rho4_d, radDist_d, Dx_d, Dy_d, inv_denom_d; + //double lambda; + + // Lift points to normalised plane + mx_d = m_inv_K11 * p(0) + m_inv_K13; + my_d = m_inv_K22 * p(1) + m_inv_K23; + + if (m_noDistortion) + { + mx_u = mx_d; + my_u = my_d; + } + else + { + if (0) + { + double k1 = mParameters.k1(); + double k2 = mParameters.k2(); + double p1 = mParameters.p1(); + double p2 = mParameters.p2(); + + // Apply inverse distortion model + // proposed by Heikkila + mx2_d = mx_d*mx_d; + my2_d = my_d*my_d; + mxy_d = mx_d*my_d; + rho2_d = mx2_d+my2_d; + rho4_d = rho2_d*rho2_d; + radDist_d = k1*rho2_d+k2*rho4_d; + Dx_d = mx_d*radDist_d + p2*(rho2_d+2*mx2_d) + 2*p1*mxy_d; + Dy_d = my_d*radDist_d + p1*(rho2_d+2*my2_d) + 2*p2*mxy_d; + inv_denom_d = 1/(1+4*k1*rho2_d+6*k2*rho4_d+8*p1*my_d+8*p2*mx_d); + + mx_u = mx_d - inv_denom_d*Dx_d; + my_u = my_d - inv_denom_d*Dy_d; + } + else + { + // Recursive distortion model + int n = 8; + Eigen::Vector2d d_u; + distortion(Eigen::Vector2d(mx_d, my_d), d_u); + // Approximate value + mx_u = mx_d - d_u(0); + my_u = my_d - d_u(1); + + for (int i = 1; i < n; ++i) + { + distortion(Eigen::Vector2d(mx_u, my_u), d_u); + mx_u = mx_d - d_u(0); + my_u = my_d - d_u(1); + } + } + } + + // Obtain a projective ray + P << mx_u, my_u, 1.0; +} + + +/** + * \brief Project a 3D point (\a x,\a y,\a z) to the image 
plane in (\a u,\a v) + * + * \param P 3D point coordinates + * \param p return value, contains the image point coordinates + */ +void +PinholeCamera::spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const +{ + Eigen::Vector2d p_u, p_d; + + // Project points to the normalised plane + p_u << P(0) / P(2), P(1) / P(2); + + if (m_noDistortion) + { + p_d = p_u; + } + else + { + // Apply distortion + Eigen::Vector2d d_u; + distortion(p_u, d_u); + p_d = p_u + d_u; + } + + // Apply generalised projection matrix + p << mParameters.fx() * p_d(0) + mParameters.cx(), + mParameters.fy() * p_d(1) + mParameters.cy(); +} + +#if 0 +/** + * \brief Project a 3D point to the image plane and calculate Jacobian + * + * \param P 3D point coordinates + * \param p return value, contains the image point coordinates + */ +void +PinholeCamera::spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p, + Eigen::Matrix& J) const +{ + Eigen::Vector2d p_u, p_d; + double norm, inv_denom; + double dxdmx, dydmx, dxdmy, dydmy; + + norm = P.norm(); + // Project points to the normalised plane + inv_denom = 1.0 / P(2); + p_u << inv_denom * P(0), inv_denom * P(1); + + // Calculate jacobian + double dudx = inv_denom; + double dvdx = 0.0; + double dudy = 0.0; + double dvdy = inv_denom; + inv_denom = - inv_denom * inv_denom; + double dudz = P(0) * inv_denom; + double dvdz = P(1) * inv_denom; + + if (m_noDistortion) + { + p_d = p_u; + } + else + { + // Apply distortion + Eigen::Vector2d d_u; + distortion(p_u, d_u); + p_d = p_u + d_u; + } + + double fx = mParameters.fx(); + double fy = mParameters.fy(); + + // Make the product of the jacobians + // and add projection matrix jacobian + inv_denom = fx * (dudx * dxdmx + dvdx * dxdmy); // reuse + dvdx = fy * (dudx * dydmx + dvdx * dydmy); + dudx = inv_denom; + + inv_denom = fx * (dudy * dxdmx + dvdy * dxdmy); // reuse + dvdy = fy * (dudy * dydmx + dvdy * dydmy); + dudy = inv_denom; + + inv_denom = fx * (dudz * dxdmx + dvdz * dxdmy); // reuse + dvdz = fy 
* (dudz * dydmx + dvdz * dydmy); + dudz = inv_denom; + + // Apply generalised projection matrix + p << fx * p_d(0) + mParameters.cx(), + fy * p_d(1) + mParameters.cy(); + + J << dudx, dudy, dudz, + dvdx, dvdy, dvdz; +} +#endif + +/** + * \brief Projects an undistorted 2D point p_u to the image plane + * + * \param p_u 2D point coordinates + * \return image point coordinates + */ +void +PinholeCamera::undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const +{ + Eigen::Vector2d p_d; + + if (m_noDistortion) + { + p_d = p_u; + } + else + { + // Apply distortion + Eigen::Vector2d d_u; + distortion(p_u, d_u); + p_d = p_u + d_u; + } + + // Apply generalised projection matrix + p << mParameters.fx() * p_d(0) + mParameters.cx(), + mParameters.fy() * p_d(1) + mParameters.cy(); +} + +/** + * \brief Apply distortion to input point (from the normalised plane) + * + * \param p_u undistorted coordinates of point on the normalised plane + * \return to obtain the distorted point: p_d = p_u + d_u + */ +void +PinholeCamera::distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u) const +{ + double k1 = mParameters.k1(); + double k2 = mParameters.k2(); + double p1 = mParameters.p1(); + double p2 = mParameters.p2(); + + double mx2_u, my2_u, mxy_u, rho2_u, rad_dist_u; + + mx2_u = p_u(0) * p_u(0); + my2_u = p_u(1) * p_u(1); + mxy_u = p_u(0) * p_u(1); + rho2_u = mx2_u + my2_u; + rad_dist_u = k1 * rho2_u + k2 * rho2_u * rho2_u; + d_u << p_u(0) * rad_dist_u + 2.0 * p1 * mxy_u + p2 * (rho2_u + 2.0 * mx2_u), + p_u(1) * rad_dist_u + 2.0 * p2 * mxy_u + p1 * (rho2_u + 2.0 * my2_u); +} + +/** + * \brief Apply distortion to input point (from the normalised plane) + * and calculate Jacobian + * + * \param p_u undistorted coordinates of point on the normalised plane + * \return to obtain the distorted point: p_d = p_u + d_u + */ +void +PinholeCamera::distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u, + Eigen::Matrix2d& J) const +{ + double k1 = mParameters.k1(); + double k2 
= mParameters.k2(); + double p1 = mParameters.p1(); + double p2 = mParameters.p2(); + + double mx2_u, my2_u, mxy_u, rho2_u, rad_dist_u; + + mx2_u = p_u(0) * p_u(0); + my2_u = p_u(1) * p_u(1); + mxy_u = p_u(0) * p_u(1); + rho2_u = mx2_u + my2_u; + rad_dist_u = k1 * rho2_u + k2 * rho2_u * rho2_u; + d_u << p_u(0) * rad_dist_u + 2.0 * p1 * mxy_u + p2 * (rho2_u + 2.0 * mx2_u), + p_u(1) * rad_dist_u + 2.0 * p2 * mxy_u + p1 * (rho2_u + 2.0 * my2_u); + + double dxdmx = 1.0 + rad_dist_u + k1 * 2.0 * mx2_u + k2 * rho2_u * 4.0 * mx2_u + 2.0 * p1 * p_u(1) + 6.0 * p2 * p_u(0); + double dydmx = k1 * 2.0 * p_u(0) * p_u(1) + k2 * 4.0 * rho2_u * p_u(0) * p_u(1) + p1 * 2.0 * p_u(0) + 2.0 * p2 * p_u(1); + double dxdmy = dydmx; + double dydmy = 1.0 + rad_dist_u + k1 * 2.0 * my2_u + k2 * rho2_u * 4.0 * my2_u + 6.0 * p1 * p_u(1) + 2.0 * p2 * p_u(0); + + J << dxdmx, dxdmy, + dydmx, dydmy; +} + +void +PinholeCamera::initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale) const +{ + cv::Size imageSize(mParameters.imageWidth(), mParameters.imageHeight()); + + cv::Mat mapX = cv::Mat::zeros(imageSize, CV_32F); + cv::Mat mapY = cv::Mat::zeros(imageSize, CV_32F); + + for (int v = 0; v < imageSize.height; ++v) + { + for (int u = 0; u < imageSize.width; ++u) + { + double mx_u = m_inv_K11 / fScale * u + m_inv_K13 / fScale; + double my_u = m_inv_K22 / fScale * v + m_inv_K23 / fScale; + + Eigen::Vector3d P; + P << mx_u, my_u, 1.0; + + Eigen::Vector2d p; + spaceToPlane(P, p); + + mapX.at(v,u) = p(0); + mapY.at(v,u) = p(1); + } + } + + cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false); +} + +cv::Mat +PinholeCamera::initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2, + float fx, float fy, + cv::Size imageSize, + float cx, float cy, + cv::Mat rmat) const +{ + if (imageSize == cv::Size(0, 0)) + { + imageSize = cv::Size(mParameters.imageWidth(), mParameters.imageHeight()); + } + + cv::Mat mapX = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F); + cv::Mat mapY = 
cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F); + + Eigen::Matrix3f R, R_inv; + cv::cv2eigen(rmat, R); + R_inv = R.inverse(); + + // assume no skew + Eigen::Matrix3f K_rect; + + if (cx == -1.0f || cy == -1.0f) + { + K_rect << fx, 0, imageSize.width / 2, + 0, fy, imageSize.height / 2, + 0, 0, 1; + } + else + { + K_rect << fx, 0, cx, + 0, fy, cy, + 0, 0, 1; + } + + if (fx == -1.0f || fy == -1.0f) + { + K_rect(0,0) = mParameters.fx(); + K_rect(1,1) = mParameters.fy(); + } + + Eigen::Matrix3f K_rect_inv = K_rect.inverse(); + + for (int v = 0; v < imageSize.height; ++v) + { + for (int u = 0; u < imageSize.width; ++u) + { + Eigen::Vector3f xo; + xo << u, v, 1; + + Eigen::Vector3f uo = R_inv * K_rect_inv * xo; + + Eigen::Vector2d p; + spaceToPlane(uo.cast(), p); + + mapX.at(v,u) = p(0); + mapY.at(v,u) = p(1); + } + } + + cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false); + + cv::Mat K_rect_cv; + cv::eigen2cv(K_rect, K_rect_cv); + return K_rect_cv; +} + +int +PinholeCamera::parameterCount(void) const +{ + return 8; +} + +const PinholeCamera::Parameters& +PinholeCamera::getParameters(void) const +{ + return mParameters; +} + +void +PinholeCamera::setParameters(const PinholeCamera::Parameters& parameters) +{ + mParameters = parameters; + + if ((mParameters.k1() == 0.0) && + (mParameters.k2() == 0.0) && + (mParameters.p1() == 0.0) && + (mParameters.p2() == 0.0)) + { + m_noDistortion = true; + } + else + { + m_noDistortion = false; + } + + m_inv_K11 = 1.0 / mParameters.fx(); + m_inv_K13 = -mParameters.cx() / mParameters.fx(); + m_inv_K22 = 1.0 / mParameters.fy(); + m_inv_K23 = -mParameters.cy() / mParameters.fy(); +} + +void +PinholeCamera::readParameters(const std::vector& parameterVec) +{ + if ((int)parameterVec.size() != parameterCount()) + { + return; + } + + Parameters params = getParameters(); + + params.k1() = parameterVec.at(0); + params.k2() = parameterVec.at(1); + params.p1() = parameterVec.at(2); + params.p2() = parameterVec.at(3); + params.fx() = 
parameterVec.at(4); + params.fy() = parameterVec.at(5); + params.cx() = parameterVec.at(6); + params.cy() = parameterVec.at(7); + + setParameters(params); +} + +void +PinholeCamera::writeParameters(std::vector& parameterVec) const +{ + parameterVec.resize(parameterCount()); + parameterVec.at(0) = mParameters.k1(); + parameterVec.at(1) = mParameters.k2(); + parameterVec.at(2) = mParameters.p1(); + parameterVec.at(3) = mParameters.p2(); + parameterVec.at(4) = mParameters.fx(); + parameterVec.at(5) = mParameters.fy(); + parameterVec.at(6) = mParameters.cx(); + parameterVec.at(7) = mParameters.cy(); +} + +void +PinholeCamera::writeParametersToYamlFile(const std::string& filename) const +{ + mParameters.writeToYamlFile(filename); +} + +std::string +PinholeCamera::parametersToString(void) const +{ + std::ostringstream oss; + oss << mParameters; + + return oss.str(); +} + +} diff --git a/camera_models/src/camera_models/PinholeFullCamera.cc b/camera_models/src/camera_models/PinholeFullCamera.cc new file mode 100644 index 0000000..5365c06 --- /dev/null +++ b/camera_models/src/camera_models/PinholeFullCamera.cc @@ -0,0 +1,1000 @@ +#include "camodocal/camera_models/PinholeFullCamera.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "camodocal/gpl/gpl.h" + +namespace camodocal +{ + +PinholeFullCamera::Parameters::Parameters( ) +: Camera::Parameters( PINHOLE_FULL ) +, m_k1( 0.0 ) +, m_k2( 0.0 ) +, m_k3( 0.0 ) +, m_k4( 0.0 ) +, m_k5( 0.0 ) +, m_k6( 0.0 ) +, m_p1( 0.0 ) +, m_p2( 0.0 ) +, m_fx( 0.0 ) +, m_fy( 0.0 ) +, m_cx( 0.0 ) +, m_cy( 0.0 ) +{ +} + +PinholeFullCamera::Parameters::Parameters( const std::string& cameraName, // + int w, + int h, + double k1, + double k2, + double k3, + double k4, + double k5, + double k6, + double p1, + double p2, + double fx, + double fy, + double cx, + double cy ) +: Camera::Parameters( PINHOLE_FULL, cameraName, w, h ) +, m_k1( k1 ) +, m_k2( k2 ) +, m_k3( k3 ) +, m_k4( k4 ) +, m_k5( k5 ) +, m_k6( k6 ) +, 
m_p1( p1 ) +, m_p2( p2 ) +, m_fx( fx ) +, m_fy( fy ) +, m_cx( cx ) +, m_cy( cy ) +{ +} + +double& +PinholeFullCamera::Parameters::k1( void ) +{ + return m_k1; +} + +double& +PinholeFullCamera::Parameters::k2( void ) +{ + return m_k2; +} + +double& +PinholeFullCamera::Parameters::k3( ) +{ + return m_k3; +} + +double& +PinholeFullCamera::Parameters::k4( ) +{ + return m_k4; +} + +double& +PinholeFullCamera::Parameters::k5( ) +{ + return m_k5; +} + +double& +PinholeFullCamera::Parameters::k6( ) +{ + return m_k6; +} + +double& +PinholeFullCamera::Parameters::p1( void ) +{ + return m_p1; +} + +double& +PinholeFullCamera::Parameters::p2( void ) +{ + return m_p2; +} + +double& +PinholeFullCamera::Parameters::fx( void ) +{ + return m_fx; +} + +double& +PinholeFullCamera::Parameters::fy( void ) +{ + return m_fy; +} + +double& +PinholeFullCamera::Parameters::cx( void ) +{ + return m_cx; +} + +double& +PinholeFullCamera::Parameters::cy( void ) +{ + return m_cy; +} + +double +PinholeFullCamera::Parameters::k1( void ) const +{ + return m_k1; +} + +double +PinholeFullCamera::Parameters::k2( void ) const +{ + return m_k2; +} + +double +PinholeFullCamera::Parameters::p1( void ) const +{ + return m_p1; +} + +double +PinholeFullCamera::Parameters::p2( void ) const +{ + return m_p2; +} + +double +PinholeFullCamera::Parameters::fx( void ) const +{ + return m_fx; +} + +double +PinholeFullCamera::Parameters::fy( void ) const +{ + return m_fy; +} + +double +PinholeFullCamera::Parameters::cx( void ) const +{ + return m_cx; +} + +double +PinholeFullCamera::Parameters::cy( void ) const +{ + return m_cy; +} + +double +PinholeFullCamera::Parameters::k3( ) const +{ + return m_k3; +} + +double +PinholeFullCamera::Parameters::k4( ) const +{ + return m_k4; +} + +double +PinholeFullCamera::Parameters::k5( ) const +{ + return m_k5; +} + +double +PinholeFullCamera::Parameters::k6( ) const +{ + return m_k6; +} + +bool +PinholeFullCamera::Parameters::readFromYamlFile( const std::string& filename ) +{ + 
cv::FileStorage fs( filename, cv::FileStorage::READ ); + + if ( !fs.isOpened( ) ) + { + return false; + } + + if ( !fs["model_type"].isNone( ) ) + { + std::string sModelType; + fs["model_type"] >> sModelType; + + if ( sModelType.compare( "PINHOLE_FULL" ) != 0 ) + { + return false; + } + } + + m_modelType = PINHOLE_FULL; + fs["camera_name"] >> m_cameraName; + m_imageWidth = static_cast< int >( fs["image_width"] ); + m_imageHeight = static_cast< int >( fs["image_height"] ); + + cv::FileNode n = fs["distortion_parameters"]; + m_k1 = static_cast< double >( n["k1"] ); + m_k2 = static_cast< double >( n["k2"] ); + m_k3 = static_cast< double >( n["k3"] ); + m_k4 = static_cast< double >( n["k4"] ); + m_k5 = static_cast< double >( n["k5"] ); + m_k6 = static_cast< double >( n["k6"] ); + m_p1 = static_cast< double >( n["p1"] ); + m_p2 = static_cast< double >( n["p2"] ); + + n = fs["projection_parameters"]; + m_fx = static_cast< double >( n["fx"] ); + m_fy = static_cast< double >( n["fy"] ); + m_cx = static_cast< double >( n["cx"] ); + m_cy = static_cast< double >( n["cy"] ); + + return true; +} + +void +PinholeFullCamera::Parameters::writeToYamlFile( const std::string& filename ) const +{ + cv::FileStorage fs( filename, cv::FileStorage::WRITE ); + + fs << "model_type" + << "PINHOLE_FULL"; + fs << "camera_name" << m_cameraName; + fs << "image_width" << m_imageWidth; + fs << "image_height" << m_imageHeight; + + // radial distortion: k1, k2 + // tangential distortion: p1, p2 + fs << "distortion_parameters"; + fs << "{" + << "k1" << m_k1 << "k2" << m_k2 << "k3" << m_k3 << "k4" << m_k4 << "k5" << m_k5 + << "k6" << m_k6 << "p1" << m_p1 << "p2" << m_p2 << "}"; + + // projection: fx, fy, cx, cy + fs << "projection_parameters"; + fs << "{" + << "fx" << m_fx << "fy" << m_fy << "cx" << m_cx << "cy" << m_cy << "}"; + + fs.release( ); +} + +PinholeFullCamera::Parameters& +PinholeFullCamera::Parameters::operator=( const PinholeFullCamera::Parameters& other ) +{ + if ( this != &other ) + { + 
m_modelType = other.m_modelType; + m_cameraName = other.m_cameraName; + m_imageWidth = other.m_imageWidth; + m_imageHeight = other.m_imageHeight; + m_k1 = other.m_k1; + m_k2 = other.m_k2; + m_k3 = other.m_k3; + m_k4 = other.m_k4; + m_k5 = other.m_k5; + m_k6 = other.m_k6; + m_p1 = other.m_p1; + m_p2 = other.m_p2; + m_fx = other.m_fx; + m_fy = other.m_fy; + m_cx = other.m_cx; + m_cy = other.m_cy; + } + + return *this; +} + +std::ostream& +operator<<( std::ostream& out, const PinholeFullCamera::Parameters& params ) +{ + out << "Camera Parameters:" << std::endl; + out << " model_type " + << "PINHOLE_FULL" << std::endl; + out << " camera_name " << params.m_cameraName << std::endl; + out << " image_width " << params.m_imageWidth << std::endl; + out << " image_height " << params.m_imageHeight << std::endl; + + // radial distortion: k1, k2 + // tangential distortion: p1, p2 + out << "Distortion Parameters" << std::endl; + out << " k1 " << params.m_k1 << std::endl + << " k2 " << params.m_k2 << std::endl + << " k3 " << params.m_k3 << std::endl + << " k4 " << params.m_k4 << std::endl + << " k5 " << params.m_k5 << std::endl + << " k6 " << params.m_k6 << std::endl + << " p1 " << params.m_p1 << std::endl + << " p2 " << params.m_p2 << std::endl; + + // projection: fx, fy, cx, cy + out << "Projection Parameters" << std::endl; + out << " fx " << params.m_fx << std::endl + << " fy " << params.m_fy << std::endl + << " cx " << params.m_cx << std::endl + << " cy " << params.m_cy << std::endl; + + return out; +} + +PinholeFullCamera::PinholeFullCamera( ) +: m_inv_K11( 1.0 ) +, m_inv_K13( 0.0 ) +, m_inv_K22( 1.0 ) +, m_inv_K23( 0.0 ) +, m_noDistortion( true ) +{ +} + +PinholeFullCamera::PinholeFullCamera( const std::string& cameraName, + int imageWidth, + int imageHeight, + double k1, + double k2, + double k3, + double k4, + double k5, + double k6, + double p1, + double p2, + double fx, + double fy, + double cx, + double cy ) +: mParameters( cameraName, imageWidth, imageHeight, k1, k2, 
k3, k4, k5, k6, p1, p2, fx, fy, cx, cy ) +{ + if ( ( mParameters.k1( ) == 0.0 ) && ( mParameters.k2( ) == 0.0 ) + && ( mParameters.p1( ) == 0.0 ) && ( mParameters.p2( ) == 0.0 ) ) + { + m_noDistortion = true; + } + else + { + m_noDistortion = false; + } + + // Inverse camera projection matrix parameters + m_inv_K11 = 1.0 / mParameters.fx( ); + m_inv_K13 = -mParameters.cx( ) / mParameters.fx( ); + m_inv_K22 = 1.0 / mParameters.fy( ); + m_inv_K23 = -mParameters.cy( ) / mParameters.fy( ); +} + +PinholeFullCamera::PinholeFullCamera( const PinholeFullCamera::Parameters& params ) +: mParameters( params ) +{ + if ( ( mParameters.k1( ) == 0.0 ) && ( mParameters.k2( ) == 0.0 ) + && ( mParameters.p1( ) == 0.0 ) && ( mParameters.p2( ) == 0.0 ) ) + { + m_noDistortion = true; + } + else + { + m_noDistortion = false; + } + + // Inverse camera projection matrix parameters + m_inv_K11 = 1.0 / mParameters.fx( ); + m_inv_K13 = -mParameters.cx( ) / mParameters.fx( ); + m_inv_K22 = 1.0 / mParameters.fy( ); + m_inv_K23 = -mParameters.cy( ) / mParameters.fy( ); +} + +Camera::ModelType +PinholeFullCamera::modelType( void ) const +{ + return mParameters.modelType( ); +} + +const std::string& +PinholeFullCamera::cameraName( void ) const +{ + return mParameters.cameraName( ); +} + +int +PinholeFullCamera::imageWidth( void ) const +{ + return mParameters.imageWidth( ); +} + +int +PinholeFullCamera::imageHeight( void ) const +{ + return mParameters.imageHeight( ); +} + +void +PinholeFullCamera::estimateIntrinsics( const cv::Size& boardSize, + const std::vector< std::vector< cv::Point3f > >& objectPoints, + const std::vector< std::vector< cv::Point2f > >& imagePoints ) +{ + // Z. 
Zhang, A Flexible New Technique for Camera Calibration, PAMI 2000 + + Parameters params = getParameters( ); + + params.k1( ) = 0.0; + params.k2( ) = 0.0; + params.p1( ) = 0.0; + params.p2( ) = 0.0; + + double cx = params.imageWidth( ) / 2.0; + double cy = params.imageHeight( ) / 2.0; + params.cx( ) = cx; + params.cy( ) = cy; + + size_t nImages = imagePoints.size( ); + + cv::Mat A( nImages * 2, 2, CV_64F ); + cv::Mat b( nImages * 2, 1, CV_64F ); + + for ( size_t i = 0; i < nImages; ++i ) + { + const std::vector< cv::Point3f >& oPoints = objectPoints.at( i ); + + std::vector< cv::Point2f > M( oPoints.size( ) ); + for ( size_t j = 0; j < M.size( ); ++j ) + { + M.at( j ) = cv::Point2f( oPoints.at( j ).x, oPoints.at( j ).y ); + } + + cv::Mat H = cv::findHomography( M, imagePoints.at( i ) ); + + H.at< double >( 0, 0 ) -= H.at< double >( 2, 0 ) * cx; + H.at< double >( 0, 1 ) -= H.at< double >( 2, 1 ) * cx; + H.at< double >( 0, 2 ) -= H.at< double >( 2, 2 ) * cx; + H.at< double >( 1, 0 ) -= H.at< double >( 2, 0 ) * cy; + H.at< double >( 1, 1 ) -= H.at< double >( 2, 1 ) * cy; + H.at< double >( 1, 2 ) -= H.at< double >( 2, 2 ) * cy; + + double h[3], v[3], d1[3], d2[3]; + double n[4] = { 0, 0, 0, 0 }; + + for ( int j = 0; j < 3; ++j ) + { + double t0 = H.at< double >( j, 0 ); + double t1 = H.at< double >( j, 1 ); + h[j] = t0; + v[j] = t1; + d1[j] = ( t0 + t1 ) * 0.5; + d2[j] = ( t0 - t1 ) * 0.5; + n[0] += t0 * t0; + n[1] += t1 * t1; + n[2] += d1[j] * d1[j]; + n[3] += d2[j] * d2[j]; + } + + for ( int j = 0; j < 4; ++j ) + { + n[j] = 1.0 / sqrt( n[j] ); + } + + for ( int j = 0; j < 3; ++j ) + { + h[j] *= n[0]; + v[j] *= n[1]; + d1[j] *= n[2]; + d2[j] *= n[3]; + } + + A.at< double >( i * 2, 0 ) = h[0] * v[0]; + A.at< double >( i * 2, 1 ) = h[1] * v[1]; + A.at< double >( i * 2 + 1, 0 ) = d1[0] * d2[0]; + A.at< double >( i * 2 + 1, 1 ) = d1[1] * d2[1]; + b.at< double >( i * 2, 0 ) = -h[2] * v[2]; + b.at< double >( i * 2 + 1, 0 ) = -d1[2] * d2[2]; + } + + cv::Mat f( 2, 1, CV_64F ); 
+ cv::solve( A, b, f, cv::DECOMP_NORMAL | cv::DECOMP_LU ); + + params.fx( ) = sqrt( fabs( 1.0 / f.at< double >( 0 ) ) ); + params.fy( ) = sqrt( fabs( 1.0 / f.at< double >( 1 ) ) ); + + setParameters( params ); +} + +/** + * \brief Lifts a point from the image plane to the unit sphere + * + * \param p image coordinates + * \param P coordinates of the point on the sphere + */ +void +PinholeFullCamera::liftSphere( const Eigen::Vector2d& p, Eigen::Vector3d& P ) const +{ + liftProjective( p, P ); + + P.normalize( ); +} + +/** + * \brief Lifts a point from the image plane to its projective ray + * + * \param p image coordinates + * \param P coordinates of the projective ray + */ +void +PinholeFullCamera::liftProjective( const Eigen::Vector2d& p, Eigen::Vector3d& P ) const +{ + double k1 = mParameters.k1( ); + double k2 = mParameters.k2( ); + double k3 = mParameters.k3( ); + double k4 = mParameters.k4( ); + double k5 = mParameters.k5( ); + double k6 = mParameters.k6( ); + double p1 = mParameters.p1( ); + double p2 = mParameters.p2( ); + + double fx = mParameters.fx( ); + double fy = mParameters.fy( ); + double ifx = 1. / fx; + double ify = 1. 
/ fy; + double cx = mParameters.cx( ); + double cy = mParameters.cy( ); + + // Lift points to normalised plane + double mx_d = ifx * p( 0 ) + m_inv_K13; + double my_d = ify * p( 1 ) + m_inv_K23; + double u = p( 0 ); + double v = p( 1 ); + double x = mx_d; + double y = my_d; + double x0 = x; + double y0 = y; + + double error = std::numeric_limits< double >::max( ); + + int max_cnt = 8; // 5 + int min_error = 0.01; + for ( int j = 0;; j++ ) + { + if ( j > max_cnt ) + break; + if ( error < min_error ) + break; + + double r2 = x * x + y * y; + double icdist = ( 1 + ( ( k6 * r2 + k5 ) * r2 + k4 ) * r2 ) + / ( 1 + ( ( k3 * r2 + k2 ) * r2 + k1 ) * r2 ); + double deltaX = 2 * p1 * x * y + p2 * ( r2 + 2 * x * x ); + double deltaY = p1 * ( r2 + 2 * y * y ) + 2 * p2 * x * y; + + x = ( x0 - deltaX ) * icdist; + y = ( y0 - deltaY ) * icdist; + + if ( 1 ) + { + double r4, r6, a1, a2, a3, cdist, icdist2; + double xd, yd, xd0, yd0; + + r2 = x * x + y * y; + r4 = r2 * r2; + r6 = r4 * r2; + a1 = 2 * x * y; + a2 = r2 + 2 * x * x; + a3 = r2 + 2 * y * y; + cdist = 1 + k1 * r2 + k2 * r4 + k3 * r6; + icdist2 = 1. 
/ ( 1 + k4 * r2 + k5 * r4 + k6 * r6 ); + xd0 = x * cdist * icdist2 + p1 * a1 + p2 * a2; + yd0 = y * cdist * icdist2 + p1 * a3 + p2 * a1; + + double x_proj = xd * fx + cx; + double y_proj = yd * fy + cy; + + error = sqrt( pow( x_proj - u, 2 ) + pow( y_proj - v, 2 ) ); + } + } + + P << x, y, 1.0; +} + +void +PinholeFullCamera::liftProjective( const Eigen::Vector2d& p, Eigen::Vector3d& P, float image_scale ) const +{ + Eigen::Vector2d p_tmp = p / image_scale; // p_tmp is without resize, p is with resize + liftProjective( p_tmp, P ); // p_tmp is without resize +} + +/** + * \brief Project a 3D point (\a x,\a y,\a z) to the image plane in (\a u,\a v) + * + * \param P 3D point coordinates + * \param p return value, contains the image point coordinates + */ +void +PinholeFullCamera::spaceToPlane( const Eigen::Vector3d& P, Eigen::Vector2d& p ) const +{ + Eigen::Vector2d p_u, p_d; + + // Project points to the normalised plane + p_u << P( 0 ) / P( 2 ), P( 1 ) / P( 2 ); + + if ( m_noDistortion ) + { + p_d = p_u; + } + else + { + // Apply distortion + Eigen::Vector2d d_u; + distortion( p_u, d_u ); + p_d = p_u + d_u; + } + + // Apply generalised projection matrix + p << mParameters.fx( ) * p_d( 0 ) + mParameters.cx( ), + mParameters.fy( ) * p_d( 1 ) + mParameters.cy( ); +} + +void +PinholeFullCamera::spaceToPlane( const Eigen::Vector3d& P, Eigen::Vector2d& p, float image_scalse ) const +{ + Eigen::Vector2d p_tmp; + spaceToPlane( P, p_tmp ); + p = p_tmp * image_scalse; +} + +/** + * \brief Project a 3D point to the image plane and calculate Jacobian + * + * \param P 3D point coordinates + * \param p return value, contains the image point coordinates + */ +void +PinholeFullCamera::spaceToPlane( const Eigen::Vector3d& P, + Eigen::Vector2d& p, + Eigen::Matrix< double, 2, 3 >& J ) const +{ + Eigen::Vector2d p_u, p_d; + double norm, inv_denom; + double dxdmx, dydmx, dxdmy, dydmy; + + norm = P.norm( ); + // Project points to the normalised plane + inv_denom = 1.0 / P( 2 ); + p_u << 
inv_denom * P( 0 ), inv_denom * P( 1 ); + + // Calculate jacobian + double dudx = inv_denom; + double dvdx = 0.0; + double dudy = 0.0; + double dvdy = inv_denom; + inv_denom = -inv_denom * inv_denom; + double dudz = P( 0 ) * inv_denom; + double dvdz = P( 1 ) * inv_denom; + + if ( m_noDistortion ) + { + p_d = p_u; + } + else + { + // Apply distortion + Eigen::Vector2d d_u; + distortion( p_u, d_u ); + p_d = p_u + d_u; + } + + double fx = mParameters.fx( ); + double fy = mParameters.fy( ); + + // Make the product of the jacobians + // and add projection matrix jacobian + inv_denom = fx * ( dudx * dxdmx + dvdx * dxdmy ); // reuse + dvdx = fy * ( dudx * dydmx + dvdx * dydmy ); + dudx = inv_denom; + + inv_denom = fx * ( dudy * dxdmx + dvdy * dxdmy ); // reuse + dvdy = fy * ( dudy * dydmx + dvdy * dydmy ); + dudy = inv_denom; + + inv_denom = fx * ( dudz * dxdmx + dvdz * dxdmy ); // reuse + dvdz = fy * ( dudz * dydmx + dvdz * dydmy ); + dudz = inv_denom; + + // Apply generalised projection matrix + p << fx * p_d( 0 ) + mParameters.cx( ), fy * p_d( 1 ) + mParameters.cy( ); + + J << dudx, dudy, dudz, dvdx, dvdy, dvdz; +} + +/** + * \brief Projects an undistorted 2D point p_u to the image plane + * + * \param p_u 2D point coordinates + * \return image point coordinates + */ +void +PinholeFullCamera::undistToPlane( const Eigen::Vector2d& p_u, Eigen::Vector2d& p ) const +{ + Eigen::Vector2d p_d; + + if ( m_noDistortion ) + { + p_d = p_u; + } + else + { + // Apply distortion + Eigen::Vector2d d_u; + distortion( p_u, d_u ); + p_d = p_u + d_u; + } + + // Apply generalised projection matrix + p << mParameters.fx( ) * p_d( 0 ) + mParameters.cx( ), + mParameters.fy( ) * p_d( 1 ) + mParameters.cy( ); +} + +/** + * \brief Apply distortion to input point (from the normalised plane) + * + * \param p_u undistorted coordinates of point on the normalised plane + * \return to obtain the distorted point: p_d = p_u + d_u + */ +void +PinholeFullCamera::distortion( const Eigen::Vector2d& p_u, 
Eigen::Vector2d& d_u ) const +{ + // project 3D object point to the image plane + double k1 = mParameters.k1( ); + double k2 = mParameters.k2( ); + double k3 = mParameters.k3( ); + double k4 = mParameters.k4( ); + double k5 = mParameters.k5( ); + double k6 = mParameters.k6( ); + double p1 = mParameters.p1( ); + double p2 = mParameters.p2( ); + + // Transform to model plane + double x = p_u( 0 ); + double y = p_u( 1 ); + + double r2 = x * x + y * y; + double r4 = r2 * r2; + double r6 = r4 * r2; + double a1 = 2 * x * y; + double a2 = r2 + 2 * x * x; + double a3 = r2 + 2 * y * y; + double cdist = 1 + k1 * r2 + k2 * r4 + k3 * r6; + double icdist2 = 1. / ( 1 + k4 * r2 + k5 * r4 + k6 * r6 ); + + d_u << x * cdist * icdist2 + p1 * a1 + p2 * a2 - x, // + y * cdist * icdist2 + p1 * a3 + p2 * a1 - y; +} + +/** + * \brief Apply distortion to input point (from the normalised plane) + * and calculate Jacobian + * + * \param p_u undistorted coordinates of point on the normalised plane + * \return to obtain the distorted point: p_d = p_u + d_u + */ +void +PinholeFullCamera::distortion( const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u, Eigen::Matrix2d& J ) const +{ + // project 3D object point to the image plane + double k1 = mParameters.k1( ); + double k2 = mParameters.k2( ); + double k3 = mParameters.k3( ); + double k4 = mParameters.k4( ); + double k5 = mParameters.k5( ); + double k6 = mParameters.k6( ); + double p1 = mParameters.p1( ); + double p2 = mParameters.p2( ); + + // Transform to model plane + double x = p_u( 0 ); + double y = p_u( 1 ); + + double r2 = x * x + y * y; + double r4 = r2 * r2; + double r6 = r4 * r2; + double a1 = 2 * x * y; + double a2 = r2 + 2 * x * x; + double a3 = r2 + 2 * y * y; + double cdist = 1 + k1 * r2 + k2 * r4 + k3 * r6; + double icdist2 = 1. 
/ ( 1 + k4 * r2 + k5 * r4 + k6 * r6 ); + + d_u << x * cdist * icdist2 + p1 * a1 + p2 * a2 - x, // + y * cdist * icdist2 + p1 * a3 + p2 * a1 - y; // +} + +void +PinholeFullCamera::initUndistortMap( cv::Mat& map1, cv::Mat& map2, double fScale ) const +{ + cv::Size imageSize( mParameters.imageWidth( ), mParameters.imageHeight( ) ); + + cv::Mat mapX = cv::Mat::zeros( imageSize, CV_32F ); + cv::Mat mapY = cv::Mat::zeros( imageSize, CV_32F ); + + for ( int v = 0; v < imageSize.height; ++v ) + { + for ( int u = 0; u < imageSize.width; ++u ) + { + double mx_u = m_inv_K11 / fScale * u + m_inv_K13 / fScale; + double my_u = m_inv_K22 / fScale * v + m_inv_K23 / fScale; + + Eigen::Vector3d P; + P << mx_u, my_u, 1.0; + + Eigen::Vector2d p; + spaceToPlane( P, p ); + + mapX.at< float >( v, u ) = p( 0 ); + mapY.at< float >( v, u ) = p( 1 ); + } + } + + cv::convertMaps( mapX, mapY, map1, map2, CV_32FC1, false ); +} + +cv::Mat +PinholeFullCamera::initUndistortRectifyMap( +cv::Mat& map1, cv::Mat& map2, float fx, float fy, cv::Size imageSize, float cx, float cy, cv::Mat rmat ) const +{ + if ( imageSize == cv::Size( 0, 0 ) ) + { + imageSize = cv::Size( mParameters.imageWidth( ), mParameters.imageHeight( ) ); + } + + cv::Mat mapX = cv::Mat::zeros( imageSize.height, imageSize.width, CV_32F ); + cv::Mat mapY = cv::Mat::zeros( imageSize.height, imageSize.width, CV_32F ); + + Eigen::Matrix3f R, R_inv; + cv::cv2eigen( rmat, R ); + R_inv = R.inverse( ); + + // assume no skew + Eigen::Matrix3f K_rect; + + if ( cx == -1.0f || cy == -1.0f ) + { + K_rect << fx, 0, imageSize.width / 2, 0, fy, imageSize.height / 2, 0, 0, 1; + } + else + { + K_rect << fx, 0, cx, 0, fy, cy, 0, 0, 1; + } + + if ( fx == -1.0f || fy == -1.0f ) + { + K_rect( 0, 0 ) = mParameters.fx( ); + K_rect( 1, 1 ) = mParameters.fy( ); + } + + Eigen::Matrix3f K_rect_inv = K_rect.inverse( ); + + for ( int v = 0; v < imageSize.height; ++v ) + { + for ( int u = 0; u < imageSize.width; ++u ) + { + Eigen::Vector3f xo; + xo << u, v, 1; + + 
Eigen::Vector3f uo = R_inv * K_rect_inv * xo; + + Eigen::Vector2d p; + spaceToPlane( uo.cast< double >( ), p ); + + mapX.at< float >( v, u ) = p( 0 ); + mapY.at< float >( v, u ) = p( 1 ); + } + } + + cv::convertMaps( mapX, mapY, map1, map2, CV_32FC1, false ); + + cv::Mat K_rect_cv; + cv::eigen2cv( K_rect, K_rect_cv ); + return K_rect_cv; +} + +int +PinholeFullCamera::parameterCount( void ) const +{ + return 12; +} + +const PinholeFullCamera::Parameters& +PinholeFullCamera::getParameters( void ) const +{ + return mParameters; +} + +void +PinholeFullCamera::setParameters( const PinholeFullCamera::Parameters& parameters ) +{ + mParameters = parameters; + + if ( ( mParameters.k1( ) == 0.0 ) && ( mParameters.k2( ) == 0.0 ) + && ( mParameters.p1( ) == 0.0 ) && ( mParameters.p2( ) == 0.0 ) ) + { + m_noDistortion = true; + } + else + { + m_noDistortion = false; + } + + m_inv_K11 = 1.0 / mParameters.fx( ); + m_inv_K13 = -mParameters.cx( ) / mParameters.fx( ); + m_inv_K22 = 1.0 / mParameters.fy( ); + m_inv_K23 = -mParameters.cy( ) / mParameters.fy( ); +} + +void +PinholeFullCamera::readParameters( const std::vector< double >& parameterVec ) +{ + if ( ( int )parameterVec.size( ) != parameterCount( ) ) + { + return; + } + + Parameters params = getParameters( ); + + params.k1( ) = parameterVec.at( 0 ); + params.k2( ) = parameterVec.at( 1 ); + params.k3( ) = parameterVec.at( 2 ); + params.k4( ) = parameterVec.at( 3 ); + params.k5( ) = parameterVec.at( 4 ); + params.k6( ) = parameterVec.at( 5 ); + params.p1( ) = parameterVec.at( 6 ); + params.p2( ) = parameterVec.at( 7 ); + params.fx( ) = parameterVec.at( 8 ); + params.fy( ) = parameterVec.at( 9 ); + params.cx( ) = parameterVec.at( 10 ); + params.cy( ) = parameterVec.at( 11 ); + + setParameters( params ); +} + +void +PinholeFullCamera::writeParameters( std::vector< double >& parameterVec ) const +{ + parameterVec.resize( parameterCount( ) ); + parameterVec.at( 0 ) = mParameters.k1( ); + parameterVec.at( 1 ) = mParameters.k2( ); + 
parameterVec.at( 2 ) = mParameters.k3( ); + parameterVec.at( 3 ) = mParameters.k4( ); + parameterVec.at( 4 ) = mParameters.k5( ); + parameterVec.at( 5 ) = mParameters.k6( ); + parameterVec.at( 6 ) = mParameters.p1( ); + parameterVec.at( 7 ) = mParameters.p2( ); + parameterVec.at( 8 ) = mParameters.fx( ); + parameterVec.at( 9 ) = mParameters.fy( ); + parameterVec.at( 10 ) = mParameters.cx( ); + parameterVec.at( 11 ) = mParameters.cy( ); +} + +void +PinholeFullCamera::writeParametersToYamlFile( const std::string& filename ) const +{ + mParameters.writeToYamlFile( filename ); +} + +std::string +PinholeFullCamera::parametersToString( void ) const +{ + std::ostringstream oss; + oss << mParameters; + + return oss.str( ); +} +} diff --git a/camera_models/src/camera_models/ScaramuzzaCamera.cc b/camera_models/src/camera_models/ScaramuzzaCamera.cc new file mode 100644 index 0000000..3615594 --- /dev/null +++ b/camera_models/src/camera_models/ScaramuzzaCamera.cc @@ -0,0 +1,833 @@ +#include "camodocal/camera_models/ScaramuzzaCamera.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "camodocal/gpl/gpl.h" + + +Eigen::VectorXd polyfit(Eigen::VectorXd& xVec, Eigen::VectorXd& yVec, int poly_order) { + assert(poly_order > 0); + assert(xVec.size() > poly_order); + assert(xVec.size() == yVec.size()); + + Eigen::MatrixXd A(xVec.size(), poly_order+1); + Eigen::VectorXd B(xVec.size()); + + for(int i = 0; i < xVec.size(); ++i) { + const double x = xVec(i); + const double y = yVec(i); + + double x_pow_k = 1.0; + + for(int k=0; k<=poly_order; ++k) { + A(i,k) = x_pow_k; + x_pow_k *= x; + } + + B(i) = y; + } + + Eigen::JacobiSVD svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV); + Eigen::VectorXd x = svd.solve(B); + + return x; +} + +namespace camodocal +{ + +OCAMCamera::Parameters::Parameters() + : Camera::Parameters(SCARAMUZZA) + , m_C(0.0) + , m_D(0.0) + , m_E(0.0) + , m_center_x(0.0) + , m_center_y(0.0) +{ + 
memset(m_poly, 0, sizeof(double) * SCARAMUZZA_POLY_SIZE); + memset(m_inv_poly, 0, sizeof(double) * SCARAMUZZA_INV_POLY_SIZE); +} + + + +bool +OCAMCamera::Parameters::readFromYamlFile(const std::string& filename) +{ + cv::FileStorage fs(filename, cv::FileStorage::READ); + + if (!fs.isOpened()) + { + return false; + } + + if (!fs["model_type"].isNone()) + { + std::string sModelType; + fs["model_type"] >> sModelType; + + if (!boost::iequals(sModelType, "scaramuzza")) + { + return false; + } + } + + m_modelType = SCARAMUZZA; + fs["camera_name"] >> m_cameraName; + m_imageWidth = static_cast(fs["image_width"]); + m_imageHeight = static_cast(fs["image_height"]); + + cv::FileNode n = fs["poly_parameters"]; + for(int i=0; i < SCARAMUZZA_POLY_SIZE; i++) + m_poly[i] = static_cast(n[std::string("p") + boost::lexical_cast(i)]); + + n = fs["inv_poly_parameters"]; + for(int i=0; i < SCARAMUZZA_INV_POLY_SIZE; i++) + m_inv_poly[i] = static_cast(n[std::string("p") + boost::lexical_cast(i)]); + + n = fs["affine_parameters"]; + m_C = static_cast(n["ac"]); + m_D = static_cast(n["ad"]); + m_E = static_cast(n["ae"]); + + m_center_x = static_cast(n["cx"]); + m_center_y = static_cast(n["cy"]); + + return true; +} + +void +OCAMCamera::Parameters::writeToYamlFile(const std::string& filename) const +{ + cv::FileStorage fs(filename, cv::FileStorage::WRITE); + + fs << "model_type" << "scaramuzza"; + fs << "camera_name" << m_cameraName; + fs << "image_width" << m_imageWidth; + fs << "image_height" << m_imageHeight; + + fs << "poly_parameters"; + fs << "{"; + for(int i=0; i < SCARAMUZZA_POLY_SIZE; i++) + fs << std::string("p") + boost::lexical_cast(i) << m_poly[i]; + fs << "}"; + + fs << "inv_poly_parameters"; + fs << "{"; + for(int i=0; i < SCARAMUZZA_INV_POLY_SIZE; i++) + fs << std::string("p") + boost::lexical_cast(i) << m_inv_poly[i]; + fs << "}"; + + fs << "affine_parameters"; + fs << "{" << "ac" << m_C + << "ad" << m_D + << "ae" << m_E + << "cx" << m_center_x + << "cy" << m_center_y << "}"; 
+ + fs.release(); +} + +OCAMCamera::Parameters& +OCAMCamera::Parameters::operator=(const OCAMCamera::Parameters& other) +{ + if (this != &other) + { + m_modelType = other.m_modelType; + m_cameraName = other.m_cameraName; + m_imageWidth = other.m_imageWidth; + m_imageHeight = other.m_imageHeight; + m_C = other.m_C; + m_D = other.m_D; + m_E = other.m_E; + m_center_x = other.m_center_x; + m_center_y = other.m_center_y; + + memcpy(m_poly, other.m_poly, sizeof(double) * SCARAMUZZA_POLY_SIZE); + memcpy(m_inv_poly, other.m_inv_poly, sizeof(double) * SCARAMUZZA_INV_POLY_SIZE); + } + + return *this; +} + +std::ostream& +operator<< (std::ostream& out, const OCAMCamera::Parameters& params) +{ + out << "Camera Parameters:" << std::endl; + out << " model_type " << "scaramuzza" << std::endl; + out << " camera_name " << params.m_cameraName << std::endl; + out << " image_width " << params.m_imageWidth << std::endl; + out << " image_height " << params.m_imageHeight << std::endl; + + out << std::fixed << std::setprecision(10); + + out << "Poly Parameters" << std::endl; + for(int i=0; i < SCARAMUZZA_POLY_SIZE; i++) + out << std::string("p") + boost::lexical_cast(i) << ": " << params.m_poly[i] << std::endl; + + out << "Inverse Poly Parameters" << std::endl; + for(int i=0; i < SCARAMUZZA_INV_POLY_SIZE; i++) + out << std::string("p") + boost::lexical_cast(i) << ": " << params.m_inv_poly[i] << std::endl; + + out << "Affine Parameters" << std::endl; + out << " ac " << params.m_C << std::endl + << " ad " << params.m_D << std::endl + << " ae " << params.m_E << std::endl; + out << " cx " << params.m_center_x << std::endl + << " cy " << params.m_center_y << std::endl; + + return out; +} + +OCAMCamera::OCAMCamera() + : m_inv_scale(0.0) +{ + +} + +OCAMCamera::OCAMCamera(const OCAMCamera::Parameters& params) + : mParameters(params) +{ + m_inv_scale = 1.0 / (params.C() - params.D() * params.E()); +} + +Camera::ModelType +OCAMCamera::modelType(void) const +{ + return mParameters.modelType(); +} + 
+const std::string& +OCAMCamera::cameraName(void) const +{ + return mParameters.cameraName(); +} + +int +OCAMCamera::imageWidth(void) const +{ + return mParameters.imageWidth(); +} + +int +OCAMCamera::imageHeight(void) const +{ + return mParameters.imageHeight(); +} + +void +OCAMCamera::estimateIntrinsics(const cv::Size& boardSize, + const std::vector< std::vector >& objectPoints, + const std::vector< std::vector >& imagePoints) +{ + // std::cout << "OCAMCamera::estimateIntrinsics - NOT IMPLEMENTED" << std::endl; + // throw std::string("OCAMCamera::estimateIntrinsics - NOT IMPLEMENTED"); + + // Reference: Page 30 of + // " Scaramuzza, D. Omnidirectional Vision: from Calibration to Robot Motion Estimation, ETH Zurich. Thesis no. 17635." + // http://e-collection.library.ethz.ch/eserv/eth:30301/eth-30301-02.pdf + // Matlab code: calibrate.m + + // First, estimate every image's extrinsics parameters + std::vector< Eigen::Matrix3d > RList; + std::vector< Eigen::Vector3d > TList; + + RList.reserve(imagePoints.size()); + TList.reserve(imagePoints.size()); + + // i-th image + for (size_t image_index = 0; image_index < imagePoints.size(); ++image_index) + { + const std::vector& objPts = objectPoints.at(image_index); + const std::vector& imgPts = imagePoints.at(image_index); + + assert(objPts.size() == imgPts.size()); + assert(objPts.size() == static_cast(boardSize.width * boardSize.height)); + + Eigen::MatrixXd M(objPts.size(), 6); + + for(size_t corner_index = 0; corner_index < objPts.size(); ++corner_index) { + double X = objPts.at(corner_index).x; + double Y = objPts.at(corner_index).y; + assert(objPts.at(corner_index).z==0.0); + + double u = imgPts.at(corner_index).x; + double v = imgPts.at(corner_index).y; + + M(corner_index, 0) = -v * X; + M(corner_index, 1) = -v * Y; + M(corner_index, 2) = u * X; + M(corner_index, 3) = u * Y; + M(corner_index, 4) = -v; + M(corner_index, 5) = u; + } + + Eigen::JacobiSVD svd(M, Eigen::ComputeFullU | Eigen::ComputeFullV); + 
assert(svd.matrixV().cols() == 6); + Eigen::VectorXd h = -svd.matrixV().col(5); + + // scaled version of R and T + const double sr11 = h(0); + const double sr12 = h(1); + const double sr21 = h(2); + const double sr22 = h(3); + const double st1 = h(4); + const double st2 = h(5); + + const double AA = square(sr11*sr12 + sr21*sr22); + const double BB = square(sr11) + square(sr21); + const double CC = square(sr12) + square(sr22); + + const double sr32_squared_1 = (- (CC-BB) + sqrt(square(CC-BB) + 4.0 * AA)) / 2.0; + const double sr32_squared_2 = (- (CC-BB) - sqrt(square(CC-BB) + 4.0 * AA)) / 2.0; + +// printf("rst = %.12f\n", sr32_squared_1*sr32_squared_1 + (CC-BB)*sr32_squared_1 - AA); + + std::vector sr32_squared_values; + if (sr32_squared_1 > 0) sr32_squared_values.push_back(sr32_squared_1); + if (sr32_squared_2 > 0) sr32_squared_values.push_back(sr32_squared_2); + assert(!sr32_squared_values.empty()); + + std::vector sr32_values; + std::vector sr31_values; + for (auto sr32_squared : sr32_squared_values) { + for(int sign = -1; sign <= 1; sign += 2) { + const double sr32 = static_cast(sign) * std::sqrt(sr32_squared); + sr32_values.push_back( sr32 ); + if (sr32_squared == 0.0) { + // sr31 can be calculated through norm equality, + // but it has positive and negative posibilities + // positive one + sr31_values.push_back(std::sqrt(CC-BB)); + // negative one + sr32_values.push_back( sr32 ); + sr31_values.push_back(-std::sqrt(CC-BB)); + + break; // skip the same situation + } else { + // sr31 can be calculated throught dot product == 0 + sr31_values.push_back(- (sr11*sr12 + sr21*sr22) / sr32); + } + } + } + + // std::cout << "h= " << std::setprecision(12) << h.transpose() << std::endl; + // std::cout << "length: " << sr32_values.size() << " & " << sr31_values.size() << std::endl; + + assert(!sr31_values.empty()); + assert(sr31_values.size() == sr32_values.size()); + + std::vector H_values; + for(size_t i=0;i(v,u) = p(0); + mapY.at(v,u) = p(1); + } + } + + 
cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false); +} +#endif + +cv::Mat +OCAMCamera::initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2, + float fx, float fy, + cv::Size imageSize, + float cx, float cy, + cv::Mat rmat) const +{ + if (imageSize == cv::Size(0, 0)) + { + imageSize = cv::Size(mParameters.imageWidth(), mParameters.imageHeight()); + } + + cv::Mat mapX = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F); + cv::Mat mapY = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F); + + Eigen::Matrix3f K_rect; + + K_rect << fx, 0, cx < 0 ? imageSize.width / 2 : cx, + 0, fy, cy < 0 ? imageSize.height / 2 : cy, + 0, 0, 1; + + if (fx < 0 || fy < 0) + { + throw std::string(std::string(__FUNCTION__) + ": Focal length must be specified"); + } + + Eigen::Matrix3f K_rect_inv = K_rect.inverse(); + + Eigen::Matrix3f R, R_inv; + cv::cv2eigen(rmat, R); + R_inv = R.inverse(); + + for (int v = 0; v < imageSize.height; ++v) + { + for (int u = 0; u < imageSize.width; ++u) + { + Eigen::Vector3f xo; + xo << u, v, 1; + + Eigen::Vector3f uo = R_inv * K_rect_inv * xo; + + Eigen::Vector2d p; + spaceToPlane(uo.cast(), p); + + mapX.at(v,u) = p(0); + mapY.at(v,u) = p(1); + } + } + + cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false); + + cv::Mat K_rect_cv; + cv::eigen2cv(K_rect, K_rect_cv); + return K_rect_cv; +} + +int +OCAMCamera::parameterCount(void) const +{ + return SCARAMUZZA_CAMERA_NUM_PARAMS; +} + +const OCAMCamera::Parameters& +OCAMCamera::getParameters(void) const +{ + return mParameters; +} + +void +OCAMCamera::setParameters(const OCAMCamera::Parameters& parameters) +{ + mParameters = parameters; + + m_inv_scale = 1.0 / (parameters.C() - parameters.D() * parameters.E()); +} + +void +OCAMCamera::readParameters(const std::vector& parameterVec) +{ + if ((int)parameterVec.size() != parameterCount()) + { + return; + } + + Parameters params = getParameters(); + + params.C() = parameterVec.at(0); + params.D() = parameterVec.at(1); + params.E() = 
parameterVec.at(2); + params.center_x() = parameterVec.at(3); + params.center_y() = parameterVec.at(4); + for (int i=0; i < SCARAMUZZA_POLY_SIZE; i++) + params.poly(i) = parameterVec.at(5+i); + for (int i=0; i < SCARAMUZZA_INV_POLY_SIZE; i++) + params.inv_poly(i) = parameterVec.at(5 + SCARAMUZZA_POLY_SIZE + i); + + setParameters(params); +} + +void +OCAMCamera::writeParameters(std::vector& parameterVec) const +{ + parameterVec.resize(parameterCount()); + parameterVec.at(0) = mParameters.C(); + parameterVec.at(1) = mParameters.D(); + parameterVec.at(2) = mParameters.E(); + parameterVec.at(3) = mParameters.center_x(); + parameterVec.at(4) = mParameters.center_y(); + for (int i=0; i < SCARAMUZZA_POLY_SIZE; i++) + parameterVec.at(5+i) = mParameters.poly(i); + for (int i=0; i < SCARAMUZZA_INV_POLY_SIZE; i++) + parameterVec.at(5 + SCARAMUZZA_POLY_SIZE + i) = mParameters.inv_poly(i); +} + +void +OCAMCamera::writeParametersToYamlFile(const std::string& filename) const +{ + mParameters.writeToYamlFile(filename); +} + +std::string +OCAMCamera::parametersToString(void) const +{ + std::ostringstream oss; + oss << mParameters; + + return oss.str(); +} + +} diff --git a/camera_models/src/chessboard/Chessboard.cc b/camera_models/src/chessboard/Chessboard.cc new file mode 100644 index 0000000..4c0a761 --- /dev/null +++ b/camera_models/src/chessboard/Chessboard.cc @@ -0,0 +1,1982 @@ +#include "camodocal/chessboard/Chessboard.h" + +#include +#include + +#include "camodocal/chessboard/ChessboardQuad.h" +#include "camodocal/chessboard/Spline.h" + +#define MAX_CONTOUR_APPROX 7 + +namespace camodocal +{ + +Chessboard::Chessboard(cv::Size boardSize, cv::Mat& image) + : mBoardSize(boardSize) + , mCornersFound(false) +{ + if (image.channels() == 1) + { + cv::cvtColor(image, mSketch, CV_GRAY2BGR); + image.copyTo(mImage); + } + else + { + image.copyTo(mSketch); + cv::cvtColor(image, mImage, CV_BGR2GRAY); + } +} + +void +Chessboard::findCorners(bool useOpenCV) +{ + mCornersFound = 
findChessboardCorners(mImage, mBoardSize, mCorners, + CV_CALIB_CB_ADAPTIVE_THRESH + + CV_CALIB_CB_NORMALIZE_IMAGE + + CV_CALIB_CB_FILTER_QUADS + + CV_CALIB_CB_FAST_CHECK, + useOpenCV); + + if (mCornersFound) + { + // draw chessboard corners + cv::drawChessboardCorners(mSketch, mBoardSize, mCorners, mCornersFound); + } +} + +const std::vector& +Chessboard::getCorners(void) const +{ + return mCorners; +} + +bool +Chessboard::cornersFound(void) const +{ + return mCornersFound; +} + +const cv::Mat& +Chessboard::getImage(void) const +{ + return mImage; +} + +const cv::Mat& +Chessboard::getSketch(void) const +{ + return mSketch; +} + +bool +Chessboard::findChessboardCorners(const cv::Mat& image, + const cv::Size& patternSize, + std::vector& corners, + int flags, bool useOpenCV) +{ + if (useOpenCV) + { + return cv::findChessboardCorners(image, patternSize, corners, flags); + } + else + { + return findChessboardCornersImproved(image, patternSize, corners, flags); + } +} + +bool +Chessboard::findChessboardCornersImproved(const cv::Mat& image, + const cv::Size& patternSize, + std::vector& corners, + int flags) +{ + /************************************************************************************\ + This is improved variant of chessboard corner detection algorithm that + uses a graph of connected quads. It is based on the code contributed + by Vladimir Vezhnevets and Philip Gruebele. + Here is the copyright notice from the original Vladimir's code: + =============================================================== + + The algorithms developed and implemented by Vezhnevets Vldimir + aka Dead Moroz (vvp@graphics.cs.msu.ru) + See http://graphics.cs.msu.su/en/research/calibration/opencv.html + for detailed information. + + Reliability additions and modifications made by Philip Gruebele. + pgruebele@cox.net + + Some improvements were made by: + 1) Martin Rufli - increased chance of correct corner matching + Rufli, M., Scaramuzza, D., and Siegwart, R. 
(2008), + Automatic Detection of Checkerboards on Blurred and Distorted Images, + Proceedings of the IEEE/RSJ International Conference on + Intelligent Robots and Systems (IROS 2008), Nice, France, September 2008. + 2) Lionel Heng - post-detection checks and better thresholding + + \************************************************************************************/ + + //int bestDilation = -1; + const int minDilations = 0; + const int maxDilations = 7; + + std::vector outputQuadGroup; + + if (image.depth() != CV_8U || image.channels() == 2) + { + return false; + } + + if (patternSize.width < 2 || patternSize.height < 2) + { + return false; + } + + if (patternSize.width > 127 || patternSize.height > 127) + { + return false; + } + + cv::Mat img = image; + + // Image histogram normalization and + // BGR to Grayscale image conversion (if applicable) + // MARTIN: Set to "false" + if (image.channels() != 1 || (flags & CV_CALIB_CB_NORMALIZE_IMAGE)) + { + cv::Mat norm_img(image.rows, image.cols, CV_8UC1); + + if (image.channels() != 1) + { + cv::cvtColor(image, norm_img, CV_BGR2GRAY); + img = norm_img; + } + + if (flags & CV_CALIB_CB_NORMALIZE_IMAGE) + { + cv::equalizeHist(image, norm_img); + img = norm_img; + } + } + + if (flags & CV_CALIB_CB_FAST_CHECK) + { + if (!checkChessboard(img, patternSize)) + { + return false; + } + } + + // PART 1: FIND LARGEST PATTERN + //----------------------------------------------------------------------- + // Checker patterns are tried to be found by dilating the background and + // then applying a canny edge finder on the closed contours (checkers). + // Try one dilation run, but if the pattern is not found, repeat until + // max_dilations is reached. 
+ + int prevSqrSize = 0; + bool found = false; + std::vector outputCorners; + + for (int k = 0; k < 6; ++k) + { + for (int dilations = minDilations; dilations <= maxDilations; ++dilations) + { + if (found) + { + break; + } + + cv::Mat thresh_img; + + // convert the input grayscale image to binary (black-n-white) + if (flags & CV_CALIB_CB_ADAPTIVE_THRESH) + { + int blockSize = lround(prevSqrSize == 0 ? + std::min(img.cols,img.rows)*(k%2 == 0 ? 0.2 : 0.1): prevSqrSize*2)|1; + + // convert to binary + cv::adaptiveThreshold(img, thresh_img, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, blockSize, (k/2)*5); + } + else + { + // empiric threshold level + double mean = (cv::mean(img))[0]; + int thresh_level = lround(mean - 10); + thresh_level = std::max(thresh_level, 10); + + cv::threshold(img, thresh_img, thresh_level, 255, CV_THRESH_BINARY); + } + + // MARTIN's Code + // Use both a rectangular and a cross kernel. In this way, a more + // homogeneous dilation is performed, which is crucial for small, + // distorted checkers. Use the CROSS kernel first, since its action + // on the image is more subtle + cv::Mat kernel1 = cv::getStructuringElement(CV_SHAPE_CROSS, cv::Size(3,3), cv::Point(1,1)); + cv::Mat kernel2 = cv::getStructuringElement(CV_SHAPE_RECT, cv::Size(3,3), cv::Point(1,1)); + + if (dilations >= 1) + cv::dilate(thresh_img, thresh_img, kernel1); + if (dilations >= 2) + cv::dilate(thresh_img, thresh_img, kernel2); + if (dilations >= 3) + cv::dilate(thresh_img, thresh_img, kernel1); + if (dilations >= 4) + cv::dilate(thresh_img, thresh_img, kernel2); + if (dilations >= 5) + cv::dilate(thresh_img, thresh_img, kernel1); + if (dilations >= 6) + cv::dilate(thresh_img, thresh_img, kernel2); + + // In order to find rectangles that go to the edge, we draw a white + // line around the image edge. Otherwise FindContours will miss those + // clipped rectangle contours. 
The border color will be the image mean, + // because otherwise we risk screwing up filters like cvSmooth() + cv::rectangle(thresh_img, cv::Point(0,0), + cv::Point(thresh_img.cols - 1, thresh_img.rows - 1), + CV_RGB(255,255,255), 3, 8); + + // Generate quadrangles in the following function + std::vector quads; + + generateQuads(quads, thresh_img, flags, dilations, true); + if (quads.empty()) + { + continue; + } + + // The following function finds and assigns neighbor quads to every + // quadrangle in the immediate vicinity fulfilling certain + // prerequisites + findQuadNeighbors(quads, dilations); + + // The connected quads will be organized in groups. The following loop + // increases a "group_idx" identifier. + // The function "findConnectedQuads assigns all connected quads + // a unique group ID. + // If more quadrangles were assigned to a given group (i.e. connected) + // than are expected by the input variable "patternSize", the + // function "cleanFoundConnectedQuads" erases the surplus + // quadrangles by minimizing the convex hull of the remaining pattern. + + for (int group_idx = 0; ; ++group_idx) + { + std::vector quadGroup; + + findConnectedQuads(quads, quadGroup, group_idx, dilations); + + if (quadGroup.empty()) + { + break; + } + + cleanFoundConnectedQuads(quadGroup, patternSize); + + // The following function labels all corners of every quad + // with a row and column entry. + // "count" specifies the number of found quads in "quad_group" + // with group identifier "group_idx" + // The last parameter is set to "true", because this is the + // first function call and some initializations need to be + // made. 
+ labelQuadGroup(quadGroup, patternSize, true); + + found = checkQuadGroup(quadGroup, outputCorners, patternSize); + + float sumDist = 0; + int total = 0; + + for (int i = 0; i < (int)outputCorners.size(); ++i) + { + int ni = 0; + float avgi = outputCorners.at(i)->meanDist(ni); + sumDist += avgi * ni; + total += ni; + } + prevSqrSize = lround(sumDist / std::max(total, 1)); + + if (found && !checkBoardMonotony(outputCorners, patternSize)) + { + found = false; + } + } + } + } + + if (!found) + { + return false; + } + else + { + corners.clear(); + corners.reserve(outputCorners.size()); + for (size_t i = 0; i < outputCorners.size(); ++i) + { + corners.push_back(outputCorners.at(i)->pt); + } + + cv::cornerSubPix(image, corners, cv::Size(11, 11), cv::Size(-1,-1), + cv::TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1)); + + return true; + } +} + +//=========================================================================== +// ERASE OVERHEAD +//=========================================================================== +// If we found too many connected quads, remove those which probably do not +// belong. +void +Chessboard::cleanFoundConnectedQuads(std::vector& quadGroup, + cv::Size patternSize) +{ + cv::Point2f center(0.0f, 0.0f); + + // Number of quads this pattern should contain + int count = ((patternSize.width + 1)*(patternSize.height + 1) + 1)/2; + + // If we have more quadrangles than we should, try to eliminate duplicates + // or ones which don't belong to the pattern rectangle. 
Else go to the end + // of the function + if ((int)quadGroup.size() <= count) + { + return; + } + + // Create an array of quadrangle centers + std::vector centers; + centers.resize(quadGroup.size()); + + for (size_t i = 0; i < quadGroup.size(); ++i) + { + cv::Point2f ci(0.0f, 0.0f); + ChessboardQuadPtr& q = quadGroup[i]; + + for (int j = 0; j < 4; ++j) + { + ci += q->corners[j]->pt; + } + + ci *= 0.25f; + + // Centers(i), is the geometric center of quad(i) + // Center, is the center of all found quads + centers[i] = ci; + + center += ci; + } + + center *= 1.0f / quadGroup.size(); + + // If we have more quadrangles than we should, we try to eliminate bad + // ones based on minimizing the bounding box. We iteratively remove the + // point which reduces the size of the bounding box of the blobs the most + // (since we want the rectangle to be as small as possible) remove the + // quadrange that causes the biggest reduction in pattern size until we + // have the correct number + while ((int)quadGroup.size() > count) + { + double minBoxArea = DBL_MAX; + int minBoxAreaIndex = -1; + + // For each point, calculate box area without that point + for (size_t skip = 0; skip < quadGroup.size(); ++skip) + { + // get bounding rectangle + cv::Point2f temp = centers[skip]; + centers[skip] = center; + + std::vector hull; + cv::convexHull(centers, hull, true, true); + centers[skip] = temp; + + double hull_area = fabs(cv::contourArea(hull)); + + // remember smallest box area + if (hull_area < minBoxArea) + { + minBoxArea = hull_area; + minBoxAreaIndex = skip; + } + } + + ChessboardQuadPtr& q0 = quadGroup[minBoxAreaIndex]; + + // remove any references to this quad as a neighbor + for (size_t i = 0; i < quadGroup.size(); ++i) + { + ChessboardQuadPtr& q = quadGroup.at(i); + for (int j = 0; j < 4; ++j) + { + if (q->neighbors[j] == q0) + { + q->neighbors[j].reset(); + q->count--; + for (int k = 0; k < 4; ++k) + { + if (q0->neighbors[k] == q) + { + q0->neighbors[k].reset(); + q0->count--; + 
break; + } + } + break; + } + } + } + // remove the quad + quadGroup.at(minBoxAreaIndex) = quadGroup.back(); + centers.at(minBoxAreaIndex) = centers.back(); + quadGroup.pop_back(); + centers.pop_back(); + } +} + +//=========================================================================== +// FIND COONECTED QUADS +//=========================================================================== +void +Chessboard::findConnectedQuads(std::vector& quads, + std::vector& group, + int group_idx, int dilation) +{ + ChessboardQuadPtr q; + + // Scan the array for a first unlabeled quad + for (size_t i = 0; i < quads.size(); ++i) + { + ChessboardQuadPtr& quad = quads.at(i); + + if (quad->count > 0 && quad->group_idx < 0) + { + q = quad; + break; + } + } + + if (q.get() == 0) + { + return; + } + + // Recursively find a group of connected quads starting from the seed quad + + std::vector stack; + stack.push_back(q); + + group.push_back(q); + q->group_idx = group_idx; + + while (!stack.empty()) + { + q = stack.back(); + stack.pop_back(); + + for (int i = 0; i < 4; ++i) + { + ChessboardQuadPtr& neighbor = q->neighbors[i]; + + // If he neighbor exists and the neighbor has more than 0 + // neighbors and the neighbor has not been classified yet. + if (neighbor.get() && neighbor->count > 0 && neighbor->group_idx < 0) + { + stack.push_back(neighbor); + group.push_back(neighbor); + neighbor->group_idx = group_idx; + } + } + } +} + +void +Chessboard::labelQuadGroup(std::vector& quadGroup, + cv::Size patternSize, bool firstRun) +{ + // If this is the first function call, a seed quad needs to be selected + if (firstRun) + { + // Search for the (first) quad with the maximum number of neighbors + // (usually 4). This will be our starting point. 
+ int mark = -1; + int maxNeighborCount = 0; + for (size_t i = 0; i < quadGroup.size(); ++i) + { + ChessboardQuadPtr& q = quadGroup.at(i); + if (q->count > maxNeighborCount) + { + mark = i; + maxNeighborCount = q->count; + + if (maxNeighborCount == 4) + { + break; + } + } + } + + // Mark the starting quad's (per definition) upper left corner with + //(0,0) and then proceed clockwise + // The following labeling sequence enshures a "right coordinate system" + ChessboardQuadPtr& q = quadGroup.at(mark); + + q->labeled = true; + + q->corners[0]->row = 0; + q->corners[0]->column = 0; + q->corners[1]->row = 0; + q->corners[1]->column = 1; + q->corners[2]->row = 1; + q->corners[2]->column = 1; + q->corners[3]->row = 1; + q->corners[3]->column = 0; + } + + + // Mark all other corners with their respective row and column + bool flagChanged = true; + while (flagChanged) + { + // First reset the flag to "false" + flagChanged = false; + + // Go through all quads top down is faster, since unlabeled quads will + // be inserted at the end of the list + for (int i = quadGroup.size() - 1; i >= 0; --i) + { + ChessboardQuadPtr& quad = quadGroup.at(i); + + // Check whether quad "i" has been labeled already + if (!quad->labeled) + { + // Check its neighbors, whether some of them have been labeled + // already + for (int j = 0; j < 4; j++) + { + // Check whether the neighbor exists (i.e. is not the NULL + // pointer) + if (quad->neighbors[j]) + { + ChessboardQuadPtr& quadNeighbor = quad->neighbors[j]; + + // Only proceed, if neighbor "j" was labeled + if (quadNeighbor->labeled) + { + // For every quad it could happen to pass here + // multiple times. We therefore "break" later. 
+ // Check whitch of the neighbors corners is + // connected to the current quad + int connectedNeighborCornerId = -1; + for (int k = 0; k < 4; ++k) + { + if (quadNeighbor->neighbors[k] == quad) + { + connectedNeighborCornerId = k; + + // there is only one, therefore + break; + } + } + + + // For the following calculations we need the row + // and column of the connected neighbor corner and + // all other corners of the connected quad "j", + // clockwise (CW) + ChessboardCornerPtr& conCorner = quadNeighbor->corners[connectedNeighborCornerId]; + ChessboardCornerPtr& conCornerCW1 = quadNeighbor->corners[(connectedNeighborCornerId+1)%4]; + ChessboardCornerPtr& conCornerCW2 = quadNeighbor->corners[(connectedNeighborCornerId+2)%4]; + ChessboardCornerPtr& conCornerCW3 = quadNeighbor->corners[(connectedNeighborCornerId+3)%4]; + + quad->corners[j]->row = conCorner->row; + quad->corners[j]->column = conCorner->column; + quad->corners[(j+1)%4]->row = conCorner->row - conCornerCW2->row + conCornerCW3->row; + quad->corners[(j+1)%4]->column = conCorner->column - conCornerCW2->column + conCornerCW3->column; + quad->corners[(j+2)%4]->row = conCorner->row + conCorner->row - conCornerCW2->row; + quad->corners[(j+2)%4]->column = conCorner->column + conCorner->column - conCornerCW2->column; + quad->corners[(j+3)%4]->row = conCorner->row - conCornerCW2->row + conCornerCW1->row; + quad->corners[(j+3)%4]->column = conCorner->column - conCornerCW2->column + conCornerCW1->column; + + // Mark this quad as labeled + quad->labeled = true; + + // Changes have taken place, set the flag + flagChanged = true; + + // once is enough! 
+ break; + } + } + } + } + } + } + + + // All corners are marked with row and column + // Record the minimal and maximal row and column indices + // It is unlikely that more than 8bit checkers are used per dimension, if there are + // an error would have been thrown at the beginning of "cvFindChessboardCorners2" + int min_row = 127; + int max_row = -127; + int min_column = 127; + int max_column = -127; + + for (int i = 0; i < (int)quadGroup.size(); ++i) + { + ChessboardQuadPtr& q = quadGroup.at(i); + + for (int j = 0; j < 4; ++j) + { + ChessboardCornerPtr& c = q->corners[j]; + + if (c->row > max_row) + { + max_row = c->row; + } + if (c->row < min_row) + { + min_row = c->row; + } + if (c->column > max_column) + { + max_column = c->column; + } + if (c->column < min_column) + { + min_column = c->column; + } + } + } + + // Label all internal corners with "needsNeighbor" = false + // Label all external corners with "needsNeighbor" = true, + // except if in a given dimension the pattern size is reached + for (int i = min_row; i <= max_row; ++i) + { + for (int j = min_column; j <= max_column; ++j) + { + // A flag that indicates, whether a row/column combination is + // executed multiple times + bool flag = false; + + // Remember corner and quad + int cornerID; + int quadID; + + for (int k = 0; k < (int)quadGroup.size(); ++k) + { + ChessboardQuadPtr& q = quadGroup.at(k); + + for (int l = 0; l < 4; ++l) + { + if ((q->corners[l]->row == i) && (q->corners[l]->column == j)) + { + if (flag) + { + // Passed at least twice through here + q->corners[l]->needsNeighbor = false; + quadGroup[quadID]->corners[cornerID]->needsNeighbor = false; + } + else + { + // Mark with needs a neighbor, but note the + // address + q->corners[l]->needsNeighbor = true; + cornerID = l; + quadID = k; + } + + // set the flag to true + flag = true; + } + } + } + } + } + + // Complete Linking: + // sometimes not all corners were properly linked in "findQuadNeighbors", + // but after labeling each corner 
with its respective row and column, it is + // possible to match them anyway. + for (int i = min_row; i <= max_row; ++i) + { + for (int j = min_column; j <= max_column; ++j) + { + // the following "number" indicates the number of corners which + // correspond to the given (i,j) value + // 1 is a border corner or a conrer which still needs a neighbor + // 2 is a fully connected internal corner + // >2 something went wrong during labeling, report a warning + int number = 1; + + // remember corner and quad + int cornerID; + int quadID; + + for (int k = 0; k < (int)quadGroup.size(); ++k) + { + ChessboardQuadPtr& q = quadGroup.at(k); + + for (int l = 0; l < 4; ++l) + { + if ((q->corners[l]->row == i) && (q->corners[l]->column == j)) + { + + if (number == 1) + { + // First corner, note its ID + cornerID = l; + quadID = k; + } + + else if (number == 2) + { + // Second corner, check wheter this and the + // first one have equal coordinates, else + // interpolate + cv::Point2f delta = q->corners[l]->pt - quadGroup[quadID]->corners[cornerID]->pt; + + if (delta.x != 0.0f || delta.y != 0.0f) + { + // Interpolate + q->corners[l]->pt -= delta * 0.5f; + + quadGroup[quadID]->corners[cornerID]->pt += delta * 0.5f; + } + } + else if (number > 2) + { + // Something went wrong during row/column labeling + // Report a Warning + // ->Implemented in the function "mrWriteCorners" + } + + // increase the number by one + ++number; + } + } + } + } + } + + + // Bordercorners don't need any neighbors, if the pattern size in the + // respective direction is reached + // The only time we can make shure that the target pattern size is reached in a given + // dimension, is when the larger side has reached the target size in the maximal + // direction, or if the larger side is larger than the smaller target size and the + // smaller side equals the smaller target size + int largerDimPattern = std::max(patternSize.height,patternSize.width); + int smallerDimPattern = 
std::min(patternSize.height,patternSize.width); + bool flagSmallerDim1 = false; + bool flagSmallerDim2 = false; + + if ((largerDimPattern + 1) == max_column - min_column) + { + flagSmallerDim1 = true; + // We found out that in the column direction the target pattern size is reached + // Therefore border column corners do not need a neighbor anymore + // Go through all corners + for (int k = 0; k < (int)quadGroup.size(); ++k) + { + ChessboardQuadPtr& q = quadGroup.at(k); + + for (int l = 0; l < 4; ++l) + { + ChessboardCornerPtr& c = q->corners[l]; + + if (c->column == min_column || c->column == max_column) + { + // Needs no neighbor anymore + c->needsNeighbor = false; + } + } + } + } + + if ((largerDimPattern + 1) == max_row - min_row) + { + flagSmallerDim2 = true; + // We found out that in the column direction the target pattern size is reached + // Therefore border column corners do not need a neighbor anymore + // Go through all corners + for (int k = 0; k < (int)quadGroup.size(); ++k) + { + ChessboardQuadPtr& q = quadGroup.at(k); + + for (int l = 0; l < 4; ++l) + { + ChessboardCornerPtr& c = q->corners[l]; + + if (c->row == min_row || c->row == max_row) + { + // Needs no neighbor anymore + c->needsNeighbor = false; + } + } + } + } + + + // Check the two flags: + // - If one is true and the other false, then the pattern target + // size was reached in in one direction -> We can check, whether the target + // pattern size is also reached in the other direction + // - If both are set to true, then we deal with a square board -> do nothing + // - If both are set to false -> There is a possibility that the larger side is + // larger than the smaller target size -> Check and if true, then check whether + // the other side has the same size as the smaller target size + if ((flagSmallerDim1 == false && flagSmallerDim2 == true)) + { + // Larger target pattern size is in row direction, check wheter smaller target + // pattern size is reached in column direction + if 
((smallerDimPattern + 1) == max_column - min_column) + { + for (int k = 0; k < (int)quadGroup.size(); ++k) + { + ChessboardQuadPtr& q = quadGroup.at(k); + + for (int l = 0; l < 4; ++l) + { + ChessboardCornerPtr& c = q->corners[l]; + + if (c->column == min_column || c->column == max_column) + { + // Needs no neighbor anymore + c->needsNeighbor = false; + } + } + } + } + } + + if ((flagSmallerDim1 == true && flagSmallerDim2 == false)) + { + // Larger target pattern size is in column direction, check wheter smaller target + // pattern size is reached in row direction + if ((smallerDimPattern + 1) == max_row - min_row) + { + for (int k = 0; k < (int)quadGroup.size(); ++k) + { + ChessboardQuadPtr& q = quadGroup.at(k); + + for (int l = 0; l < 4; ++l) + { + ChessboardCornerPtr& c = q->corners[l]; + + if (c->row == min_row || c->row == max_row) + { + // Needs no neighbor anymore + c->needsNeighbor = false; + } + } + } + } + } + + if ((flagSmallerDim1 == false && flagSmallerDim2 == false) && smallerDimPattern + 1 < max_column - min_column) + { + // Larger target pattern size is in column direction, check wheter smaller target + // pattern size is reached in row direction + if ((smallerDimPattern + 1) == max_row - min_row) + { + for (int k = 0; k < (int)quadGroup.size(); ++k) + { + ChessboardQuadPtr& q = quadGroup.at(k); + + for (int l = 0; l < 4; ++l) + { + ChessboardCornerPtr& c = q->corners[l]; + + if (c->row == min_row || c->row == max_row) + { + // Needs no neighbor anymore + c->needsNeighbor = false; + } + } + } + } + } + + if ((flagSmallerDim1 == false && flagSmallerDim2 == false) && smallerDimPattern + 1 < max_row - min_row) + { + // Larger target pattern size is in row direction, check wheter smaller target + // pattern size is reached in column direction + if ((smallerDimPattern + 1) == max_column - min_column) + { + for (int k = 0; k < (int)quadGroup.size(); ++k) + { + ChessboardQuadPtr& q = quadGroup.at(k); + + for (int l = 0; l < 4; ++l) + { + 
ChessboardCornerPtr& c = q->corners[l]; + + if (c->column == min_column || c->column == max_column) + { + // Needs no neighbor anymore + c->needsNeighbor = false; + } + } + } + } + } +} + +//=========================================================================== +// GIVE A GROUP IDX +//=========================================================================== +void +Chessboard::findQuadNeighbors(std::vector& quads, int dilation) +{ + // Thresh dilation is used to counter the effect of dilation on the + // distance between 2 neighboring corners. Since the distance below is + // computed as its square, we do here the same. Additionally, we take the + // conservative assumption that dilation was performed using the 3x3 CROSS + // kernel, which coresponds to the 4-neighborhood. + const float thresh_dilation = (float)(2*dilation+3)*(2*dilation+3)*2; // the "*2" is for the x and y component + // the "3" is for initial corner mismatch + + // Find quad neighbors + for (size_t idx = 0; idx < quads.size(); ++idx) + { + ChessboardQuadPtr& curQuad = quads.at(idx); + + // Go through all quadrangles and label them in groups + // For each corner of this quadrangle + for (int i = 0; i < 4; ++i) + { + float minDist = FLT_MAX; + int closestCornerIdx = -1; + ChessboardQuadPtr closestQuad; + + if (curQuad->neighbors[i]) + { + continue; + } + + cv::Point2f pt = curQuad->corners[i]->pt; + + // Find the closest corner in all other quadrangles + for (size_t k = 0; k < quads.size(); ++k) + { + if (k == idx) + { + continue; + } + + ChessboardQuadPtr& quad = quads.at(k); + + for (int j = 0; j < 4; ++j) + { + // If it already has a neighbor + if (quad->neighbors[j]) + { + continue; + } + + cv::Point2f dp = pt - quad->corners[j]->pt; + float dist = dp.dot(dp); + + // The following "if" checks, whether "dist" is the + // shortest so far and smaller than the smallest + // edge length of the current and target quads + if (dist < minDist && + dist <= (curQuad->edge_len + thresh_dilation) && + 
dist <= (quad->edge_len + thresh_dilation) ) + { + // Check whether conditions are fulfilled + if (matchCorners(curQuad, i, quad, j)) + { + closestCornerIdx = j; + closestQuad = quad; + minDist = dist; + } + } + } + } + + // Have we found a matching corner point? + if (closestCornerIdx >= 0 && minDist < FLT_MAX) + { + ChessboardCornerPtr closestCorner = closestQuad->corners[closestCornerIdx]; + + // Make sure that the closest quad does not have the current + // quad as neighbor already + bool valid = true; + for (int j = 0; j < 4; ++j) + { + if (closestQuad->neighbors[j] == curQuad) + { + valid = false; + break; + } + } + if (!valid) + { + continue; + } + + // We've found one more corner - remember it + closestCorner->pt = (pt + closestCorner->pt) * 0.5f; + + curQuad->count++; + curQuad->neighbors[i] = closestQuad; + curQuad->corners[i] = closestCorner; + + closestQuad->count++; + closestQuad->neighbors[closestCornerIdx] = curQuad; + closestQuad->corners[closestCornerIdx] = closestCorner; + } + } + } +} + + + +//=========================================================================== +// AUGMENT PATTERN WITH ADDITIONAL QUADS +//=========================================================================== +// The first part of the function is basically a copy of +// "findQuadNeighbors" +// The comparisons between two points and two lines could be computed in their +// own function +int +Chessboard::augmentBestRun(std::vector& candidateQuads, int candidateDilation, + std::vector& existingQuads, int existingDilation) +{ + // thresh dilation is used to counter the effect of dilation on the + // distance between 2 neighboring corners. Since the distance below is + // computed as its square, we do here the same. Additionally, we take the + // conservative assumption that dilation was performed using the 3x3 CROSS + // kernel, which coresponds to the 4-neighborhood. 
+ const float thresh_dilation = (2*candidateDilation+3)*(2*existingDilation+3)*2; // the "*2" is for the x and y component + + // Search all old quads which have a neighbor that needs to be linked + for (size_t idx = 0; idx < existingQuads.size(); ++idx) + { + ChessboardQuadPtr& curQuad = existingQuads.at(idx); + + // For each corner of this quadrangle + for (int i = 0; i < 4; ++i) + { + float minDist = FLT_MAX; + int closestCornerIdx = -1; + ChessboardQuadPtr closestQuad; + + // If curQuad corner[i] is already linked, continue + if (!curQuad->corners[i]->needsNeighbor) + { + continue; + } + + cv::Point2f pt = curQuad->corners[i]->pt; + + // Look for a match in all candidateQuads' corners + for (size_t k = 0; k < candidateQuads.size(); ++k) + { + ChessboardQuadPtr& candidateQuad = candidateQuads.at(k); + + // Only look at unlabeled new quads + if (candidateQuad->labeled) + { + continue; + } + + for (int j = 0; j < 4; ++j) + { + // Only proceed if they are less than dist away from each + // other + cv::Point2f dp = pt - candidateQuad->corners[j]->pt; + float dist = dp.dot(dp); + + if ((dist < minDist) && + dist <= (curQuad->edge_len + thresh_dilation) && + dist <= (candidateQuad->edge_len + thresh_dilation)) + { + if (matchCorners(curQuad, i, candidateQuad, j)) + { + closestCornerIdx = j; + closestQuad = candidateQuad; + minDist = dist; + } + } + } + } + + // Have we found a matching corner point? + if (closestCornerIdx >= 0 && minDist < FLT_MAX) + { + ChessboardCornerPtr closestCorner = closestQuad->corners[closestCornerIdx]; + closestCorner->pt = (pt + closestCorner->pt) * 0.5f; + + // We've found one more corner - remember it + // ATTENTION: write the corner x and y coordinates separately, + // else the crucial row/column entries will be overwritten !!! + curQuad->corners[i]->pt = closestCorner->pt; + curQuad->neighbors[i] = closestQuad; + closestQuad->corners[closestCornerIdx]->pt = closestCorner->pt; + + // Label closest quad as labeled. 
In this way we exclude it + // being considered again during the next loop iteration + closestQuad->labeled = true; + + // We have a new member of the final pattern, copy it over + ChessboardQuadPtr newQuad(new ChessboardQuad); + newQuad->count = 1; + newQuad->edge_len = closestQuad->edge_len; + newQuad->group_idx = curQuad->group_idx; //the same as the current quad + newQuad->labeled = false; //do it right afterwards + + curQuad->neighbors[i] = newQuad; + + // We only know one neighbor for sure + newQuad->neighbors[closestCornerIdx] = curQuad; + + for (int j = 0; j < 4; j++) + { + newQuad->corners[j].reset(new ChessboardCorner); + newQuad->corners[j]->pt = closestQuad->corners[j]->pt; + } + + existingQuads.push_back(newQuad); + + // Start the function again + return -1; + } + } + } + + // Finished, don't start the function again + return 1; +} + + + +//=========================================================================== +// GENERATE QUADRANGLES +//=========================================================================== +void +Chessboard::generateQuads(std::vector& quads, + cv::Mat& image, int flags, + int dilation, bool firstRun) +{ + // Empirical lower bound for the size of allowable quadrangles. + // MARTIN, modified: Added "*0.1" in order to find smaller quads. + int minSize = lround(image.cols * image.rows * .03 * 0.01 * 0.92 * 0.1); + + std::vector< std::vector > contours; + std::vector hierarchy; + + // Initialize contour retrieving routine + cv::findContours(image, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE); + + std::vector< std::vector > quadContours; + + for (size_t i = 0; i < contours.size(); ++i) + { + // Reject contours with a too small perimeter and contours which are + // completely surrounded by another contour + // MARTIN: If this function is called during PART 1, then the parameter "first run" + // is set to "true". This guarantees, that only "nice" squares are detected. 
+ // During PART 2, we allow the polygonal approximation function below to + // approximate more freely, which can result in recognized "squares" that are in + // reality multiple blurred and sticked together squares. + std::vector& contour = contours.at(i); + + if (hierarchy[i][3] == -1 || cv::contourArea(contour) < minSize) + { + continue; + } + + int min_approx_level = 1, max_approx_level; + if (firstRun) + { + max_approx_level = 3; + } + else + { + max_approx_level = MAX_CONTOUR_APPROX; + } + + std::vector approxContour; + for (int approx_level = min_approx_level; approx_level <= max_approx_level; approx_level++) + { + cv::approxPolyDP(contour, approxContour, approx_level, true); + + if (approxContour.size() == 4) + { + break; + } + } + + // Reject non-quadrangles + if (approxContour.size() == 4 && cv::isContourConvex(approxContour)) + { + double p = cv::arcLength(approxContour, true); + double area = cv::contourArea(approxContour); + + cv::Point pt[4]; + for (int i = 0; i < 4; i++) + { + pt[i] = approxContour[i]; + } + + cv::Point dp = pt[0] - pt[2]; + double d1 = sqrt(dp.dot(dp)); + + dp = pt[1] - pt[3]; + double d2 = sqrt(dp.dot(dp)); + + // PHILIPG: Only accept those quadrangles which are more + // square than rectangular and which are big enough + dp = pt[0] - pt[1]; + double d3 = sqrt(dp.dot(dp)); + dp = pt[1] - pt[2]; + double d4 = sqrt(dp.dot(dp)); + + if (!(flags & CV_CALIB_CB_FILTER_QUADS) || + (d3*4 > d4 && d4*4 > d3 && d3*d4 < area*1.5 && area > minSize && + d1 >= 0.15 * p && d2 >= 0.15 * p)) + { + quadContours.push_back(approxContour); + } + } + } + + // Allocate quad & corner buffers + quads.resize(quadContours.size()); + + // Create array of quads structures + for (size_t idx = 0; idx < quadContours.size(); ++idx) + { + ChessboardQuadPtr& q = quads.at(idx); + std::vector& contour = quadContours.at(idx); + + // Reset group ID + q.reset(new ChessboardQuad); + assert(contour.size() == 4); + + for (int i = 0; i < 4; ++i) + { + cv::Point2f pt = 
contour.at(i); + ChessboardCornerPtr corner(new ChessboardCorner); + corner->pt = pt; + q->corners[i] = corner; + } + + for (int i = 0; i < 4; ++i) + { + cv::Point2f dp = q->corners[i]->pt - q->corners[(i+1)&3]->pt; + float d = dp.dot(dp); + if (q->edge_len > d) + { + q->edge_len = d; + } + } + } +} + +bool +Chessboard::checkQuadGroup(std::vector& quads, + std::vector& corners, + cv::Size patternSize) +{ + // Initialize + bool flagRow = false; + bool flagColumn = false; + int height = -1; + int width = -1; + + // Compute the minimum and maximum row / column ID + // (it is unlikely that more than 8bit checkers are used per dimension) + int min_row = 127; + int max_row = -127; + int min_col = 127; + int max_col = -127; + + for (size_t i = 0; i < quads.size(); ++i) + { + ChessboardQuadPtr& q = quads.at(i); + + for (int j = 0; j < 4; ++j) + { + ChessboardCornerPtr& c = q->corners[j]; + + if (c->row > max_row) + { + max_row = c->row; + } + if (c->row < min_row) + { + min_row = c->row; + } + if (c->column > max_col) + { + max_col = c->column; + } + if (c->column < min_col) + { + min_col = c->column; + } + } + } + + // If in a given direction the target pattern size is reached, we know exactly how + // the checkerboard is oriented. + // Else we need to prepare enough "dummy" corners for the worst case. 
+ for (size_t i = 0; i < quads.size(); ++i) + { + ChessboardQuadPtr& q = quads.at(i); + + for (int j = 0; j < 4; ++j) + { + ChessboardCornerPtr& c = q->corners[j]; + + if (c->column == max_col && c->row != min_row && c->row != max_row && !c->needsNeighbor) + { + flagColumn = true; + } + if (c->row == max_row && c->column != min_col && c->column != max_col && !c->needsNeighbor) + { + flagRow = true; + } + } + } + + if (flagColumn) + { + if (max_col - min_col == patternSize.width + 1) + { + width = patternSize.width; + height = patternSize.height; + } + else + { + width = patternSize.height; + height = patternSize.width; + } + } + else if (flagRow) + { + if (max_row - min_row == patternSize.width + 1) + { + height = patternSize.width; + width = patternSize.height; + } + else + { + height = patternSize.height; + width = patternSize.width; + } + } + else + { + // If target pattern size is not reached in at least one of the two + // directions, then we do not know where the remaining corners are + // located. Account for this. 
+ width = std::max(patternSize.width, patternSize.height); + height = std::max(patternSize.width, patternSize.height); + } + + ++min_row; + ++min_col; + max_row = min_row + height - 1; + max_col = min_col + width - 1; + + corners.clear(); + + int linkedBorderCorners = 0; + + // Write the corners in increasing order to the output file + for (int i = min_row; i <= max_row; ++i) + { + for (int j = min_col; j <= max_col; ++j) + { + // Reset the iterator + int iter = 1; + + for (int k = 0; k < (int)quads.size(); ++k) + { + ChessboardQuadPtr& quad = quads.at(k); + + for (int l = 0; l < 4; ++l) + { + ChessboardCornerPtr& c = quad->corners[l]; + + if (c->row == i && c->column == j) + { + bool boardEdge = false; + if (i == min_row || i == max_row || + j == min_col || j == max_col) + { + boardEdge = true; + } + + if ((iter == 1 && boardEdge) || (iter == 2 && !boardEdge)) + { + // The respective row and column have been found + corners.push_back(quad->corners[l]); + } + + if (iter == 2 && boardEdge) + { + ++linkedBorderCorners; + } + + // If the iterator is larger than two, this means that more than + // two corners have the same row / column entries. 
Then some + // linking errors must have occured and we should not use the found + // pattern + if (iter > 2) + { + return false; + } + + iter++; + } + } + } + } + } + + if ((int)corners.size() != patternSize.width * patternSize.height || + linkedBorderCorners < (patternSize.width * 2 + patternSize.height * 2 - 2) * 0.75f) + { + return false; + } + + // check that no corners lie at image boundary + float border = 5.0f; + for (int i = 0; i < (int)corners.size(); ++i) + { + ChessboardCornerPtr& c = corners.at(i); + + if (c->pt.x < border || c->pt.x > mImage.cols - border || + c->pt.y < border || c->pt.y > mImage.rows - border) + { + return false; + } + } + + // check if we need to transpose the board + if (width != patternSize.width) + { + std::swap(width, height); + + std::vector outputCorners; + outputCorners.resize(corners.size()); + + for (int i = 0; i < height; ++i) + { + for (int j = 0; j < width; ++j) + { + outputCorners.at(i * width + j) = corners.at(j * height + i); + } + } + + corners = outputCorners; + } + + // check if we need to revert the order in each row + cv::Point2f p0 = corners.at(0)->pt; + cv::Point2f p1 = corners.at(width-1)->pt; + cv::Point2f p2 = corners.at(width)->pt; + + if ((p1 - p0).cross(p2 - p0) < 0.0f) + { + for (int i = 0; i < height; ++i) + { + for (int j = 0; j < width / 2; ++j) + { + std::swap(corners.at(i * width + j), corners.at(i * width + width - j - 1)); + } + } + } + + p0 = corners.at(0)->pt; + p2 = corners.at(width)->pt; + + // check if we need to rotate the board + if (p2.y < p0.y) + { + std::vector outputCorners; + outputCorners.resize(corners.size()); + + for (int i = 0; i < height; ++i) + { + for (int j = 0; j < width; ++j) + { + outputCorners.at(i * width + j) = corners.at((height - i - 1) * width + width - j - 1); + } + } + + corners = outputCorners; + } + + return true; +} + +void +Chessboard::getQuadrangleHypotheses(const std::vector< std::vector >& contours, + std::vector< std::pair >& quads, + int classId) const +{ + 
const float minAspectRatio = 0.2f; + const float maxAspectRatio = 5.0f; + const float minBoxSize = 10.0f; + + for (size_t i = 0; i < contours.size(); ++i) + { + cv::RotatedRect box = cv::minAreaRect(contours.at(i)); + float boxSize = std::max(box.size.width, box.size.height); + if (boxSize < minBoxSize) + { + continue; + } + + float aspectRatio = box.size.width / std::max(box.size.height, 1.0f); + + if (aspectRatio < minAspectRatio || aspectRatio > maxAspectRatio) + { + continue; + } + + quads.push_back(std::pair(boxSize, classId)); + } +} + +bool less_pred(const std::pair& p1, const std::pair& p2) +{ + return p1.first < p2.first; +} + +void countClasses(const std::vector >& pairs, size_t idx1, size_t idx2, std::vector& counts) +{ + counts.assign(2, 0); + for (size_t i = idx1; i != idx2; ++i) + { + counts[pairs[i].second]++; + } +} + +bool +Chessboard::checkChessboard(const cv::Mat& image, cv::Size patternSize) const +{ + const int erosionCount = 1; + const float blackLevel = 20.f; + const float whiteLevel = 130.f; + const float blackWhiteGap = 70.f; + + cv::Mat white = image.clone(); + + cv::Mat black = image.clone(); + + cv::erode(white, white, cv::Mat(), cv::Point(-1,-1), erosionCount); + cv::dilate(black, black, cv::Mat(), cv::Point(-1,-1), erosionCount); + + cv::Mat thresh(image.rows, image.cols, CV_8UC1); + + bool result = false; + for (float threshLevel = blackLevel; threshLevel < whiteLevel && !result; threshLevel += 20.0f) + { + cv::threshold(white, thresh, threshLevel + blackWhiteGap, 255, CV_THRESH_BINARY); + + std::vector< std::vector > contours; + std::vector hierarchy; + + // Initialize contour retrieving routine + cv::findContours(thresh, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE); + + std::vector > quads; + getQuadrangleHypotheses(contours, quads, 1); + + cv::threshold(black, thresh, threshLevel, 255, CV_THRESH_BINARY_INV); + + cv::findContours(thresh, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE); + 
getQuadrangleHypotheses(contours, quads, 0); + + const size_t min_quads_count = patternSize.width * patternSize.height / 2; + std::sort(quads.begin(), quads.end(), less_pred); + + // now check if there are many hypotheses with similar sizes + // do this by floodfill-style algorithm + const float sizeRelDev = 0.4f; + + for (size_t i = 0; i < quads.size(); ++i) + { + size_t j = i + 1; + for (; j < quads.size(); ++j) + { + if (quads[j].first / quads[i].first > 1.0f + sizeRelDev) + { + break; + } + } + + if (j + 1 > min_quads_count + i) + { + // check the number of black and white squares + std::vector counts; + countClasses(quads, i, j, counts); + const int blackCount = lroundf(ceilf(patternSize.width / 2.0f) * ceilf(patternSize.height / 2.0f)); + const int whiteCount = lroundf(floorf(patternSize.width / 2.0f) * floorf(patternSize.height / 2.0f)); + if (counts[0] < blackCount * 0.75f || + counts[1] < whiteCount * 0.75f) + { + continue; + } + result = true; + break; + } + } + } + + return result; +} + +bool +Chessboard::checkBoardMonotony(std::vector& corners, + cv::Size patternSize) +{ + const float threshFactor = 0.2f; + + Spline splineXY, splineYX; + splineXY.setLowBC(Spline::PARABOLIC_RUNOUT_BC); + splineXY.setHighBC(Spline::PARABOLIC_RUNOUT_BC); + splineYX.setLowBC(Spline::PARABOLIC_RUNOUT_BC); + splineYX.setHighBC(Spline::PARABOLIC_RUNOUT_BC); + + // check if each row of corners approximates a cubic spline + for (int i = 0; i < patternSize.height; ++i) + { + splineXY.clear(); + splineYX.clear(); + + cv::Point2f p[3]; + p[0] = corners.at(i * patternSize.width)->pt; + p[1] = corners.at(i * patternSize.width + patternSize.width / 2)->pt; + p[2] = corners.at(i * patternSize.width + patternSize.width - 1)->pt; + + for (int j = 0; j < 3; ++j) + { + splineXY.addPoint(p[j].x, p[j].y); + splineYX.addPoint(p[j].y, p[j].x); + } + + for (int j = 1; j < patternSize.width - 1; ++j) + { + cv::Point2f& p_j = corners.at(i * patternSize.width + j)->pt; + + float thresh = 
std::numeric_limits::max(); + + // up-neighbor + if (i > 0) + { + cv::Point2f& neighbor = corners.at((i - 1) * patternSize.width + j)->pt; + thresh = fminf(thresh, cv::norm(neighbor - p_j)); + } + // down-neighbor + if (i < patternSize.height - 1) + { + cv::Point2f& neighbor = corners.at((i + 1) * patternSize.width + j)->pt; + thresh = fminf(thresh, cv::norm(neighbor - p_j)); + } + // left-neighbor + { + cv::Point2f& neighbor = corners.at(i * patternSize.width + j - 1)->pt; + thresh = fminf(thresh, cv::norm(neighbor - p_j)); + } + // right-neighbor + { + cv::Point2f& neighbor = corners.at(i * patternSize.width + j + 1)->pt; + thresh = fminf(thresh, cv::norm(neighbor - p_j)); + } + + thresh *= threshFactor; + + if (fminf(fabsf(splineXY(p_j.x) - p_j.y), fabsf(splineYX(p_j.y) - p_j.x)) > thresh) + { + return false; + } + } + } + + // check if each column of corners approximates a cubic spline + for (int j = 0; j < patternSize.width; ++j) + { + splineXY.clear(); + splineYX.clear(); + + cv::Point2f p[3]; + p[0] = corners.at(j)->pt; + p[1] = corners.at(patternSize.height / 2 * patternSize.width + j)->pt; + p[2] = corners.at((patternSize.height - 1) * patternSize.width + j)->pt; + + for (int i = 0; i < 3; ++i) + { + splineXY.addPoint(p[i].x, p[i].y); + splineYX.addPoint(p[i].y, p[i].x); + } + + for (int i = 1; i < patternSize.height - 1; ++i) + { + cv::Point2f& p_i = corners.at(i * patternSize.width + j)->pt; + + float thresh = std::numeric_limits::max(); + + // up-neighbor + { + cv::Point2f& neighbor = corners.at((i - 1) * patternSize.width + j)->pt; + thresh = fminf(thresh, cv::norm(neighbor - p_i)); + } + // down-neighbor + { + cv::Point2f& neighbor = corners.at((i + 1) * patternSize.width + j)->pt; + thresh = fminf(thresh, cv::norm(neighbor - p_i)); + } + // left-neighbor + if (j > 0) + { + cv::Point2f& neighbor = corners.at(i * patternSize.width + j - 1)->pt; + thresh = fminf(thresh, cv::norm(neighbor - p_i)); + } + // right-neighbor + if (j < patternSize.width - 1) 
+ { + cv::Point2f& neighbor = corners.at(i * patternSize.width + j + 1)->pt; + thresh = fminf(thresh, cv::norm(neighbor - p_i)); + } + + thresh *= threshFactor; + + if (fminf(fabsf(splineXY(p_i.x) - p_i.y), fabsf(splineYX(p_i.y) - p_i.x)) > thresh) + { + return false; + } + } + } + + return true; +} + +bool +Chessboard::matchCorners(ChessboardQuadPtr& quad1, int corner1, + ChessboardQuadPtr& quad2, int corner2) const +{ + // First Check everything from the viewpoint of the + // current quad compute midpoints of "parallel" quad + // sides 1 + float x1 = (quad1->corners[corner1]->pt.x + quad1->corners[(corner1+1)%4]->pt.x)/2; + float y1 = (quad1->corners[corner1]->pt.y + quad1->corners[(corner1+1)%4]->pt.y)/2; + float x2 = (quad1->corners[(corner1+2)%4]->pt.x + quad1->corners[(corner1+3)%4]->pt.x)/2; + float y2 = (quad1->corners[(corner1+2)%4]->pt.y + quad1->corners[(corner1+3)%4]->pt.y)/2; + // compute midpoints of "parallel" quad sides 2 + float x3 = (quad1->corners[corner1]->pt.x + quad1->corners[(corner1+3)%4]->pt.x)/2; + float y3 = (quad1->corners[corner1]->pt.y + quad1->corners[(corner1+3)%4]->pt.y)/2; + float x4 = (quad1->corners[(corner1+1)%4]->pt.x + quad1->corners[(corner1+2)%4]->pt.x)/2; + float y4 = (quad1->corners[(corner1+1)%4]->pt.y + quad1->corners[(corner1+2)%4]->pt.y)/2; + + // MARTIN: Heuristic + // For corner2 of quad2 to be considered, + // it needs to be on the same side of the two lines as + // corner1. 
This is given, if the cross product has + // the same sign for both computations below: + float a1 = x1 - x2; + float b1 = y1 - y2; + // the current corner + float c11 = quad1->corners[corner1]->pt.x - x2; + float d11 = quad1->corners[corner1]->pt.y - y2; + // the candidate corner + float c12 = quad2->corners[corner2]->pt.x - x2; + float d12 = quad2->corners[corner2]->pt.y - y2; + float sign11 = a1*d11 - c11*b1; + float sign12 = a1*d12 - c12*b1; + + float a2 = x3 - x4; + float b2 = y3 - y4; + // the current corner + float c21 = quad1->corners[corner1]->pt.x - x4; + float d21 = quad1->corners[corner1]->pt.y - y4; + // the candidate corner + float c22 = quad2->corners[corner2]->pt.x - x4; + float d22 = quad2->corners[corner2]->pt.y - y4; + float sign21 = a2*d21 - c21*b2; + float sign22 = a2*d22 - c22*b2; + + // Also make shure that two border quads of the same row or + // column don't link. Check from the current corner's view, + // whether the corner diagonal from the candidate corner + // is also on the same side of the two lines as the current + // corner and the candidate corner. + float c13 = quad2->corners[(corner2+2)%4]->pt.x - x2; + float d13 = quad2->corners[(corner2+2)%4]->pt.y - y2; + float c23 = quad2->corners[(corner2+2)%4]->pt.x - x4; + float d23 = quad2->corners[(corner2+2)%4]->pt.y - y4; + float sign13 = a1*d13 - c13*b1; + float sign23 = a2*d23 - c23*b2; + + + // Second: Then check everything from the viewpoint of + // the candidate quad. 
Compute midpoints of "parallel" + // quad sides 1 + float u1 = (quad2->corners[corner2]->pt.x + quad2->corners[(corner2+1)%4]->pt.x)/2; + float v1 = (quad2->corners[corner2]->pt.y + quad2->corners[(corner2+1)%4]->pt.y)/2; + float u2 = (quad2->corners[(corner2+2)%4]->pt.x + quad2->corners[(corner2+3)%4]->pt.x)/2; + float v2 = (quad2->corners[(corner2+2)%4]->pt.y + quad2->corners[(corner2+3)%4]->pt.y)/2; + // compute midpoints of "parallel" quad sides 2 + float u3 = (quad2->corners[corner2]->pt.x + quad2->corners[(corner2+3)%4]->pt.x)/2; + float v3 = (quad2->corners[corner2]->pt.y + quad2->corners[(corner2+3)%4]->pt.y)/2; + float u4 = (quad2->corners[(corner2+1)%4]->pt.x + quad2->corners[(corner2+2)%4]->pt.x)/2; + float v4 = (quad2->corners[(corner2+1)%4]->pt.y + quad2->corners[(corner2+2)%4]->pt.y)/2; + + // MARTIN: Heuristic + // For corner2 of quad2 to be considered, + // it needs to be on the same side of the two lines as + // corner1. This is given, if the cross product has + // the same sign for both computations below: + float a3 = u1 - u2; + float b3 = v1 - v2; + // the current corner + float c31 = quad1->corners[corner1]->pt.x - u2; + float d31 = quad1->corners[corner1]->pt.y - v2; + // the candidate corner + float c32 = quad2->corners[corner2]->pt.x - u2; + float d32 = quad2->corners[corner2]->pt.y - v2; + float sign31 = a3*d31-c31*b3; + float sign32 = a3*d32-c32*b3; + + float a4 = u3 - u4; + float b4 = v3 - v4; + // the current corner + float c41 = quad1->corners[corner1]->pt.x - u4; + float d41 = quad1->corners[corner1]->pt.y - v4; + // the candidate corner + float c42 = quad2->corners[corner2]->pt.x - u4; + float d42 = quad2->corners[corner2]->pt.y - v4; + float sign41 = a4*d41-c41*b4; + float sign42 = a4*d42-c42*b4; + + // Also make sure that two border quads of the same row or + // column don't link. 
Check from the candidate corner's view, + // whether the corner diagonal from the current corner + // is also on the same side of the two lines as the current + // corner and the candidate corner. + float c33 = quad1->corners[(corner1+2)%4]->pt.x - u2; + float d33 = quad1->corners[(corner1+2)%4]->pt.y - v2; + float c43 = quad1->corners[(corner1+2)%4]->pt.x - u4; + float d43 = quad1->corners[(corner1+2)%4]->pt.y - v4; + float sign33 = a3*d33-c33*b3; + float sign43 = a4*d43-c43*b4; + + + // This time we also need to make shure, that no quad + // is linked to a quad of another dilation run which + // may lie INSIDE it!!! + // Third: Therefore check everything from the viewpoint + // of the current quad compute midpoints of "parallel" + // quad sides 1 + float x5 = quad1->corners[corner1]->pt.x; + float y5 = quad1->corners[corner1]->pt.y; + float x6 = quad1->corners[(corner1+1)%4]->pt.x; + float y6 = quad1->corners[(corner1+1)%4]->pt.y; + // compute midpoints of "parallel" quad sides 2 + float x7 = x5; + float y7 = y5; + float x8 = quad1->corners[(corner1+3)%4]->pt.x; + float y8 = quad1->corners[(corner1+3)%4]->pt.y; + + // MARTIN: Heuristic + // For corner2 of quad2 to be considered, + // it needs to be on the other side of the two lines than + // corner1. 
This is given, if the cross product has + // a different sign for both computations below: + float a5 = x6 - x5; + float b5 = y6 - y5; + // the current corner + float c51 = quad1->corners[(corner1+2)%4]->pt.x - x5; + float d51 = quad1->corners[(corner1+2)%4]->pt.y - y5; + // the candidate corner + float c52 = quad2->corners[corner2]->pt.x - x5; + float d52 = quad2->corners[corner2]->pt.y - y5; + float sign51 = a5*d51 - c51*b5; + float sign52 = a5*d52 - c52*b5; + + float a6 = x8 - x7; + float b6 = y8 - y7; + // the current corner + float c61 = quad1->corners[(corner1+2)%4]->pt.x - x7; + float d61 = quad1->corners[(corner1+2)%4]->pt.y - y7; + // the candidate corner + float c62 = quad2->corners[corner2]->pt.x - x7; + float d62 = quad2->corners[corner2]->pt.y - y7; + float sign61 = a6*d61 - c61*b6; + float sign62 = a6*d62 - c62*b6; + + + // Fourth: Then check everything from the viewpoint of + // the candidate quad compute midpoints of "parallel" + // quad sides 1 + float u5 = quad2->corners[corner2]->pt.x; + float v5 = quad2->corners[corner2]->pt.y; + float u6 = quad2->corners[(corner2+1)%4]->pt.x; + float v6 = quad2->corners[(corner2+1)%4]->pt.y; + // compute midpoints of "parallel" quad sides 2 + float u7 = u5; + float v7 = v5; + float u8 = quad2->corners[(corner2+3)%4]->pt.x; + float v8 = quad2->corners[(corner2+3)%4]->pt.y; + + // MARTIN: Heuristic + // For corner2 of quad2 to be considered, + // it needs to be on the other side of the two lines than + // corner1. 
This is given, if the cross product has + // a different sign for both computations below: + float a7 = u6 - u5; + float b7 = v6 - v5; + // the current corner + float c71 = quad1->corners[corner1]->pt.x - u5; + float d71 = quad1->corners[corner1]->pt.y - v5; + // the candidate corner + float c72 = quad2->corners[(corner2+2)%4]->pt.x - u5; + float d72 = quad2->corners[(corner2+2)%4]->pt.y - v5; + float sign71 = a7*d71-c71*b7; + float sign72 = a7*d72-c72*b7; + + float a8 = u8 - u7; + float b8 = v8 - v7; + // the current corner + float c81 = quad1->corners[corner1]->pt.x - u7; + float d81 = quad1->corners[corner1]->pt.y - v7; + // the candidate corner + float c82 = quad2->corners[(corner2+2)%4]->pt.x - u7; + float d82 = quad2->corners[(corner2+2)%4]->pt.y - v7; + float sign81 = a8*d81-c81*b8; + float sign82 = a8*d82-c82*b8; + + // Check whether conditions are fulfilled + if (((sign11 < 0 && sign12 < 0) || (sign11 > 0 && sign12 > 0)) && + ((sign21 < 0 && sign22 < 0) || (sign21 > 0 && sign22 > 0)) && + ((sign31 < 0 && sign32 < 0) || (sign31 > 0 && sign32 > 0)) && + ((sign41 < 0 && sign42 < 0) || (sign41 > 0 && sign42 > 0)) && + ((sign11 < 0 && sign13 < 0) || (sign11 > 0 && sign13 > 0)) && + ((sign21 < 0 && sign23 < 0) || (sign21 > 0 && sign23 > 0)) && + ((sign31 < 0 && sign33 < 0) || (sign31 > 0 && sign33 > 0)) && + ((sign41 < 0 && sign43 < 0) || (sign41 > 0 && sign43 > 0)) && + ((sign51 < 0 && sign52 > 0) || (sign51 > 0 && sign52 < 0)) && + ((sign61 < 0 && sign62 > 0) || (sign61 > 0 && sign62 < 0)) && + ((sign71 < 0 && sign72 > 0) || (sign71 > 0 && sign72 < 0)) && + ((sign81 < 0 && sign82 > 0) || (sign81 > 0 && sign82 < 0))) + { + return true; + } + else + { + return false; + } +} + +} diff --git a/camera_models/src/gpl/EigenQuaternionParameterization.cc b/camera_models/src/gpl/EigenQuaternionParameterization.cc new file mode 100644 index 0000000..e97b057 --- /dev/null +++ b/camera_models/src/gpl/EigenQuaternionParameterization.cc @@ -0,0 +1,46 @@ +#include 
"camodocal/gpl/EigenQuaternionParameterization.h" + +#include + +namespace camodocal +{ + +bool +EigenQuaternionParameterization::Plus(const double* x, + const double* delta, + double* x_plus_delta) const +{ + const double norm_delta = + sqrt(delta[0] * delta[0] + delta[1] * delta[1] + delta[2] * delta[2]); + if (norm_delta > 0.0) + { + const double sin_delta_by_delta = (sin(norm_delta) / norm_delta); + double q_delta[4]; + q_delta[0] = sin_delta_by_delta * delta[0]; + q_delta[1] = sin_delta_by_delta * delta[1]; + q_delta[2] = sin_delta_by_delta * delta[2]; + q_delta[3] = cos(norm_delta); + EigenQuaternionProduct(q_delta, x, x_plus_delta); + } + else + { + for (int i = 0; i < 4; ++i) + { + x_plus_delta[i] = x[i]; + } + } + return true; +} + +bool +EigenQuaternionParameterization::ComputeJacobian(const double* x, + double* jacobian) const +{ + jacobian[0] = x[3]; jacobian[1] = x[2]; jacobian[2] = -x[1]; // NOLINT + jacobian[3] = -x[2]; jacobian[4] = x[3]; jacobian[5] = x[0]; // NOLINT + jacobian[6] = x[1]; jacobian[7] = -x[0]; jacobian[8] = x[3]; // NOLINT + jacobian[9] = -x[0]; jacobian[10] = -x[1]; jacobian[11] = -x[2]; // NOLINT + return true; +} + +} diff --git a/camera_models/src/gpl/gpl.cc b/camera_models/src/gpl/gpl.cc new file mode 100644 index 0000000..9142a3a --- /dev/null +++ b/camera_models/src/gpl/gpl.cc @@ -0,0 +1,928 @@ +#include "camodocal/gpl/gpl.h" + +#include +#ifdef _WIN32 +#include +#else +#include +#endif + + +// source: https://stackoverflow.com/questions/5167269/clock-gettime-alternative-in-mac-os-x +#ifdef __APPLE__ +#include +#define ORWL_NANO (+1.0E-9) +#define ORWL_GIGA UINT64_C(1000000000) + +static double orwl_timebase = 0.0; +static uint64_t orwl_timestart = 0; + +struct timespec orwl_gettime(void) { + // be more careful in a multithreaded environement + if (!orwl_timestart) { + mach_timebase_info_data_t tb = { 0 }; + mach_timebase_info(&tb); + orwl_timebase = tb.numer; + orwl_timebase /= tb.denom; + orwl_timestart = 
mach_absolute_time(); + } + struct timespec t; + double diff = (mach_absolute_time() - orwl_timestart) * orwl_timebase; + t.tv_sec = diff * ORWL_NANO; + t.tv_nsec = diff - (t.tv_sec * ORWL_GIGA); + return t; +} +#endif // __APPLE__ + + +const double WGS84_A = 6378137.0; +const double WGS84_ECCSQ = 0.00669437999013; + +// Windows lacks fminf +#ifndef fminf +#define fminf(x, y) (((x) < (y)) ? (x) : (y)) +#endif + +namespace camodocal +{ + +double hypot3(double x, double y, double z) +{ + return sqrt(square(x) + square(y) + square(z)); +} + +float hypot3f(float x, float y, float z) +{ + return sqrtf(square(x) + square(y) + square(z)); +} + +double d2r(double deg) +{ + return deg / 180.0 * M_PI; +} + +float d2r(float deg) +{ + return deg / 180.0f * M_PI; +} + +double r2d(double rad) +{ + return rad / M_PI * 180.0; +} + +float r2d(float rad) +{ + return rad / M_PI * 180.0f; +} + +double sinc(double theta) +{ + return sin(theta) / theta; +} + +#ifdef _WIN32 +#include +#include +#include +LARGE_INTEGER +getFILETIMEoffset() +{ + SYSTEMTIME s; + FILETIME f; + LARGE_INTEGER t; + + s.wYear = 1970; + s.wMonth = 1; + s.wDay = 1; + s.wHour = 0; + s.wMinute = 0; + s.wSecond = 0; + s.wMilliseconds = 0; + SystemTimeToFileTime(&s, &f); + t.QuadPart = f.dwHighDateTime; + t.QuadPart <<= 32; + t.QuadPart |= f.dwLowDateTime; + return (t); +} + +int +clock_gettime(int X, struct timespec *tp) +{ + LARGE_INTEGER t; + FILETIME f; + double microseconds; + static LARGE_INTEGER offset; + static double frequencyToMicroseconds; + static int initialized = 0; + static BOOL usePerformanceCounter = 0; + + if (!initialized) { + LARGE_INTEGER performanceFrequency; + initialized = 1; + usePerformanceCounter = QueryPerformanceFrequency(&performanceFrequency); + if (usePerformanceCounter) { + QueryPerformanceCounter(&offset); + frequencyToMicroseconds = (double)performanceFrequency.QuadPart / 1000000.; + } else { + offset = getFILETIMEoffset(); + frequencyToMicroseconds = 10.; + } + } + if 
(usePerformanceCounter) QueryPerformanceCounter(&t); + else { + GetSystemTimeAsFileTime(&f); + t.QuadPart = f.dwHighDateTime; + t.QuadPart <<= 32; + t.QuadPart |= f.dwLowDateTime; + } + + t.QuadPart -= offset.QuadPart; + microseconds = (double)t.QuadPart / frequencyToMicroseconds; + t.QuadPart = microseconds; + tp->tv_sec = t.QuadPart / 1000000; + tp->tv_nsec = (t.QuadPart % 1000000) * 1000; + return (0); +} +#endif + +unsigned long long timeInMicroseconds(void) +{ + struct timespec tp; +#ifdef __APPLE__ + tp = orwl_gettime(); +#else + clock_gettime(CLOCK_REALTIME, &tp); +#endif + + return tp.tv_sec * 1000000 + tp.tv_nsec / 1000; +} + +double timeInSeconds(void) +{ + struct timespec tp; +#ifdef __APPLE__ + tp = orwl_gettime(); +#else + clock_gettime(CLOCK_REALTIME, &tp); +#endif + + return static_cast(tp.tv_sec) + + static_cast(tp.tv_nsec) / 1000000000.0; +} + +float colormapAutumn[128][3] = +{ + {1.0f,0.f,0.f}, + {1.0f,0.007874f,0.f}, + {1.0f,0.015748f,0.f}, + {1.0f,0.023622f,0.f}, + {1.0f,0.031496f,0.f}, + {1.0f,0.03937f,0.f}, + {1.0f,0.047244f,0.f}, + {1.0f,0.055118f,0.f}, + {1.0f,0.062992f,0.f}, + {1.0f,0.070866f,0.f}, + {1.0f,0.07874f,0.f}, + {1.0f,0.086614f,0.f}, + {1.0f,0.094488f,0.f}, + {1.0f,0.10236f,0.f}, + {1.0f,0.11024f,0.f}, + {1.0f,0.11811f,0.f}, + {1.0f,0.12598f,0.f}, + {1.0f,0.13386f,0.f}, + {1.0f,0.14173f,0.f}, + {1.0f,0.14961f,0.f}, + {1.0f,0.15748f,0.f}, + {1.0f,0.16535f,0.f}, + {1.0f,0.17323f,0.f}, + {1.0f,0.1811f,0.f}, + {1.0f,0.18898f,0.f}, + {1.0f,0.19685f,0.f}, + {1.0f,0.20472f,0.f}, + {1.0f,0.2126f,0.f}, + {1.0f,0.22047f,0.f}, + {1.0f,0.22835f,0.f}, + {1.0f,0.23622f,0.f}, + {1.0f,0.24409f,0.f}, + {1.0f,0.25197f,0.f}, + {1.0f,0.25984f,0.f}, + {1.0f,0.26772f,0.f}, + {1.0f,0.27559f,0.f}, + {1.0f,0.28346f,0.f}, + {1.0f,0.29134f,0.f}, + {1.0f,0.29921f,0.f}, + {1.0f,0.30709f,0.f}, + {1.0f,0.31496f,0.f}, + {1.0f,0.32283f,0.f}, + {1.0f,0.33071f,0.f}, + {1.0f,0.33858f,0.f}, + {1.0f,0.34646f,0.f}, + {1.0f,0.35433f,0.f}, + {1.0f,0.3622f,0.f}, + 
{1.0f,0.37008f,0.f}, + {1.0f,0.37795f,0.f}, + {1.0f,0.38583f,0.f}, + {1.0f,0.3937f,0.f}, + {1.0f,0.40157f,0.f}, + {1.0f,0.40945f,0.f}, + {1.0f,0.41732f,0.f}, + {1.0f,0.4252f,0.f}, + {1.0f,0.43307f,0.f}, + {1.0f,0.44094f,0.f}, + {1.0f,0.44882f,0.f}, + {1.0f,0.45669f,0.f}, + {1.0f,0.46457f,0.f}, + {1.0f,0.47244f,0.f}, + {1.0f,0.48031f,0.f}, + {1.0f,0.48819f,0.f}, + {1.0f,0.49606f,0.f}, + {1.0f,0.50394f,0.f}, + {1.0f,0.51181f,0.f}, + {1.0f,0.51969f,0.f}, + {1.0f,0.52756f,0.f}, + {1.0f,0.53543f,0.f}, + {1.0f,0.54331f,0.f}, + {1.0f,0.55118f,0.f}, + {1.0f,0.55906f,0.f}, + {1.0f,0.56693f,0.f}, + {1.0f,0.5748f,0.f}, + {1.0f,0.58268f,0.f}, + {1.0f,0.59055f,0.f}, + {1.0f,0.59843f,0.f}, + {1.0f,0.6063f,0.f}, + {1.0f,0.61417f,0.f}, + {1.0f,0.62205f,0.f}, + {1.0f,0.62992f,0.f}, + {1.0f,0.6378f,0.f}, + {1.0f,0.64567f,0.f}, + {1.0f,0.65354f,0.f}, + {1.0f,0.66142f,0.f}, + {1.0f,0.66929f,0.f}, + {1.0f,0.67717f,0.f}, + {1.0f,0.68504f,0.f}, + {1.0f,0.69291f,0.f}, + {1.0f,0.70079f,0.f}, + {1.0f,0.70866f,0.f}, + {1.0f,0.71654f,0.f}, + {1.0f,0.72441f,0.f}, + {1.0f,0.73228f,0.f}, + {1.0f,0.74016f,0.f}, + {1.0f,0.74803f,0.f}, + {1.0f,0.75591f,0.f}, + {1.0f,0.76378f,0.f}, + {1.0f,0.77165f,0.f}, + {1.0f,0.77953f,0.f}, + {1.0f,0.7874f,0.f}, + {1.0f,0.79528f,0.f}, + {1.0f,0.80315f,0.f}, + {1.0f,0.81102f,0.f}, + {1.0f,0.8189f,0.f}, + {1.0f,0.82677f,0.f}, + {1.0f,0.83465f,0.f}, + {1.0f,0.84252f,0.f}, + {1.0f,0.85039f,0.f}, + {1.0f,0.85827f,0.f}, + {1.0f,0.86614f,0.f}, + {1.0f,0.87402f,0.f}, + {1.0f,0.88189f,0.f}, + {1.0f,0.88976f,0.f}, + {1.0f,0.89764f,0.f}, + {1.0f,0.90551f,0.f}, + {1.0f,0.91339f,0.f}, + {1.0f,0.92126f,0.f}, + {1.0f,0.92913f,0.f}, + {1.0f,0.93701f,0.f}, + {1.0f,0.94488f,0.f}, + {1.0f,0.95276f,0.f}, + {1.0f,0.96063f,0.f}, + {1.0f,0.9685f,0.f}, + {1.0f,0.97638f,0.f}, + {1.0f,0.98425f,0.f}, + {1.0f,0.99213f,0.f}, + {1.0f,1.0f,0.0f} +}; + + +float colormapJet[128][3] = +{ + {0.0f,0.0f,0.53125f}, + {0.0f,0.0f,0.5625f}, + {0.0f,0.0f,0.59375f}, + {0.0f,0.0f,0.625f}, + 
{0.0f,0.0f,0.65625f}, + {0.0f,0.0f,0.6875f}, + {0.0f,0.0f,0.71875f}, + {0.0f,0.0f,0.75f}, + {0.0f,0.0f,0.78125f}, + {0.0f,0.0f,0.8125f}, + {0.0f,0.0f,0.84375f}, + {0.0f,0.0f,0.875f}, + {0.0f,0.0f,0.90625f}, + {0.0f,0.0f,0.9375f}, + {0.0f,0.0f,0.96875f}, + {0.0f,0.0f,1.0f}, + {0.0f,0.03125f,1.0f}, + {0.0f,0.0625f,1.0f}, + {0.0f,0.09375f,1.0f}, + {0.0f,0.125f,1.0f}, + {0.0f,0.15625f,1.0f}, + {0.0f,0.1875f,1.0f}, + {0.0f,0.21875f,1.0f}, + {0.0f,0.25f,1.0f}, + {0.0f,0.28125f,1.0f}, + {0.0f,0.3125f,1.0f}, + {0.0f,0.34375f,1.0f}, + {0.0f,0.375f,1.0f}, + {0.0f,0.40625f,1.0f}, + {0.0f,0.4375f,1.0f}, + {0.0f,0.46875f,1.0f}, + {0.0f,0.5f,1.0f}, + {0.0f,0.53125f,1.0f}, + {0.0f,0.5625f,1.0f}, + {0.0f,0.59375f,1.0f}, + {0.0f,0.625f,1.0f}, + {0.0f,0.65625f,1.0f}, + {0.0f,0.6875f,1.0f}, + {0.0f,0.71875f,1.0f}, + {0.0f,0.75f,1.0f}, + {0.0f,0.78125f,1.0f}, + {0.0f,0.8125f,1.0f}, + {0.0f,0.84375f,1.0f}, + {0.0f,0.875f,1.0f}, + {0.0f,0.90625f,1.0f}, + {0.0f,0.9375f,1.0f}, + {0.0f,0.96875f,1.0f}, + {0.0f,1.0f,1.0f}, + {0.03125f,1.0f,0.96875f}, + {0.0625f,1.0f,0.9375f}, + {0.09375f,1.0f,0.90625f}, + {0.125f,1.0f,0.875f}, + {0.15625f,1.0f,0.84375f}, + {0.1875f,1.0f,0.8125f}, + {0.21875f,1.0f,0.78125f}, + {0.25f,1.0f,0.75f}, + {0.28125f,1.0f,0.71875f}, + {0.3125f,1.0f,0.6875f}, + {0.34375f,1.0f,0.65625f}, + {0.375f,1.0f,0.625f}, + {0.40625f,1.0f,0.59375f}, + {0.4375f,1.0f,0.5625f}, + {0.46875f,1.0f,0.53125f}, + {0.5f,1.0f,0.5f}, + {0.53125f,1.0f,0.46875f}, + {0.5625f,1.0f,0.4375f}, + {0.59375f,1.0f,0.40625f}, + {0.625f,1.0f,0.375f}, + {0.65625f,1.0f,0.34375f}, + {0.6875f,1.0f,0.3125f}, + {0.71875f,1.0f,0.28125f}, + {0.75f,1.0f,0.25f}, + {0.78125f,1.0f,0.21875f}, + {0.8125f,1.0f,0.1875f}, + {0.84375f,1.0f,0.15625f}, + {0.875f,1.0f,0.125f}, + {0.90625f,1.0f,0.09375f}, + {0.9375f,1.0f,0.0625f}, + {0.96875f,1.0f,0.03125f}, + {1.0f,1.0f,0.0f}, + {1.0f,0.96875f,0.0f}, + {1.0f,0.9375f,0.0f}, + {1.0f,0.90625f,0.0f}, + {1.0f,0.875f,0.0f}, + {1.0f,0.84375f,0.0f}, + {1.0f,0.8125f,0.0f}, + 
{1.0f,0.78125f,0.0f}, + {1.0f,0.75f,0.0f}, + {1.0f,0.71875f,0.0f}, + {1.0f,0.6875f,0.0f}, + {1.0f,0.65625f,0.0f}, + {1.0f,0.625f,0.0f}, + {1.0f,0.59375f,0.0f}, + {1.0f,0.5625f,0.0f}, + {1.0f,0.53125f,0.0f}, + {1.0f,0.5f,0.0f}, + {1.0f,0.46875f,0.0f}, + {1.0f,0.4375f,0.0f}, + {1.0f,0.40625f,0.0f}, + {1.0f,0.375f,0.0f}, + {1.0f,0.34375f,0.0f}, + {1.0f,0.3125f,0.0f}, + {1.0f,0.28125f,0.0f}, + {1.0f,0.25f,0.0f}, + {1.0f,0.21875f,0.0f}, + {1.0f,0.1875f,0.0f}, + {1.0f,0.15625f,0.0f}, + {1.0f,0.125f,0.0f}, + {1.0f,0.09375f,0.0f}, + {1.0f,0.0625f,0.0f}, + {1.0f,0.03125f,0.0f}, + {1.0f,0.0f,0.0f}, + {0.96875f,0.0f,0.0f}, + {0.9375f,0.0f,0.0f}, + {0.90625f,0.0f,0.0f}, + {0.875f,0.0f,0.0f}, + {0.84375f,0.0f,0.0f}, + {0.8125f,0.0f,0.0f}, + {0.78125f,0.0f,0.0f}, + {0.75f,0.0f,0.0f}, + {0.71875f,0.0f,0.0f}, + {0.6875f,0.0f,0.0f}, + {0.65625f,0.0f,0.0f}, + {0.625f,0.0f,0.0f}, + {0.59375f,0.0f,0.0f}, + {0.5625f,0.0f,0.0f}, + {0.53125f,0.0f,0.0f}, + {0.5f,0.0f,0.0f} +}; + +void colorDepthImage(cv::Mat& imgDepth, cv::Mat& imgColoredDepth, + float minRange, float maxRange) +{ + imgColoredDepth = cv::Mat::zeros(imgDepth.size(), CV_8UC3); + + for (int i = 0; i < imgColoredDepth.rows; ++i) + { + const float* depth = imgDepth.ptr(i); + unsigned char* pixel = imgColoredDepth.ptr(i); + for (int j = 0; j < imgColoredDepth.cols; ++j) + { + if (depth[j] != 0) + { + int idx = fminf(depth[j] - minRange, maxRange - minRange) / (maxRange - minRange) * 127.0f; + idx = 127 - idx; + + pixel[0] = colormapJet[idx][2] * 255.0f; + pixel[1] = colormapJet[idx][1] * 255.0f; + pixel[2] = colormapJet[idx][0] * 255.0f; + } + + pixel += 3; + } + } +} + +bool colormap(const std::string& name, unsigned char idx, + float& r, float& g, float& b) +{ + if (name.compare("jet") == 0) + { + float* color = colormapJet[idx]; + + r = color[0]; + g = color[1]; + b = color[2]; + + return true; + } + else if (name.compare("autumn") == 0) + { + float* color = colormapAutumn[idx]; + + r = color[0]; + g = color[1]; + b = 
color[2]; + + return true; + } + + return false; +} + +std::vector bresLine(int x0, int y0, int x1, int y1) +{ + // Bresenham's line algorithm + // Find cells intersected by line between (x0,y0) and (x1,y1) + + std::vector cells; + + int dx = std::abs(x1 - x0); + int dy = std::abs(y1 - y0); + + int sx = (x0 < x1) ? 1 : -1; + int sy = (y0 < y1) ? 1 : -1; + + int err = dx - dy; + + while (1) + { + cells.push_back(cv::Point2i(x0, y0)); + + if (x0 == x1 && y0 == y1) + { + break; + } + + int e2 = 2 * err; + if (e2 > -dy) + { + err -= dy; + x0 += sx; + } + if (e2 < dx) + { + err += dx; + y0 += sy; + } + } + + return cells; +} + +std::vector bresCircle(int x0, int y0, int r) +{ + // Bresenham's circle algorithm + // Find cells intersected by circle with center (x0,y0) and radius r + + std::vector< std::vector > mask(2 * r + 1); + + for (int i = 0; i < 2 * r + 1; ++i) + { + mask[i].resize(2 * r + 1); + for (int j = 0; j < 2 * r + 1; ++j) + { + mask[i][j] = false; + } + } + + int f = 1 - r; + int ddF_x = 1; + int ddF_y = -2 * r; + int x = 0; + int y = r; + + std::vector line; + + line = bresLine(x0, y0 - r, x0, y0 + r); + for (std::vector::iterator it = line.begin(); it != line.end(); ++it) + { + mask[it->x - x0 + r][it->y - y0 + r] = true; + } + + line = bresLine(x0 - r, y0, x0 + r, y0); + for (std::vector::iterator it = line.begin(); it != line.end(); ++it) + { + mask[it->x - x0 + r][it->y - y0 + r] = true; + } + + while (x < y) + { + if (f >= 0) + { + y--; + ddF_y += 2; + f += ddF_y; + } + + x++; + ddF_x += 2; + f += ddF_x; + + line = bresLine(x0 - x, y0 + y, x0 + x, y0 + y); + for (std::vector::iterator it = line.begin(); it != line.end(); ++it) + { + mask[it->x - x0 + r][it->y - y0 + r] = true; + } + + line = bresLine(x0 - x, y0 - y, x0 + x, y0 - y); + for (std::vector::iterator it = line.begin(); it != line.end(); ++it) + { + mask[it->x - x0 + r][it->y - y0 + r] = true; + } + + line = bresLine(x0 - y, y0 + x, x0 + y, y0 + x); + for (std::vector::iterator it = 
line.begin(); it != line.end(); ++it) + { + mask[it->x - x0 + r][it->y - y0 + r] = true; + } + + line = bresLine(x0 - y, y0 - x, x0 + y, y0 - x); + for (std::vector::iterator it = line.begin(); it != line.end(); ++it) + { + mask[it->x - x0 + r][it->y - y0 + r] = true; + } + } + + std::vector cells; + for (int i = 0; i < 2 * r + 1; ++i) + { + for (int j = 0; j < 2 * r + 1; ++j) + { + if (mask[i][j]) + { + cells.push_back(cv::Point2i(i - r + x0, j - r + y0)); + } + } + } + + return cells; +} + +void +fitCircle(const std::vector& points, + double& centerX, double& centerY, double& radius) +{ + // D. Umbach, and K. Jones, A Few Methods for Fitting Circles to Data, + // IEEE Transactions on Instrumentation and Measurement, 2000 + // We use the modified least squares method. + double sum_x = 0.0; + double sum_y = 0.0; + double sum_xx = 0.0; + double sum_xy = 0.0; + double sum_yy = 0.0; + double sum_xxx = 0.0; + double sum_xxy = 0.0; + double sum_xyy = 0.0; + double sum_yyy = 0.0; + + int n = points.size(); + for (int i = 0; i < n; ++i) + { + double x = points.at(i).x; + double y = points.at(i).y; + + sum_x += x; + sum_y += y; + sum_xx += x * x; + sum_xy += x * y; + sum_yy += y * y; + sum_xxx += x * x * x; + sum_xxy += x * x * y; + sum_xyy += x * y * y; + sum_yyy += y * y * y; + } + + double A = n * sum_xx - square(sum_x); + double B = n * sum_xy - sum_x * sum_y; + double C = n * sum_yy - square(sum_y); + double D = 0.5 * (n * sum_xyy - sum_x * sum_yy + n * sum_xxx - sum_x * sum_xx); + double E = 0.5 * (n * sum_xxy - sum_y * sum_xx + n * sum_yyy - sum_y * sum_yy); + + centerX = (D * C - B * E) / (A * C - square(B)); + centerY = (A * E - B * D) / (A * C - square(B)); + + double sum_r = 0.0; + for (int i = 0; i < n; ++i) + { + double x = points.at(i).x; + double y = points.at(i).y; + + sum_r += hypot(x - centerX, y - centerY); + } + + radius = sum_r / n; +} + +std::vector +intersectCircles(double x1, double y1, double r1, + double x2, double y2, double r2) +{ + std::vector 
ipts; + + double d = hypot(x1 - x2, y1 - y2); + if (d > r1 + r2) + { + // circles are separate + return ipts; + } + if (d < fabs(r1 - r2)) + { + // one circle is contained within the other + return ipts; + } + + double a = (square(r1) - square(r2) + square(d)) / (2.0 * d); + double h = sqrt(square(r1) - square(a)); + + double x3 = x1 + a * (x2 - x1) / d; + double y3 = y1 + a * (y2 - y1) / d; + + if (h < 1e-10) + { + // two circles touch at one point + ipts.push_back(cv::Point2d(x3, y3)); + return ipts; + } + + ipts.push_back(cv::Point2d(x3 + h * (y2 - y1) / d, + y3 - h * (x2 - x1) / d)); + ipts.push_back(cv::Point2d(x3 - h * (y2 - y1) / d, + y3 + h * (x2 - x1) / d)); + return ipts; +} + +char +UTMLetterDesignator(double latitude) +{ + // This routine determines the correct UTM letter designator for the given latitude + // returns 'Z' if latitude is outside the UTM limits of 84N to 80S + // Written by Chuck Gantz- chuck.gantz@globalstar.com + char letterDesignator; + + if ((84.0 >= latitude) && (latitude >= 72.0)) letterDesignator = 'X'; + else if ((72.0 > latitude) && (latitude >= 64.0)) letterDesignator = 'W'; + else if ((64.0 > latitude) && (latitude >= 56.0)) letterDesignator = 'V'; + else if ((56.0 > latitude) && (latitude >= 48.0)) letterDesignator = 'U'; + else if ((48.0 > latitude) && (latitude >= 40.0)) letterDesignator = 'T'; + else if ((40.0 > latitude) && (latitude >= 32.0)) letterDesignator = 'S'; + else if ((32.0 > latitude) && (latitude >= 24.0)) letterDesignator = 'R'; + else if ((24.0 > latitude) && (latitude >= 16.0)) letterDesignator = 'Q'; + else if ((16.0 > latitude) && (latitude >= 8.0)) letterDesignator = 'P'; + else if (( 8.0 > latitude) && (latitude >= 0.0)) letterDesignator = 'N'; + else if (( 0.0 > latitude) && (latitude >= -8.0)) letterDesignator = 'M'; + else if ((-8.0 > latitude) && (latitude >= -16.0)) letterDesignator = 'L'; + else if ((-16.0 > latitude) && (latitude >= -24.0)) letterDesignator = 'K'; + else if ((-24.0 > latitude) && 
(latitude >= -32.0)) letterDesignator = 'J'; + else if ((-32.0 > latitude) && (latitude >= -40.0)) letterDesignator = 'H'; + else if ((-40.0 > latitude) && (latitude >= -48.0)) letterDesignator = 'G'; + else if ((-48.0 > latitude) && (latitude >= -56.0)) letterDesignator = 'F'; + else if ((-56.0 > latitude) && (latitude >= -64.0)) letterDesignator = 'E'; + else if ((-64.0 > latitude) && (latitude >= -72.0)) letterDesignator = 'D'; + else if ((-72.0 > latitude) && (latitude >= -80.0)) letterDesignator = 'C'; + else letterDesignator = 'Z'; //This is here as an error flag to show that the Latitude is outside the UTM limits + + return letterDesignator; +} + +void +LLtoUTM(double latitude, double longitude, + double& utmNorthing, double& utmEasting, std::string& utmZone) +{ + // converts lat/long to UTM coords. Equations from USGS Bulletin 1532 + // East Longitudes are positive, West longitudes are negative. + // North latitudes are positive, South latitudes are negative + // Lat and Long are in decimal degrees + // Written by Chuck Gantz- chuck.gantz@globalstar.com + + double k0 = 0.9996; + + double LongOrigin; + double eccPrimeSquared; + double N, T, C, A, M; + + double LatRad = latitude * M_PI / 180.0; + double LongRad = longitude * M_PI / 180.0; + double LongOriginRad; + + int ZoneNumber = static_cast((longitude + 180.0) / 6.0) + 1; + + if (latitude >= 56.0 && latitude < 64.0 && + longitude >= 3.0 && longitude < 12.0) { + ZoneNumber = 32; + } + + // Special zones for Svalbard + if (latitude >= 72.0 && latitude < 84.0) { + if ( longitude >= 0.0 && longitude < 9.0) ZoneNumber = 31; + else if (longitude >= 9.0 && longitude < 21.0) ZoneNumber = 33; + else if (longitude >= 21.0 && longitude < 33.0) ZoneNumber = 35; + else if (longitude >= 33.0 && longitude < 42.0) ZoneNumber = 37; + } + LongOrigin = static_cast((ZoneNumber - 1) * 6 - 180 + 3); //+3 puts origin in middle of zone + LongOriginRad = LongOrigin * M_PI / 180.0; + + // compute the UTM Zone from the latitude and 
longitude + std::ostringstream oss; + oss << ZoneNumber << UTMLetterDesignator(latitude); + utmZone = oss.str(); + + eccPrimeSquared = WGS84_ECCSQ / (1.0 - WGS84_ECCSQ); + + N = WGS84_A / sqrt(1.0 - WGS84_ECCSQ * sin(LatRad) * sin(LatRad)); + T = tan(LatRad) * tan(LatRad); + C = eccPrimeSquared * cos(LatRad) * cos(LatRad); + A = cos(LatRad) * (LongRad - LongOriginRad); + + M = WGS84_A * ((1.0 - WGS84_ECCSQ / 4.0 + - 3.0 * WGS84_ECCSQ * WGS84_ECCSQ / 64.0 + - 5.0 * WGS84_ECCSQ * WGS84_ECCSQ * WGS84_ECCSQ / 256.0) + * LatRad + - (3.0 * WGS84_ECCSQ / 8.0 + + 3.0 * WGS84_ECCSQ * WGS84_ECCSQ / 32.0 + + 45.0 * WGS84_ECCSQ * WGS84_ECCSQ * WGS84_ECCSQ / 1024.0) + * sin(2.0 * LatRad) + + (15.0 * WGS84_ECCSQ * WGS84_ECCSQ / 256.0 + + 45.0 * WGS84_ECCSQ * WGS84_ECCSQ * WGS84_ECCSQ / 1024.0) + * sin(4.0 * LatRad) + - (35.0 * WGS84_ECCSQ * WGS84_ECCSQ * WGS84_ECCSQ / 3072.0) + * sin(6.0 * LatRad)); + + utmEasting = k0 * N * (A + (1.0 - T + C) * A * A * A / 6.0 + + (5.0 - 18.0 * T + T * T + 72.0 * C + - 58.0 * eccPrimeSquared) + * A * A * A * A * A / 120.0) + + 500000.0; + + utmNorthing = k0 * (M + N * tan(LatRad) * + (A * A / 2.0 + + (5.0 - T + 9.0 * C + 4.0 * C * C) * A * A * A * A / 24.0 + + (61.0 - 58.0 * T + T * T + 600.0 * C + - 330.0 * eccPrimeSquared) + * A * A * A * A * A * A / 720.0)); + if (latitude < 0.0) { + utmNorthing += 10000000.0; //10000000 meter offset for southern hemisphere + } +} + +void +UTMtoLL(double utmNorthing, double utmEasting, const std::string& utmZone, + double& latitude, double& longitude) +{ + // converts UTM coords to lat/long. Equations from USGS Bulletin 1532 + // East Longitudes are positive, West longitudes are negative. + // North latitudes are positive, South latitudes are negative + // Lat and Long are in decimal degrees. 
+ // Written by Chuck Gantz- chuck.gantz@globalstar.com + + double k0 = 0.9996; + double eccPrimeSquared; + double e1 = (1.0 - sqrt(1.0 - WGS84_ECCSQ)) / (1.0 + sqrt(1.0 - WGS84_ECCSQ)); + double N1, T1, C1, R1, D, M; + double LongOrigin; + double mu, phi1, phi1Rad; + double x, y; + int ZoneNumber; + char ZoneLetter; + bool NorthernHemisphere; + + x = utmEasting - 500000.0; //remove 500,000 meter offset for longitude + y = utmNorthing; + + std::istringstream iss(utmZone); + iss >> ZoneNumber >> ZoneLetter; + if ((static_cast(ZoneLetter) - static_cast('N')) >= 0) { + NorthernHemisphere = true;//point is in northern hemisphere + } else { + NorthernHemisphere = false;//point is in southern hemisphere + y -= 10000000.0;//remove 10,000,000 meter offset used for southern hemisphere + } + + LongOrigin = (ZoneNumber - 1.0) * 6.0 - 180.0 + 3.0; //+3 puts origin in middle of zone + + eccPrimeSquared = WGS84_ECCSQ / (1.0 - WGS84_ECCSQ); + + M = y / k0; + mu = M / (WGS84_A * (1.0 - WGS84_ECCSQ / 4.0 + - 3.0 * WGS84_ECCSQ * WGS84_ECCSQ / 64.0 + - 5.0 * WGS84_ECCSQ * WGS84_ECCSQ * WGS84_ECCSQ / 256.0)); + + phi1Rad = mu + (3.0 * e1 / 2.0 - 27.0 * e1 * e1 * e1 / 32.0) * sin(2.0 * mu) + + (21.0 * e1 * e1 / 16.0 - 55.0 * e1 * e1 * e1 * e1 / 32.0) + * sin(4.0 * mu) + + (151.0 * e1 * e1 * e1 / 96.0) * sin(6.0 * mu); + phi1 = phi1Rad / M_PI * 180.0; + + N1 = WGS84_A / sqrt(1.0 - WGS84_ECCSQ * sin(phi1Rad) * sin(phi1Rad)); + T1 = tan(phi1Rad) * tan(phi1Rad); + C1 = eccPrimeSquared * cos(phi1Rad) * cos(phi1Rad); + R1 = WGS84_A * (1.0 - WGS84_ECCSQ) / + pow(1.0 - WGS84_ECCSQ * sin(phi1Rad) * sin(phi1Rad), 1.5); + D = x / (N1 * k0); + + latitude = phi1Rad - (N1 * tan(phi1Rad) / R1) + * (D * D / 2.0 - (5.0 + 3.0 * T1 + 10.0 * C1 - 4.0 * C1 * C1 + - 9.0 * eccPrimeSquared) * D * D * D * D / 24.0 + + (61.0 + 90.0 * T1 + 298.0 * C1 + 45.0 * T1 * T1 + - 252.0 * eccPrimeSquared - 3.0 * C1 * C1) + * D * D * D * D * D * D / 720.0); + latitude *= 180.0 / M_PI; + + longitude = (D - (1.0 + 2.0 * T1 + 
C1) * D * D * D / 6.0 + + (5.0 - 2.0 * C1 + 28.0 * T1 - 3.0 * C1 * C1 + + 8.0 * eccPrimeSquared + 24.0 * T1 * T1) + * D * D * D * D * D / 120.0) / cos(phi1Rad); + longitude = LongOrigin + longitude / M_PI * 180.0; +} + +long int +timestampDiff(uint64_t t1, uint64_t t2) +{ + if (t2 > t1) + { + uint64_t d = t2 - t1; + + if (d > std::numeric_limits::max()) + { + return std::numeric_limits::max(); + } + else + { + return d; + } + } + else + { + uint64_t d = t1 - t2; + + if (d > std::numeric_limits::max()) + { + return std::numeric_limits::min(); + } + else + { + return - static_cast(d); + } + } +} + +} diff --git a/camera_models/src/intrinsic_calib.cc b/camera_models/src/intrinsic_calib.cc new file mode 100644 index 0000000..7590a64 --- /dev/null +++ b/camera_models/src/intrinsic_calib.cc @@ -0,0 +1,284 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "camodocal/calib/CameraCalibration.h" +#include "camodocal/chessboard/Chessboard.h" +#include "camodocal/gpl/gpl.h" + +int +main( int argc, char** argv ) +{ + cv::Size boardSize; + float squareSize; + std::string inputDir; + std::string cameraModel; + std::string cameraName; + std::string prefix; + std::string fileExtension; + bool useOpenCV; + bool viewResults; + bool verbose; + + //========= Handling Program options ========= + boost::program_options::options_description desc( "Allowed options" ); + desc.add_options( )( "help", "produce help message" )( + "width,w", + boost::program_options::value< int >( &boardSize.width )->default_value( 8 ), + "Number of inner corners on the chessboard pattern in x direction" )( + "height,h", + boost::program_options::value< int >( &boardSize.height )->default_value( 12 ), + "Number of inner corners on the chessboard pattern in y direction" )( + "size,s", + boost::program_options::value< float >( &squareSize )->default_value( 7.f ), + "Size of one square in mm" )( "input,i", + boost::program_options::value< std::string >( 
&inputDir )->default_value( "calibrationdata" ), + "Input directory containing chessboard images" )( + "prefix,p", + boost::program_options::value< std::string >( &prefix )->default_value( "left-" ), + "Prefix of images" )( "file-extension,e", + boost::program_options::value< std::string >( &fileExtension )->default_value( ".png" ), + "File extension of images" )( + "camera-model", + boost::program_options::value< std::string >( &cameraModel )->default_value( "mei" ), + "Camera model: kannala-brandt | mei | pinhole" )( + "camera-name", + boost::program_options::value< std::string >( &cameraName )->default_value( "camera" ), + "Name of camera" )( "opencv", + boost::program_options::bool_switch( &useOpenCV )->default_value( true ), + "Use OpenCV to detect corners" )( + "view-results", + boost::program_options::bool_switch( &viewResults )->default_value( false ), + "View results" )( "verbose,v", + boost::program_options::bool_switch( &verbose )->default_value( true ), + "Verbose output" ); + + boost::program_options::positional_options_description pdesc; + pdesc.add( "input", 1 ); + + boost::program_options::variables_map vm; + boost::program_options::store( boost::program_options::command_line_parser( argc, argv ) + .options( desc ) + .positional( pdesc ) + .run( ), + vm ); + boost::program_options::notify( vm ); + + if ( vm.count( "help" ) ) + { + std::cout << desc << std::endl; + return 1; + } + + if ( !boost::filesystem::exists( inputDir ) && !boost::filesystem::is_directory( inputDir ) ) + { + std::cerr << "# ERROR: Cannot find input directory " << inputDir << "." 
<< std::endl; + return 1; + } + + camodocal::Camera::ModelType modelType; + if ( boost::iequals( cameraModel, "kannala-brandt" ) ) + { + modelType = camodocal::Camera::KANNALA_BRANDT; + } + else if ( boost::iequals( cameraModel, "mei" ) ) + { + modelType = camodocal::Camera::MEI; + } + else if ( boost::iequals( cameraModel, "pinhole" ) ) + { + modelType = camodocal::Camera::PINHOLE; + } + else if ( boost::iequals( cameraModel, "pinhole_full" ) ) + { + modelType = camodocal::Camera::PINHOLE_FULL; + } + else if ( boost::iequals( cameraModel, "scaramuzza" ) ) + { + modelType = camodocal::Camera::SCARAMUZZA; + } + else + { + std::cerr << "# ERROR: Unknown camera model: " << cameraModel << std::endl; + return 1; + } + + switch ( modelType ) + { + case camodocal::Camera::KANNALA_BRANDT: + std::cout << "# INFO: Camera model: Kannala-Brandt" << std::endl; + break; + case camodocal::Camera::MEI: + std::cout << "# INFO: Camera model: Mei" << std::endl; + break; + case camodocal::Camera::PINHOLE: + std::cout << "# INFO: Camera model: Pinhole" << std::endl; + break; + case camodocal::Camera::PINHOLE_FULL: + std::cout << "# INFO: Camera model: PinholeFull" << std::endl; + break; + case camodocal::Camera::SCARAMUZZA: + std::cout << "# INFO: Camera model: Scaramuzza-Omnidirect" << std::endl; + break; + } + + // look for images in input directory + std::vector< std::string > imageFilenames; + boost::filesystem::directory_iterator itr; + for ( boost::filesystem::directory_iterator itr( inputDir ); + itr != boost::filesystem::directory_iterator( ); + ++itr ) + { + if ( !boost::filesystem::is_regular_file( itr->status( ) ) ) + { + continue; + } + + std::string filename = itr->path( ).filename( ).string( ); + + // check if prefix matches + if ( !prefix.empty( ) ) + { + if ( filename.compare( 0, prefix.length( ), prefix ) != 0 ) + { + continue; + } + } + + // check if file extension matches + if ( filename.compare( filename.length( ) - fileExtension.length( ), fileExtension.length( ), 
fileExtension ) + != 0 ) + { + continue; + } + + imageFilenames.push_back( itr->path( ).string( ) ); + + if ( verbose ) + { + std::cerr << "# INFO: Adding " << imageFilenames.back( ) << std::endl; + } + } + + if ( imageFilenames.empty( ) ) + { + std::cerr << "# ERROR: No chessboard images found." << std::endl; + return 1; + } + + if ( verbose ) + { + std::cerr << "# INFO: # images: " << imageFilenames.size( ) << std::endl; + } + + std::sort( imageFilenames.begin( ), imageFilenames.end( ) ); + + cv::Mat image = cv::imread( imageFilenames.front( ), -1 ); + const cv::Size frameSize = image.size( ); + + camodocal::CameraCalibration calibration( modelType, cameraName, frameSize, boardSize, squareSize ); + calibration.setVerbose( verbose ); + + std::vector< bool > chessboardFound( imageFilenames.size( ), false ); + for ( size_t i = 0; i < imageFilenames.size( ); ++i ) + { + image = cv::imread( imageFilenames.at( i ), -1 ); + + camodocal::Chessboard chessboard( boardSize, image ); + + chessboard.findCorners( useOpenCV ); + if ( chessboard.cornersFound( ) ) + { + if ( verbose ) + { + std::cerr << "# INFO: Detected chessboard in image " << i + 1 << ", " + << imageFilenames.at( i ) << std::endl; + } + + calibration.addChessboardData( chessboard.getCorners( ) ); + + cv::Mat sketch; + chessboard.getSketch( ).copyTo( sketch ); + + cv::imshow( "Image", sketch ); + cv::waitKey( 50 ); + } + else if ( verbose ) + { + std::cerr << "# INFO: Did not detect chessboard in image " << i + 1 << std::endl; + } + chessboardFound.at( i ) = chessboard.cornersFound( ); + } + cv::destroyWindow( "Image" ); + + if ( calibration.sampleCount( ) < 10 ) + { + std::cerr << "# ERROR: Insufficient number of detected chessboards." << std::endl; + return 1; + } + + if ( verbose ) + { + std::cerr << "# INFO: Calibrating..." 
<< std::endl; + } + + double startTime = camodocal::timeInSeconds( ); + + calibration.calibrate( ); + calibration.writeParams( cameraName + "_camera_calib.yaml" ); + calibration.writeChessboardData( cameraName + "_chessboard_data.dat" ); + + if ( verbose ) + { + std::cout << "# INFO: Calibration took a total time of " << std::fixed + << std::setprecision( 3 ) << camodocal::timeInSeconds( ) - startTime << " sec.\n"; + } + + if ( verbose ) + { + std::cerr << "# INFO: Wrote calibration file to " + << cameraName + "_camera_calib.yaml" << std::endl; + } + + if ( viewResults ) + { + std::vector< cv::Mat > cbImages; + std::vector< std::string > cbImageFilenames; + + for ( size_t i = 0; i < imageFilenames.size( ); ++i ) + { + if ( !chessboardFound.at( i ) ) + { + continue; + } + + cbImages.push_back( cv::imread( imageFilenames.at( i ), -1 ) ); + cbImageFilenames.push_back( imageFilenames.at( i ) ); + } + + // visualize observed and reprojected points + calibration.drawResults( cbImages ); + + for ( size_t i = 0; i < cbImages.size( ); ++i ) + { + cv::putText( cbImages.at( i ), + cbImageFilenames.at( i ), + cv::Point( 10, 20 ), + cv::FONT_HERSHEY_COMPLEX, + 0.5, + cv::Scalar( 255, 255, 255 ), + 1, + CV_AA ); + cv::imshow( "Image", cbImages.at( i ) ); + cv::waitKey( 0 ); + } + } + + return 0; +} diff --git a/camera_models/src/sparse_graph/Transform.cc b/camera_models/src/sparse_graph/Transform.cc new file mode 100644 index 0000000..db979e9 --- /dev/null +++ b/camera_models/src/sparse_graph/Transform.cc @@ -0,0 +1,77 @@ +#include + +namespace camodocal +{ + +Transform::Transform() +{ + m_q.setIdentity(); + m_t.setZero(); +} + +Transform::Transform(const Eigen::Matrix4d& H) +{ + m_q = Eigen::Quaterniond(H.block<3,3>(0,0)); + m_t = H.block<3,1>(0,3); +} + +Eigen::Quaterniond& +Transform::rotation(void) +{ + return m_q; +} + +const Eigen::Quaterniond& +Transform::rotation(void) const +{ + return m_q; +} + +double* +Transform::rotationData(void) +{ + return m_q.coeffs().data(); +} 
+ +const double* const +Transform::rotationData(void) const +{ + return m_q.coeffs().data(); +} + +Eigen::Vector3d& +Transform::translation(void) +{ + return m_t; +} + +const Eigen::Vector3d& +Transform::translation(void) const +{ + return m_t; +} + +double* +Transform::translationData(void) +{ + return m_t.data(); +} + +const double* const +Transform::translationData(void) const +{ + return m_t.data(); +} + +Eigen::Matrix4d +Transform::toMatrix(void) const +{ + Eigen::Matrix4d H; + H.setIdentity(); + H.block<3,3>(0,0) = m_q.toRotationMatrix(); + H.block<3,1>(0,3) = m_t; + + return H; +} + +} diff --git a/config/master_config.yaml b/config/master_config.yaml new file mode 100644 index 0000000..1cd39f0 --- /dev/null +++ b/config/master_config.yaml @@ -0,0 +1,28 @@ +%YAML:1.0 + + +# general parameters +imu: 1 # if zero we do full 6 dof, otherwise if 1 then 4 dof optimization +output_path: "~/output/" # where to save final trajectory + + +# loop closure saving parameters +load_previous_pose_graph: 0 # load and reuse previous pose graph; load from 'pose_graph_save_path' +pose_graph_save_path: "~/output/pose_graph/" # save and load path +save_image: 1 # save image in pose graph for visualization prupose; you can close this function by setting 0 + + +# loop closure parameters +recall_ignore_recent_ct: 40 # how many frames we should skip that are recent (avoid matching to recent frames) +min_score: 0.030 # min score from dbow to try to loop closure with (0.015 default) +pnp_inflation: 10.0 # amount to inflate pnp ransac sigma by (10.0 default) + +max_theta_diff: 170.0 # max orientation diff between keyframe and query frame (degrees) +max_pos_diff: 1.5 # max position diff between keyframe and query frame (meters) +min_loop_feat_num: 35 # minimum number of features needed to be matched between the keyframe and query frame + +# keyframing parameters +skip_dist: 0.02 # meters, distance between keyframes +skip_cnt: 0 # num, skip every n frames (typically don't need unless high 
framerate) + + diff --git a/config/vins_rviz_config.rviz b/config/vins_rviz_config.rviz new file mode 100644 index 0000000..4225403 --- /dev/null +++ b/config/vins_rviz_config.rviz @@ -0,0 +1,513 @@ +Panels: + - Class: rviz/Displays + Help Height: 0 + Name: Displays + Property Tree Widget: + Expanded: + - /LoopGroup1 + - /LoopGroup1/loopLink1/Namespaces1 + - /VIOGroup1 + Splitter Ratio: 0.465115994 + Tree Height: 242 + - Class: rviz/Selection + Name: Selection + - Class: rviz/Tool Properties + Expanded: + - /2D Pose Estimate1 + - /2D Nav Goal1 + - /Publish Point1 + Name: Tool Properties + Splitter Ratio: 0.588679016 + - Class: rviz/Views + Expanded: + - /Current View1 + Name: Views + Splitter Ratio: 0.5 + - Class: rviz/Time + Experimental: false + Name: Time + SyncMode: 0 + SyncSource: track_image + - Class: rviz/Displays + Help Height: 78 + Name: Displays + Property Tree Widget: + Expanded: ~ + Splitter Ratio: 0.5 + Tree Height: 359 +Toolbars: + toolButtonStyle: 2 +Visualization Manager: + Class: "" + Displays: + - Alpha: 0.5 + Cell Size: 1 + Class: rviz/Grid + Color: 160; 160; 164 + Enabled: true + Line Style: + Line Width: 0.0299999993 + Value: Lines + Name: Grid + Normal Cell Count: 0 + Offset: + X: 0 + Y: 0 + Z: 0 + Plane: XY + Plane Cell Count: 80 + Reference Frame: + Value: true + - Class: rviz/TF + Enabled: true + Frame Timeout: 999 + Frames: + All Enabled: true + cam0: + Value: true + global: + Value: true + imu: + Value: true + Marker Scale: 1 + Name: TF + Show Arrows: false + Show Axes: true + Show Names: true + Tree: + global: + imu: + cam0: + {} + Update Interval: 0 + Value: true + - Class: rviz/Axes + Enabled: false + Length: 0.5 + Name: Axes + Radius: 0.0500000007 + Reference Frame: + Value: false + - Class: rviz/Group + Displays: + - Class: rviz/MarkerArray + Enabled: true + Marker Topic: /loop_fusion_node/pose_graph + Name: loopLink + Namespaces: + CameraPoseVisualization: true + Queue Size: 100 + Value: true + - Class: rviz/MarkerArray + Enabled: 
false + Marker Topic: /loop_fusion_node/camera_pose_visual + Name: loopCamera + Namespaces: + {} + Queue Size: 100 + Value: false + - Alpha: 1 + Axes Length: 0.200000003 + Axes Radius: 0.0199999996 + Class: rviz/PoseWithCovariance + Color: 255; 25; 0 + Covariance: + Orientation: + Alpha: 0.5 + Color: 255; 255; 127 + Color Style: Unique + Frame: Local + Offset: 1 + Scale: 1 + Value: true + Position: + Alpha: 0.300000012 + Color: 204; 51; 204 + Scale: 1 + Value: true + Value: false + Enabled: true + Head Length: 0.300000012 + Head Radius: 0.100000001 + Name: PoseWithCovariance + Shaft Length: 1 + Shaft Radius: 0.0500000007 + Shape: Axes + Topic: /ov_secondary/poseimu + Unreliable: false + Value: true + - Alpha: 1 + Buffer Length: 1 + Class: rviz/Path + Color: 204; 0; 0 + Enabled: true + Head Diameter: 0.300000012 + Head Length: 0.200000003 + Length: 0.300000012 + Line Style: Lines + Line Width: 0.0399999991 + Name: loopPath 1 + Offset: + X: 0 + Y: 0 + Z: 0 + Pose Color: 255; 85; 255 + Pose Style: None + Radius: 0.0299999993 + Shaft Diameter: 0.100000001 + Shaft Length: 0.100000001 + Topic: /loop_fusion_node/path_1 + Unreliable: false + Value: true + - Alpha: 1 + Buffer Length: 1 + Class: rviz/Path + Color: 255; 170; 0 + Enabled: true + Head Diameter: 0.300000012 + Head Length: 0.200000003 + Length: 0.300000012 + Line Style: Lines + Line Width: 0.0399999991 + Name: loopPath 2 + Offset: + X: 0 + Y: 0 + Z: 0 + Pose Color: 255; 85; 255 + Pose Style: None + Radius: 0.0299999993 + Shaft Diameter: 0.100000001 + Shaft Length: 0.100000001 + Topic: /loop_fusion_node/path_2 + Unreliable: false + Value: true + - Alpha: 1 + Buffer Length: 1 + Class: rviz/Path + Color: 170; 0; 255 + Enabled: true + Head Diameter: 0.300000012 + Head Length: 0.200000003 + Length: 0.300000012 + Line Style: Lines + Line Width: 0.0399999991 + Name: loopPath 3 + Offset: + X: 0 + Y: 0 + Z: 0 + Pose Color: 255; 85; 255 + Pose Style: None + Radius: 0.0299999993 + Shaft Diameter: 0.100000001 + Shaft Length: 
0.100000001 + Topic: /loop_fusion_node/path_3 + Unreliable: false + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: -0.00362956035 + Min Value: -0.155465692 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud + Color: 255; 255; 255 + Color Transformer: FlatColor + Decay Time: 10 + Enabled: false + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 2 + Size (m): 0.00999999978 + Style: Points + Topic: /loop_fusion/margin_cloud_loop_rect + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: false + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud + Color: 0; 255; 0 + Color Transformer: FlatColor + Decay Time: 0 + Enabled: false + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 4 + Size (m): 0.00999999978 + Style: Points + Topic: /loop_fusion/point_cloud_loop_rect + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: false + - Class: rviz/Image + Enabled: true + Image Topic: /ov_secondary/match_image + Max Value: 1 + Median window: 5 + Min Value: 0 + Name: loop_match_Image + Normalize Range: true + Queue Size: 2 + Transport Hint: raw + Unreliable: false + Value: true + Enabled: true + Name: LoopGroup + - Class: rviz/Group + Displays: + - Alpha: 1 + Buffer Length: 1 + Class: rviz/Path + Color: 25; 255; 0 + Enabled: true + Head Diameter: 0.300000012 + Head Length: 0.200000003 + Length: 0.300000012 + Line Style: Lines + Line Width: 0.0299999993 + Name: VIOPath + Offset: + X: 0 + Y: 0 + Z: 0 + Pose Color: 255; 
85; 255 + Pose Style: None + Radius: 0.0299999993 + Shaft Diameter: 0.100000001 + Shaft Length: 0.100000001 + Topic: /ov_msckf/pathimu + Unreliable: false + Value: true + - Class: rviz/MarkerArray + Enabled: false + Marker Topic: /vins_estimator/camera_pose_visual + Name: CameraMarker + Namespaces: + {} + Queue Size: 100 + Value: false + - Alpha: 1 + Axes Length: 0.200000003 + Axes Radius: 0.0199999996 + Class: rviz/PoseWithCovariance + Color: 255; 25; 0 + Covariance: + Orientation: + Alpha: 0.5 + Color: 255; 255; 127 + Color Style: Unique + Frame: Local + Offset: 1 + Scale: 1 + Value: true + Position: + Alpha: 0.300000012 + Color: 204; 51; 204 + Scale: 1 + Value: true + Value: false + Enabled: false + Head Length: 0.300000012 + Head Radius: 0.100000001 + Name: PoseWithCovariance + Shaft Length: 1 + Shaft Radius: 0.0500000007 + Shape: Axes + Topic: /ov_msckf/poseimu + Unreliable: false + Value: false + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud + Color: 255; 255; 255 + Color Transformer: Intensity + Decay Time: 0 + Enabled: false + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 2 + Size (m): 0.00999999978 + Style: Points + Topic: /vins_estimator/point_cloud + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: false + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: Intensity + Decay Time: 1 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + 
Queue Size: 10 + Selectable: true + Size (Pixels): 3 + Size (m): 0.00999999978 + Style: Points + Topic: /ov_msckf/points_msckf + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud + Color: 0; 255; 0 + Color Transformer: FlatColor + Decay Time: 100 + Enabled: false + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: HistoryPointCloud + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 1 + Size (m): 0.00999999978 + Style: Points + Topic: /vins_estimator/margin_cloud + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: false + - Class: rviz/Image + Enabled: true + Image Topic: /ov_msckf/trackhist + Max Value: 1 + Median window: 5 + Min Value: 0 + Name: track_image + Normalize Range: true + Queue Size: 2 + Transport Hint: raw + Unreliable: false + Value: true + Enabled: true + Name: VIOGroup + Enabled: true + Global Options: + Background Color: 0; 0; 0 + Default Light: true + Fixed Frame: global + Frame Rate: 30 + Name: root + Tools: + - Class: rviz/MoveCamera + - Class: rviz/Select + - Class: rviz/FocusCamera + - Class: rviz/Measure + - Class: rviz/SetInitialPose + Topic: /initialpose + - Class: rviz/SetGoal + Topic: /move_base_simple/goal + - Class: rviz/PublishPoint + Single click: true + Topic: /clicked_point + Value: true + Views: + Current: + Class: rviz/Orbit + Distance: 19.7382278 + Enable Stereo Rendering: + Stereo Eye Separation: 0.0599999987 + Stereo Focal Distance: 1 + Swap Stereo Eyes: false + Value: false + Focal Point: + X: 0 + Y: 0 + Z: 0 + Focal Shape Fixed Size: false + Focal Shape Size: 0.0500000007 + Invert Z Axis: false + Name: Current View + Near Clip Distance: 0.00999999978 + Pitch: 0.855398118 + Target Frame: + Value: 
Orbit (rviz) + Yaw: 0.890398085 + Saved: ~ +Window Geometry: + Displays: + collapsed: false + Height: 1056 + Hide Left Dock: false + Hide Right Dock: true + QMainWindow State: 000000ff00000000fd0000000400000000000002a6000003dafc0200000015fb000000100044006900730070006c006100790073010000002800000133000000d700fffffffb000000160074007200610063006b005f0069006d0061006700650100000161000001380000001600fffffffb0000001200530065006c0065006300740069006f006e0000000028000003990000006100fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000001200720061007700200049006d0061006700650000000028000000300000000000000000fb00000012007200610077005f0069006d0061006700650000000028000000f90000000000000000fb0000001a0074007200610063006b0065006400200069006d0061006700650000000028000001d50000000000000000fb00000020006c006f006f0070005f006d0061007400630068005f0069006d0061006700650000000192000000d60000000000000000fb000000100044006900730070006c006100790073000000002800000399000000d700fffffffc000000280000011e0000000000fffffffa000000000100000002fb0000001200720061007700200049006d0061006700650000000000ffffffff0000000000000000fb0000001a0074007200610063006b0065006400200069006d0061006700650100000000000002370000000000000000fb0000001000410052005f0069006d0061006700650100000373000000160000000000000000fb0000001200720061007700200069006d006100670065010000038f000000160000000000000000fb00000020006c006f006f0070005f006d0061007400630068005f0049006d006100670065010000029f000001630000001600fffffffb0000000a0049006d00610067006501
000006b7000001770000000000000000fb00000020006c006f006f0070005f006d0061007400630068005f0049006d0061006700650100000183000001250000000000000000fb000000160074007200610063006b005f0069006d00610067006501000002ae00000154000000000000000000000001000001ae000003dafc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a005600690065007700730000000028000003da000000ad00fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000073f00000040fc0100000002fb0000000800540069006d006500000000000000073f0000030000fffffffb0000000800540069006d0065010000000000000450000000000000000000000493000003da00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + Selection: + collapsed: false + Time: + collapsed: false + Tool Properties: + collapsed: false + Views: + collapsed: true + Width: 1855 + X: 65 + Y: 24 + loop_match_Image: + collapsed: false + track_image: + collapsed: false diff --git a/data/.gitignore b/data/.gitignore new file mode 100644 index 0000000..7891b51 --- /dev/null +++ b/data/.gitignore @@ -0,0 +1,2 @@ +algorithms/ +truths/ \ No newline at end of file diff --git a/data/ate_eth.png b/data/ate_eth.png new file mode 100644 index 0000000..bd0ce81 Binary files /dev/null and b/data/ate_eth.png differ diff --git a/data/ate_tumvi.png b/data/ate_tumvi.png new file mode 100644 index 0000000..3e7aaba Binary files /dev/null and b/data/ate_tumvi.png differ diff --git a/data/tum_example.png b/data/tum_example.png new file mode 100644 index 0000000..d608610 Binary files /dev/null and b/data/tum_example.png differ diff --git a/loop_fusion/CMakeLists.txt b/loop_fusion/CMakeLists.txt new file mode 100644 index 0000000..b729296 --- /dev/null +++ b/loop_fusion/CMakeLists.txt @@ -0,0 +1,52 @@ +cmake_minimum_required(VERSION 2.8.3) 
+project(loop_fusion) + +set(CMAKE_BUILD_TYPE "Release") +set(CMAKE_CXX_FLAGS "-std=c++11") +#-DEIGEN_USE_MKL_ALL") +set(CMAKE_CXX_FLAGS_RELEASE "-O3 -Wall -g") + +find_package(catkin REQUIRED COMPONENTS + roscpp + std_msgs + nav_msgs + camera_models + cv_bridge + roslib + ) + + +set(OpenCV_DIR "/home/admin1/podmivan/opencv-3.4.16/build") +set(CUDA_DIR "/usr/local/cuda-11.5") +find_package(OpenCV 3 REQUIRED) +message(STATUS "1 OPENCV: " ${OpenCV_VERSION} " ") +find_package(Ceres REQUIRED) +find_package(Eigen3) + +include_directories(${catkin_INCLUDE_DIRS} ${CERES_INCLUDE_DIRS} ${EIGEN3_INCLUDE_DIR}) + +catkin_package() + +message(STATUS "2 OPENCV: " ${OpenCV_VERSION} " ") + +add_executable(loop_fusion_node + src/pose_graph_node.cpp + src/pose_graph.cpp + src/keyframe.cpp + src/utility/CameraPoseVisualization.cpp + src/ThirdParty/DBoW/BowVector.cpp + src/ThirdParty/DBoW/FBrief.cpp + src/ThirdParty/DBoW/FeatureVector.cpp + src/ThirdParty/DBoW/QueryResults.cpp + src/ThirdParty/DBoW/ScoringObject.cpp + src/ThirdParty/DUtils/Random.cpp + src/ThirdParty/DUtils/Timestamp.cpp + src/ThirdParty/DVision/BRIEF.cpp + src/ThirdParty/VocabularyBinary.cpp + ) + +message(STATUS "3 OPENCV: " ${OpenCV_VERSION} " ") + +target_link_libraries(loop_fusion_node ${catkin_LIBRARIES} ${OpenCV_LIBS} ${CERES_LIBRARIES}) + +message(STATUS "4 OPENCV: " ${OpenCV_VERSION} " ") \ No newline at end of file diff --git a/loop_fusion/launch/posegraph.launch b/loop_fusion/launch/posegraph.launch new file mode 100644 index 0000000..c058286 --- /dev/null +++ b/loop_fusion/launch/posegraph.launch @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/loop_fusion/launch/record_eth.launch b/loop_fusion/launch/record_eth.launch new file mode 100644 index 0000000..872a088 --- /dev/null +++ b/loop_fusion/launch/record_eth.launch @@ -0,0 +1,52 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + \ No newline at end of file diff --git a/loop_fusion/launch/record_tumvi.launch b/loop_fusion/launch/record_tumvi.launch new file mode 100644 index 0000000..687ed51 --- /dev/null +++ b/loop_fusion/launch/record_tumvi.launch @@ -0,0 +1,52 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/loop_fusion/package.xml b/loop_fusion/package.xml new file mode 100644 index 0000000..819eee3 --- /dev/null +++ b/loop_fusion/package.xml @@ -0,0 +1,53 @@ + + + loop_fusion + 0.0.0 + loop_fusion package + + + + + tony-ws + + + + + + TODO + + + + + + + + + + + + + + + + + + + + + + + + + + catkin + camera_models + camera_models + + + + + + + + + \ No newline at end of file diff --git a/loop_fusion/scripts/run_ros_eth.sh b/loop_fusion/scripts/run_ros_eth.sh new file mode 100755 index 0000000..8e4b630 --- /dev/null +++ b/loop_fusion/scripts/run_ros_eth.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash + +# Source our workspace directory to load ENV variables +source /home/patrick/workspace/catkin_ws_ov/devel/setup.bash + + +#============================================================= +#============================================================= +#============================================================= + + +# estimator configurations +modes=( + "mono" + "stereo" +) + +# dataset locations +bagnames=( + "V1_01_easy" + "V1_02_medium" + "V1_03_difficult" + "V2_01_easy" + "V2_02_medium" + "V2_03_difficult" +) + + + +#============================================================= +#============================================================= +#============================================================= + + +# Loop through all modes +for h in "${!modes[@]}"; do +# Loop through all datasets +for i in "${!bagnames[@]}"; do + +# Monte Carlo runs for this dataset +# If you want more runs, change the below loop +for j in {00..04}; do + +# start timing +start_time="$(date -u +%s)" + +# number 
of cameras +if [ "${modes[h]}" == "mono" ] +then + temp1="1" +fi +if [ "${modes[h]}" == "stereo" ] +then + temp1="2" +fi + +# run our ROS launch file (note we send console output to terminator) +roslaunch loop_fusion record_eth.launch max_cameras:="$temp1" dataset:="${bagnames[i]}" run_number:="$j" mode_type:="${modes[h]}" &> /dev/null + +# print out the time elapsed +end_time="$(date -u +%s)" +elapsed="$(($end_time-$start_time))" +echo "BASH: ${modes[h]} - ${bagnames[i]} - run $j took $elapsed seconds"; + +done + + +done +done + + diff --git a/loop_fusion/scripts/run_ros_tumvi.sh b/loop_fusion/scripts/run_ros_tumvi.sh new file mode 100755 index 0000000..dccf184 --- /dev/null +++ b/loop_fusion/scripts/run_ros_tumvi.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash + +# Source our workspace directory to load ENV variables +source /home/patrick/workspace/catkin_ws_ov/devel/setup.bash + + +#============================================================= +#============================================================= +#============================================================= + + +# estimator configurations +modes=( + "mono" + "stereo" +) + +# dataset locations +bagnames=( + "dataset-room1_512_16" + "dataset-room2_512_16" + "dataset-room3_512_16" + "dataset-room4_512_16" + "dataset-room5_512_16" + "dataset-room6_512_16" +) + + + +#============================================================= +#============================================================= +#============================================================= + + +# Loop through all modes +for h in "${!modes[@]}"; do +# Loop through all datasets +for i in "${!bagnames[@]}"; do + +# Monte Carlo runs for this dataset +# If you want more runs, change the below loop +for j in {00..04}; do + +# start timing +start_time="$(date -u +%s)" + +# number of cameras +if [ "${modes[h]}" == "mono" ] +then + temp1="1" +fi +if [ "${modes[h]}" == "stereo" ] +then + temp1="2" +fi + +# run our ROS launch file (note we send console output 
to terminator) +roslaunch loop_fusion record_tumvi.launch max_cameras:="$temp1" dataset:="${bagnames[i]}" run_number:="$j" mode_type:="${modes[h]}" &> /dev/null + +# print out the time elapsed +end_time="$(date -u +%s)" +elapsed="$(($end_time-$start_time))" +echo "BASH: ${modes[h]} - ${bagnames[i]} - run $j took $elapsed seconds"; + +done + + +done +done + + diff --git a/loop_fusion/src/ThirdParty/DBoW/BowVector.cpp b/loop_fusion/src/ThirdParty/DBoW/BowVector.cpp new file mode 100644 index 0000000..1337fa3 --- /dev/null +++ b/loop_fusion/src/ThirdParty/DBoW/BowVector.cpp @@ -0,0 +1,130 @@ +/** + * File: BowVector.cpp + * Date: March 2011 + * Author: Dorian Galvez-Lopez + * Description: bag of words vector + * License: see the LICENSE.txt file + * + */ + +#include +#include +#include +#include +#include + +#include "BowVector.h" + +namespace DBoW2 { + +// -------------------------------------------------------------------------- + +BowVector::BowVector(void) +{ +} + +// -------------------------------------------------------------------------- + +BowVector::~BowVector(void) +{ +} + +// -------------------------------------------------------------------------- + +void BowVector::addWeight(WordId id, WordValue v) +{ + BowVector::iterator vit = this->lower_bound(id); + + if(vit != this->end() && !(this->key_comp()(id, vit->first))) + { + vit->second += v; + } + else + { + this->insert(vit, BowVector::value_type(id, v)); + } +} + +// -------------------------------------------------------------------------- + +void BowVector::addIfNotExist(WordId id, WordValue v) +{ + BowVector::iterator vit = this->lower_bound(id); + + if(vit == this->end() || (this->key_comp()(id, vit->first))) + { + this->insert(vit, BowVector::value_type(id, v)); + } +} + +// -------------------------------------------------------------------------- + +void BowVector::normalize(LNorm norm_type) +{ + double norm = 0.0; + BowVector::iterator it; + + if(norm_type == DBoW2::L1) + { + for(it = begin(); 
it != end(); ++it) + norm += fabs(it->second); + } + else + { + for(it = begin(); it != end(); ++it) + norm += it->second * it->second; + norm = sqrt(norm); + } + + if(norm > 0.0) + { + for(it = begin(); it != end(); ++it) + it->second /= norm; + } +} + +// -------------------------------------------------------------------------- + +std::ostream& operator<< (std::ostream &out, const BowVector &v) +{ + BowVector::const_iterator vit; + std::vector::const_iterator iit; + unsigned int i = 0; + const unsigned int N = v.size(); + for(vit = v.begin(); vit != v.end(); ++vit, ++i) + { + out << "<" << vit->first << ", " << vit->second << ">"; + + if(i < N-1) out << ", "; + } + return out; +} + +// -------------------------------------------------------------------------- + +void BowVector::saveM(const std::string &filename, size_t W) const +{ + std::fstream f(filename.c_str(), std::ios::out); + + WordId last = 0; + BowVector::const_iterator bit; + for(bit = this->begin(); bit != this->end(); ++bit) + { + for(; last < bit->first; ++last) + { + f << "0 "; + } + f << bit->second << " "; + + last = bit->first + 1; + } + for(; last < (WordId)W; ++last) + f << "0 "; + + f.close(); +} + +// -------------------------------------------------------------------------- + +} // namespace DBoW2 + diff --git a/loop_fusion/src/ThirdParty/DBoW/BowVector.h b/loop_fusion/src/ThirdParty/DBoW/BowVector.h new file mode 100644 index 0000000..670b635 --- /dev/null +++ b/loop_fusion/src/ThirdParty/DBoW/BowVector.h @@ -0,0 +1,109 @@ +/** + * File: BowVector.h + * Date: March 2011 + * Author: Dorian Galvez-Lopez + * Description: bag of words vector + * License: see the LICENSE.txt file + * + */ + +#ifndef __D_T_BOW_VECTOR__ +#define __D_T_BOW_VECTOR__ + +#include +#include +#include + +namespace DBoW2 { + +/// Id of words +typedef unsigned int WordId; + +/// Value of a word +typedef double WordValue; + +/// Id of nodes in the vocabulary treee +typedef unsigned int NodeId; + +/// L-norms for 
normalization +enum LNorm +{ + L1, + L2 +}; + +/// Weighting type +enum WeightingType +{ + TF_IDF, + TF, + IDF, + BINARY +}; + +/// Scoring type +enum ScoringType +{ + L1_NORM, + L2_NORM, + CHI_SQUARE, + KL, + BHATTACHARYYA, + DOT_PRODUCT +}; + +/// Vector of words to represent images +class BowVector: + public std::map +{ +public: + + /** + * Constructor + */ + BowVector(void); + + /** + * Destructor + */ + ~BowVector(void); + + /** + * Adds a value to a word value existing in the vector, or creates a new + * word with the given value + * @param id word id to look for + * @param v value to create the word with, or to add to existing word + */ + void addWeight(WordId id, WordValue v); + + /** + * Adds a word with a value to the vector only if this does not exist yet + * @param id word id to look for + * @param v value to give to the word if this does not exist + */ + void addIfNotExist(WordId id, WordValue v); + + /** + * L1-Normalizes the values in the vector + * @param norm_type norm used + */ + void normalize(LNorm norm_type); + + /** + * Prints the content of the bow vector + * @param out stream + * @param v + */ + friend std::ostream& operator<<(std::ostream &out, const BowVector &v); + + /** + * Saves the bow vector as a vector in a matlab file + * @param filename + * @param W number of words in the vocabulary + */ + void saveM(const std::string &filename, size_t W) const; +}; + +} // namespace DBoW2 + +#endif diff --git a/loop_fusion/src/ThirdParty/DBoW/DBoW2.h b/loop_fusion/src/ThirdParty/DBoW/DBoW2.h new file mode 100644 index 0000000..2402069 --- /dev/null +++ b/loop_fusion/src/ThirdParty/DBoW/DBoW2.h @@ -0,0 +1,79 @@ +/* + * File: DBoW2.h + * Date: November 2011 + * Author: Dorian Galvez-Lopez + * Description: Generic include file for the DBoW2 classes and + * the specialized vocabularies and databases + * License: see the LICENSE.txt file + * + */ + +/*! 
\mainpage DBoW2 Library + * + * DBoW2 library for C++: + * Bag-of-word image database for image retrieval. + * + * Written by Dorian Galvez-Lopez, + * University of Zaragoza + * + * Check my website to obtain updates: http://doriangalvez.com + * + * \section requirements Requirements + * This library requires the DUtils, DUtilsCV, DVision and OpenCV libraries, + * as well as the boost::dynamic_bitset class. + * + * \section citation Citation + * If you use this software in academic works, please cite: +
+   @@ARTICLE{GalvezTRO12,
+    author={Galvez-Lopez, Dorian and Tardos, J. D.}, 
+    journal={IEEE Transactions on Robotics},
+    title={Bags of Binary Words for Fast Place Recognition in Image Sequences},
+    year={2012},
+    month={October},
+    volume={28},
+    number={5},
+    pages={1188--1197},
+    doi={10.1109/TRO.2012.2197158},
+    ISSN={1552-3098}
+  }
+ 
+ * + * \section license License + * This file is licensed under a Creative Commons + * Attribution-NonCommercial-ShareAlike 3.0 license. + * This file can be freely used and users can use, download and edit this file + * provided that credit is attributed to the original author. No users are + * permitted to use this file for commercial purposes unless explicit permission + * is given by the original author. Derivative works must be licensed using the + * same or similar license. + * Check http://creativecommons.org/licenses/by-nc-sa/3.0/ to obtain further + * details. + * + */ + +#ifndef __D_T_DBOW2__ +#define __D_T_DBOW2__ + +/// Includes all the data structures to manage vocabularies and image databases +namespace DBoW2 +{ +} + +#include "TemplatedVocabulary.h" +#include "TemplatedDatabase.h" +#include "BowVector.h" +#include "FeatureVector.h" +#include "QueryResults.h" +#include "FBrief.h" + +/// BRIEF Vocabulary +typedef DBoW2::TemplatedVocabulary + BriefVocabulary; + +/// BRIEF Database +typedef DBoW2::TemplatedDatabase + BriefDatabase; + +#endif + diff --git a/loop_fusion/src/ThirdParty/DBoW/FBrief.cpp b/loop_fusion/src/ThirdParty/DBoW/FBrief.cpp new file mode 100644 index 0000000..c447772 --- /dev/null +++ b/loop_fusion/src/ThirdParty/DBoW/FBrief.cpp @@ -0,0 +1,108 @@ +/** + * File: FBrief.cpp + * Date: November 2011 + * Author: Dorian Galvez-Lopez + * Description: functions for BRIEF descriptors + * License: see the LICENSE.txt file + * + */ + +#include +#include +#include + +#include "FBrief.h" + +using namespace std; + +namespace DBoW2 { + +// -------------------------------------------------------------------------- + +void FBrief::meanValue(const std::vector &descriptors, + FBrief::TDescriptor &mean) +{ + mean.reset(); + + if(descriptors.empty()) return; + + const int N2 = descriptors.size() / 2; + const int L = descriptors[0]->size(); + + vector counters(L, 0); + + vector::const_iterator it; + for(it = descriptors.begin(); it != descriptors.end(); 
++it) + { + const FBrief::TDescriptor &desc = **it; + for(int i = 0; i < L; ++i) + { + if(desc[i]) counters[i]++; + } + } + + for(int i = 0; i < L; ++i) + { + if(counters[i] > N2) mean.set(i); + } + +} + +// -------------------------------------------------------------------------- + +double FBrief::distance(const FBrief::TDescriptor &a, + const FBrief::TDescriptor &b) +{ + return (double)DVision::BRIEF::distance(a, b); +} + +// -------------------------------------------------------------------------- + +std::string FBrief::toString(const FBrief::TDescriptor &a) +{ + // from boost::bitset + string s; + to_string(a, s); // reversed + return s; +} + +// -------------------------------------------------------------------------- + +void FBrief::fromString(FBrief::TDescriptor &a, const std::string &s) +{ + // from boost::bitset + stringstream ss(s); + ss >> a; +} + +// -------------------------------------------------------------------------- + +void FBrief::toMat32F(const std::vector &descriptors, + cv::Mat &mat) +{ + if(descriptors.empty()) + { + mat.release(); + return; + } + + const int N = descriptors.size(); + const int L = descriptors[0].size(); + + mat.create(N, L, CV_32F); + + for(int i = 0; i < N; ++i) + { + const TDescriptor& desc = descriptors[i]; + float *p = mat.ptr(i); + for(int j = 0; j < L; ++j, ++p) + { + *p = (desc[j] ? 
1 : 0); + } + } +} + +// -------------------------------------------------------------------------- + +} // namespace DBoW2 + diff --git a/loop_fusion/src/ThirdParty/DBoW/FBrief.h b/loop_fusion/src/ThirdParty/DBoW/FBrief.h new file mode 100644 index 0000000..d0bfa21 --- /dev/null +++ b/loop_fusion/src/ThirdParty/DBoW/FBrief.h @@ -0,0 +1,73 @@ +/** + * File: FBrief.h + * Date: November 2011 + * Author: Dorian Galvez-Lopez + * Description: functions for BRIEF descriptors + * License: see the LICENSE.txt file + * + */ + +#ifndef __D_T_F_BRIEF__ +#define __D_T_F_BRIEF__ + +#include +#include +#include + +#include "FClass.h" +#include "../DVision/DVision.h" + +namespace DBoW2 { + +/// Functions to manipulate BRIEF descriptors +class FBrief: protected FClass +{ +public: + + typedef DVision::BRIEF::bitset TDescriptor; + typedef const TDescriptor *pDescriptor; + + /** + * Calculates the mean value of a set of descriptors + * @param descriptors + * @param mean mean descriptor + */ + static void meanValue(const std::vector &descriptors, + TDescriptor &mean); + + /** + * Calculates the distance between two descriptors + * @param a + * @param b + * @return distance + */ + static double distance(const TDescriptor &a, const TDescriptor &b); + + /** + * Returns a string version of the descriptor + * @param a descriptor + * @return string version + */ + static std::string toString(const TDescriptor &a); + + /** + * Returns a descriptor from a string + * @param a descriptor + * @param s string version + */ + static void fromString(TDescriptor &a, const std::string &s); + + /** + * Returns a mat with the descriptors in float format + * @param descriptors + * @param mat (out) NxL 32F matrix + */ + static void toMat32F(const std::vector &descriptors, + cv::Mat &mat); + +}; + +} // namespace DBoW2 + +#endif + diff --git a/loop_fusion/src/ThirdParty/DBoW/FClass.h b/loop_fusion/src/ThirdParty/DBoW/FClass.h new file mode 100644 index 0000000..adb8586 --- /dev/null +++ 
b/loop_fusion/src/ThirdParty/DBoW/FClass.h @@ -0,0 +1,71 @@ +/** + * File: FClass.h + * Date: November 2011 + * Author: Dorian Galvez-Lopez + * Description: generic FClass to instantiate templated classes + * License: see the LICENSE.txt file + * + */ + +#ifndef __D_T_FCLASS__ +#define __D_T_FCLASS__ + +#include +#include +#include + +namespace DBoW2 { + +/// Generic class to encapsulate functions to manage descriptors. +/** + * This class must be inherited. Derived classes can be used as the + * parameter F when creating Templated structures + * (TemplatedVocabulary, TemplatedDatabase, ...) + */ +class FClass +{ + class TDescriptor; + typedef const TDescriptor *pDescriptor; + + /** + * Calculates the mean value of a set of descriptors + * @param descriptors + * @param mean mean descriptor + */ + virtual void meanValue(const std::vector &descriptors, + TDescriptor &mean) = 0; + + /** + * Calculates the distance between two descriptors + * @param a + * @param b + * @return distance + */ + static double distance(const TDescriptor &a, const TDescriptor &b); + + /** + * Returns a string version of the descriptor + * @param a descriptor + * @return string version + */ + static std::string toString(const TDescriptor &a); + + /** + * Returns a descriptor from a string + * @param a descriptor + * @param s string version + */ + static void fromString(TDescriptor &a, const std::string &s); + + /** + * Returns a mat with the descriptors in float format + * @param descriptors + * @param mat (out) NxL 32F matrix + */ + static void toMat32F(const std::vector &descriptors, + cv::Mat &mat); +}; + +} // namespace DBoW2 + +#endif diff --git a/loop_fusion/src/ThirdParty/DBoW/FeatureVector.cpp b/loop_fusion/src/ThirdParty/DBoW/FeatureVector.cpp new file mode 100644 index 0000000..c055a15 --- /dev/null +++ b/loop_fusion/src/ThirdParty/DBoW/FeatureVector.cpp @@ -0,0 +1,85 @@ +/** + * File: FeatureVector.cpp + * Date: November 2011 + * Author: Dorian Galvez-Lopez + * Description: feature 
vector + * License: see the LICENSE.txt file + * + */ + +#include "FeatureVector.h" +#include +#include +#include + +namespace DBoW2 { + +// --------------------------------------------------------------------------- + +FeatureVector::FeatureVector(void) +{ +} + +// --------------------------------------------------------------------------- + +FeatureVector::~FeatureVector(void) +{ +} + +// --------------------------------------------------------------------------- + +void FeatureVector::addFeature(NodeId id, unsigned int i_feature) +{ + FeatureVector::iterator vit = this->lower_bound(id); + + if(vit != this->end() && vit->first == id) + { + vit->second.push_back(i_feature); + } + else + { + vit = this->insert(vit, FeatureVector::value_type(id, + std::vector() )); + vit->second.push_back(i_feature); + } +} + +// --------------------------------------------------------------------------- + +std::ostream& operator<<(std::ostream &out, + const FeatureVector &v) +{ + if(!v.empty()) + { + FeatureVector::const_iterator vit = v.begin(); + + const std::vector* f = &vit->second; + + out << "<" << vit->first << ": ["; + if(!f->empty()) out << (*f)[0]; + for(unsigned int i = 1; i < f->size(); ++i) + { + out << ", " << (*f)[i]; + } + out << "]>"; + + for(++vit; vit != v.end(); ++vit) + { + f = &vit->second; + + out << ", <" << vit->first << ": ["; + if(!f->empty()) out << (*f)[0]; + for(unsigned int i = 1; i < f->size(); ++i) + { + out << ", " << (*f)[i]; + } + out << "]>"; + } + } + + return out; +} + +// --------------------------------------------------------------------------- + +} // namespace DBoW2 diff --git a/loop_fusion/src/ThirdParty/DBoW/FeatureVector.h b/loop_fusion/src/ThirdParty/DBoW/FeatureVector.h new file mode 100644 index 0000000..08a91de --- /dev/null +++ b/loop_fusion/src/ThirdParty/DBoW/FeatureVector.h @@ -0,0 +1,56 @@ +/** + * File: FeatureVector.h + * Date: November 2011 + * Author: Dorian Galvez-Lopez + * Description: feature vector + * License: see the 
LICENSE.txt file + * + */ + +#ifndef __D_T_FEATURE_VECTOR__ +#define __D_T_FEATURE_VECTOR__ + +#include "BowVector.h" +#include +#include +#include + +namespace DBoW2 { + +/// Vector of nodes with indexes of local features +class FeatureVector: + public std::map > +{ +public: + + /** + * Constructor + */ + FeatureVector(void); + + /** + * Destructor + */ + ~FeatureVector(void); + + /** + * Adds a feature to an existing node, or adds a new node with an initial + * feature + * @param id node id to add or to modify + * @param i_feature index of feature to add to the given node + */ + void addFeature(NodeId id, unsigned int i_feature); + + /** + * Sends a string versions of the feature vector through the stream + * @param out stream + * @param v feature vector + */ + friend std::ostream& operator<<(std::ostream &out, const FeatureVector &v); + +}; + +} // namespace DBoW2 + +#endif + diff --git a/loop_fusion/src/ThirdParty/DBoW/QueryResults.cpp b/loop_fusion/src/ThirdParty/DBoW/QueryResults.cpp new file mode 100644 index 0000000..01897ca --- /dev/null +++ b/loop_fusion/src/ThirdParty/DBoW/QueryResults.cpp @@ -0,0 +1,63 @@ +/** + * File: QueryResults.cpp + * Date: March, November 2011 + * Author: Dorian Galvez-Lopez + * Description: structure to store results of database queries + * License: see the LICENSE.txt file + * + */ + +#include +#include +#include "QueryResults.h" + +using namespace std; + +namespace DBoW2 +{ + +// --------------------------------------------------------------------------- + +ostream & operator<<(ostream& os, const Result& ret ) +{ + os << ""; + return os; +} + +// --------------------------------------------------------------------------- + +ostream & operator<<(ostream& os, const QueryResults& ret ) +{ + if(ret.size() == 1) + os << "1 result:" << endl; + else + os << ret.size() << " results:" << endl; + + QueryResults::const_iterator rit; + for(rit = ret.begin(); rit != ret.end(); ++rit) + { + os << *rit; + if(rit + 1 != ret.end()) os << endl; 
+ } + return os; +} + +// --------------------------------------------------------------------------- + +void QueryResults::saveM(const std::string &filename) const +{ + fstream f(filename.c_str(), ios::out); + + QueryResults::const_iterator qit; + for(qit = begin(); qit != end(); ++qit) + { + f << qit->Id << " " << qit->Score << endl; + } + + f.close(); +} + +// --------------------------------------------------------------------------- + +} // namespace DBoW2 + diff --git a/loop_fusion/src/ThirdParty/DBoW/QueryResults.h b/loop_fusion/src/ThirdParty/DBoW/QueryResults.h new file mode 100644 index 0000000..2490bce --- /dev/null +++ b/loop_fusion/src/ThirdParty/DBoW/QueryResults.h @@ -0,0 +1,205 @@ +/** + * File: QueryResults.h + * Date: March, November 2011 + * Author: Dorian Galvez-Lopez + * Description: structure to store results of database queries + * License: see the LICENSE.txt file + * + */ + +#ifndef __D_T_QUERY_RESULTS__ +#define __D_T_QUERY_RESULTS__ + +#include + +namespace DBoW2 { + +/// Id of entries of the database +typedef unsigned int EntryId; + +/// Single result of a query +class Result +{ +public: + + /// Entry id + EntryId Id; + + /// Score obtained + double Score; + + /// debug + int nWords; // words in common + // !!! this is filled only by Bhatt score! 
+ // (and for BCMatching, BCThresholding then) + + double bhatScore, chiScore; + /// debug + + // only done by ChiSq and BCThresholding + double sumCommonVi; + double sumCommonWi; + double expectedChiScore; + /// debug + + /** + * Empty constructors + */ + inline Result(){} + + /** + * Creates a result with the given data + * @param _id entry id + * @param _score score + */ + inline Result(EntryId _id, double _score): Id(_id), Score(_score){} + + /** + * Compares the scores of two results + * @return true iff this.score < r.score + */ + inline bool operator<(const Result &r) const + { + return this->Score < r.Score; + } + + /** + * Compares the scores of two results + * @return true iff this.score > r.score + */ + inline bool operator>(const Result &r) const + { + return this->Score > r.Score; + } + + /** + * Compares the entry id of the result + * @return true iff this.id == id + */ + inline bool operator==(EntryId id) const + { + return this->Id == id; + } + + /** + * Compares the score of this entry with a given one + * @param s score to compare with + * @return true iff this score < s + */ + inline bool operator<(double s) const + { + return this->Score < s; + } + + /** + * Compares the score of this entry with a given one + * @param s score to compare with + * @return true iff this score > s + */ + inline bool operator>(double s) const + { + return this->Score > s; + } + + /** + * Compares the score of two results + * @param a + * @param b + * @return true iff a.Score > b.Score + */ + static inline bool gt(const Result &a, const Result &b) + { + return a.Score > b.Score; + } + + /** + * Compares the scores of two results + * @return true iff a.Score > b.Score + */ + inline static bool ge(const Result &a, const Result &b) + { + return a.Score > b.Score; + } + + /** + * Returns true iff a.Score >= b.Score + * @param a + * @param b + * @return true iff a.Score >= b.Score + */ + static inline bool geq(const Result &a, const Result &b) + { + return a.Score >= 
b.Score; + } + + /** + * Returns true iff a.Score >= s + * @param a + * @param s + * @return true iff a.Score >= s + */ + static inline bool geqv(const Result &a, double s) + { + return a.Score >= s; + } + + + /** + * Returns true iff a.Id < b.Id + * @param a + * @param b + * @return true iff a.Id < b.Id + */ + static inline bool ltId(const Result &a, const Result &b) + { + return a.Id < b.Id; + } + + /** + * Prints a string version of the result + * @param os ostream + * @param ret Result to print + */ + friend std::ostream & operator<<(std::ostream& os, const Result& ret ); +}; + +/// Multiple results from a query +class QueryResults: public std::vector +{ +public: + + /** + * Multiplies all the scores in the vector by factor + * @param factor + */ + inline void scaleScores(double factor); + + /** + * Prints a string version of the results + * @param os ostream + * @param ret QueryResults to print + */ + friend std::ostream & operator<<(std::ostream& os, const QueryResults& ret ); + + /** + * Saves a matlab file with the results + * @param filename + */ + void saveM(const std::string &filename) const; + +}; + +// -------------------------------------------------------------------------- + +inline void QueryResults::scaleScores(double factor) +{ + for(QueryResults::iterator qit = begin(); qit != end(); ++qit) + qit->Score *= factor; +} + +// -------------------------------------------------------------------------- + +} // namespace TemplatedBoW + +#endif + diff --git a/loop_fusion/src/ThirdParty/DBoW/ScoringObject.cpp b/loop_fusion/src/ThirdParty/DBoW/ScoringObject.cpp new file mode 100644 index 0000000..063a96e --- /dev/null +++ b/loop_fusion/src/ThirdParty/DBoW/ScoringObject.cpp @@ -0,0 +1,315 @@ +/** + * File: ScoringObject.cpp + * Date: November 2011 + * Author: Dorian Galvez-Lopez + * Description: functions to compute bow scores + * License: see the LICENSE.txt file + * + */ + +#include +#include "TemplatedVocabulary.h" +#include "BowVector.h" + +using 
namespace DBoW2; + +// If you change the type of WordValue, make sure you change also the +// epsilon value (this is needed by the KL method) +const double GeneralScoring::LOG_EPS = log(DBL_EPSILON); // FLT_EPSILON + +// --------------------------------------------------------------------------- +// --------------------------------------------------------------------------- + +double L1Scoring::score(const BowVector &v1, const BowVector &v2) const +{ + BowVector::const_iterator v1_it, v2_it; + const BowVector::const_iterator v1_end = v1.end(); + const BowVector::const_iterator v2_end = v2.end(); + + v1_it = v1.begin(); + v2_it = v2.begin(); + + double score = 0; + + while(v1_it != v1_end && v2_it != v2_end) + { + const WordValue& vi = v1_it->second; + const WordValue& wi = v2_it->second; + + if(v1_it->first == v2_it->first) + { + score += fabs(vi - wi) - fabs(vi) - fabs(wi); + + // move v1 and v2 forward + ++v1_it; + ++v2_it; + } + else if(v1_it->first < v2_it->first) + { + // move v1 forward + v1_it = v1.lower_bound(v2_it->first); + // v1_it = (first element >= v2_it.id) + } + else + { + // move v2 forward + v2_it = v2.lower_bound(v1_it->first); + // v2_it = (first element >= v1_it.id) + } + } + + // ||v - w||_{L1} = 2 + Sum(|v_i - w_i| - |v_i| - |w_i|) + // for all i | v_i != 0 and w_i != 0 + // (Nister, 2006) + // scaled_||v - w||_{L1} = 1 - 0.5 * ||v - w||_{L1} + score = -score/2.0; + + return score; // [0..1] +} + +// --------------------------------------------------------------------------- +// --------------------------------------------------------------------------- + +double L2Scoring::score(const BowVector &v1, const BowVector &v2) const +{ + BowVector::const_iterator v1_it, v2_it; + const BowVector::const_iterator v1_end = v1.end(); + const BowVector::const_iterator v2_end = v2.end(); + + v1_it = v1.begin(); + v2_it = v2.begin(); + + double score = 0; + + while(v1_it != v1_end && v2_it != v2_end) + { + const WordValue& vi = v1_it->second; + const 
WordValue& wi = v2_it->second; + + if(v1_it->first == v2_it->first) + { + score += vi * wi; + + // move v1 and v2 forward + ++v1_it; + ++v2_it; + } + else if(v1_it->first < v2_it->first) + { + // move v1 forward + v1_it = v1.lower_bound(v2_it->first); + // v1_it = (first element >= v2_it.id) + } + else + { + // move v2 forward + v2_it = v2.lower_bound(v1_it->first); + // v2_it = (first element >= v1_it.id) + } + } + + // ||v - w||_{L2} = sqrt( 2 - 2 * Sum(v_i * w_i) ) + // for all i | v_i != 0 and w_i != 0 ) + // (Nister, 2006) + if(score >= 1) // rounding errors + score = 1.0; + else + score = 1.0 - sqrt(1.0 - score); // [0..1] + + return score; +} + +// --------------------------------------------------------------------------- +// --------------------------------------------------------------------------- + +double ChiSquareScoring::score(const BowVector &v1, const BowVector &v2) + const +{ + BowVector::const_iterator v1_it, v2_it; + const BowVector::const_iterator v1_end = v1.end(); + const BowVector::const_iterator v2_end = v2.end(); + + v1_it = v1.begin(); + v2_it = v2.begin(); + + double score = 0; + + // all the items are taken into account + + while(v1_it != v1_end && v2_it != v2_end) + { + const WordValue& vi = v1_it->second; + const WordValue& wi = v2_it->second; + + if(v1_it->first == v2_it->first) + { + // (v-w)^2/(v+w) - v - w = -4 vw/(v+w) + // we move the -4 out + if(vi + wi != 0.0) score += vi * wi / (vi + wi); + + // move v1 and v2 forward + ++v1_it; + ++v2_it; + } + else if(v1_it->first < v2_it->first) + { + // move v1 forward + v1_it = v1.lower_bound(v2_it->first); + } + else + { + // move v2 forward + v2_it = v2.lower_bound(v1_it->first); + } + } + + // this takes the -4 into account + score = 2. 
* score; // [0..1] + + return score; +} + +// --------------------------------------------------------------------------- +// --------------------------------------------------------------------------- + +double KLScoring::score(const BowVector &v1, const BowVector &v2) const +{ + BowVector::const_iterator v1_it, v2_it; + const BowVector::const_iterator v1_end = v1.end(); + const BowVector::const_iterator v2_end = v2.end(); + + v1_it = v1.begin(); + v2_it = v2.begin(); + + double score = 0; + + // all the items or v are taken into account + + while(v1_it != v1_end && v2_it != v2_end) + { + const WordValue& vi = v1_it->second; + const WordValue& wi = v2_it->second; + + if(v1_it->first == v2_it->first) + { + if(vi != 0 && wi != 0) score += vi * log(vi/wi); + + // move v1 and v2 forward + ++v1_it; + ++v2_it; + } + else if(v1_it->first < v2_it->first) + { + // move v1 forward + score += vi * (log(vi) - LOG_EPS); + ++v1_it; + } + else + { + // move v2_it forward, do not add any score + v2_it = v2.lower_bound(v1_it->first); + // v2_it = (first element >= v1_it.id) + } + } + + // sum rest of items of v + for(; v1_it != v1_end; ++v1_it) + if(v1_it->second != 0) + score += v1_it->second * (log(v1_it->second) - LOG_EPS); + + return score; // cannot be scaled +} + +// --------------------------------------------------------------------------- +// --------------------------------------------------------------------------- + +double BhattacharyyaScoring::score(const BowVector &v1, + const BowVector &v2) const +{ + BowVector::const_iterator v1_it, v2_it; + const BowVector::const_iterator v1_end = v1.end(); + const BowVector::const_iterator v2_end = v2.end(); + + v1_it = v1.begin(); + v2_it = v2.begin(); + + double score = 0; + + while(v1_it != v1_end && v2_it != v2_end) + { + const WordValue& vi = v1_it->second; + const WordValue& wi = v2_it->second; + + if(v1_it->first == v2_it->first) + { + score += sqrt(vi * wi); + + // move v1 and v2 forward + ++v1_it; + ++v2_it; + } + else 
if(v1_it->first < v2_it->first) + { + // move v1 forward + v1_it = v1.lower_bound(v2_it->first); + // v1_it = (first element >= v2_it.id) + } + else + { + // move v2 forward + v2_it = v2.lower_bound(v1_it->first); + // v2_it = (first element >= v1_it.id) + } + } + + return score; // already scaled +} + +// --------------------------------------------------------------------------- +// --------------------------------------------------------------------------- + +double DotProductScoring::score(const BowVector &v1, + const BowVector &v2) const +{ + BowVector::const_iterator v1_it, v2_it; + const BowVector::const_iterator v1_end = v1.end(); + const BowVector::const_iterator v2_end = v2.end(); + + v1_it = v1.begin(); + v2_it = v2.begin(); + + double score = 0; + + while(v1_it != v1_end && v2_it != v2_end) + { + const WordValue& vi = v1_it->second; + const WordValue& wi = v2_it->second; + + if(v1_it->first == v2_it->first) + { + score += vi * wi; + + // move v1 and v2 forward + ++v1_it; + ++v2_it; + } + else if(v1_it->first < v2_it->first) + { + // move v1 forward + v1_it = v1.lower_bound(v2_it->first); + // v1_it = (first element >= v2_it.id) + } + else + { + // move v2 forward + v2_it = v2.lower_bound(v1_it->first); + // v2_it = (first element >= v1_it.id) + } + } + + return score; // cannot scale +} + +// --------------------------------------------------------------------------- +// --------------------------------------------------------------------------- + diff --git a/loop_fusion/src/ThirdParty/DBoW/ScoringObject.h b/loop_fusion/src/ThirdParty/DBoW/ScoringObject.h new file mode 100644 index 0000000..c57502d --- /dev/null +++ b/loop_fusion/src/ThirdParty/DBoW/ScoringObject.h @@ -0,0 +1,95 @@ +/** + * File: ScoringObject.h + * Date: November 2011 + * Author: Dorian Galvez-Lopez + * Description: functions to compute bow scores + * License: see the LICENSE.txt file + * + */ + +#ifndef __D_T_SCORING_OBJECT__ +#define __D_T_SCORING_OBJECT__ + +#include "BowVector.h" 
+ +namespace DBoW2 { + +/// Base class of scoring functions +class GeneralScoring +{ +public: + /** + * Computes the score between two vectors. Vectors must be sorted and + * normalized if necessary + * @param v (in/out) + * @param w (in/out) + * @return score + */ + virtual double score(const BowVector &v, const BowVector &w) const = 0; + + /** + * Returns whether a vector must be normalized before scoring according + * to the scoring scheme + * @param norm norm to use + * @return true iff must normalize + */ + virtual bool mustNormalize(LNorm &norm) const = 0; + + /// Log of epsilon + static const double LOG_EPS; + // If you change the type of WordValue, make sure you change also the + // epsilon value (this is needed by the KL method) + + virtual ~GeneralScoring() {} //!< Required for virtual base classes +}; + +/** + * Macro for defining Scoring classes + * @param NAME name of class + * @param MUSTNORMALIZE if vectors must be normalized to compute the score + * @param NORM type of norm to use when MUSTNORMALIZE + */ +#define __SCORING_CLASS(NAME, MUSTNORMALIZE, NORM) \ + NAME: public GeneralScoring \ + { public: \ + /** \ + * Computes score between two vectors \ + * @param v \ + * @param w \ + * @return score between v and w \ + */ \ + virtual double score(const BowVector &v, const BowVector &w) const; \ + \ + /** \ + * Says if a vector must be normalized according to the scoring function \ + * @param norm (out) if true, norm to use + * @return true iff vectors must be normalized \ + */ \ + virtual inline bool mustNormalize(LNorm &norm) const \ + { norm = NORM; return MUSTNORMALIZE; } \ + } + +/// L1 Scoring object +class __SCORING_CLASS(L1Scoring, true, L1); + +/// L2 Scoring object +class __SCORING_CLASS(L2Scoring, true, L2); + +/// Chi square Scoring object +class __SCORING_CLASS(ChiSquareScoring, true, L1); + +/// KL divergence Scoring object +class __SCORING_CLASS(KLScoring, true, L1); + +/// Bhattacharyya Scoring object +class 
__SCORING_CLASS(BhattacharyyaScoring, true, L1); + +/// Dot product Scoring object +class __SCORING_CLASS(DotProductScoring, false, L1); + +#undef __SCORING_CLASS + +} // namespace DBoW2 + +#endif + diff --git a/loop_fusion/src/ThirdParty/DBoW/TemplatedDatabase.h b/loop_fusion/src/ThirdParty/DBoW/TemplatedDatabase.h new file mode 100644 index 0000000..d65574f --- /dev/null +++ b/loop_fusion/src/ThirdParty/DBoW/TemplatedDatabase.h @@ -0,0 +1,1390 @@ +/** + * File: TemplatedDatabase.h + * Date: March 2011 + * Author: Dorian Galvez-Lopez + * Description: templated database of images + * License: see the LICENSE.txt file + * + */ + +#ifndef __D_T_TEMPLATED_DATABASE__ +#define __D_T_TEMPLATED_DATABASE__ + +#include +#include +#include +#include +#include +#include + +#include "TemplatedVocabulary.h" +#include "QueryResults.h" +#include "ScoringObject.h" +#include "BowVector.h" +#include "FeatureVector.h" + +#include "../DUtils/DUtils.h" + +namespace DBoW2 { + +// For query functions +static int MIN_COMMON_WORDS = 5; + +/// @param TDescriptor class of descriptor +/// @param F class of descriptor functions +template +/// Generic Database +class TemplatedDatabase +{ +public: + + /** + * Creates an empty database without vocabulary + * @param use_di a direct index is used to store feature indexes + * @param di_levels levels to go up the vocabulary tree to select the + * node id to store in the direct index when adding images + */ + explicit TemplatedDatabase(bool use_di = true, int di_levels = 0); + + /** + * Creates a database with the given vocabulary + * @param T class inherited from TemplatedVocabulary + * @param voc vocabulary + * @param use_di a direct index is used to store feature indexes + * @param di_levels levels to go up the vocabulary tree to select the + * node id to store in the direct index when adding images + */ + template + explicit TemplatedDatabase(const T &voc, bool use_di = true, + int di_levels = 0); + + /** + * Copy constructor. 
Copies the vocabulary too + * @param db object to copy + */ + TemplatedDatabase(const TemplatedDatabase &db); + + /** + * Creates the database from a file + * @param filename + */ + TemplatedDatabase(const std::string &filename); + + /** + * Creates the database from a file + * @param filename + */ + TemplatedDatabase(const char *filename); + + /** + * Destructor + */ + virtual ~TemplatedDatabase(void); + + /** + * Copies the given database and its vocabulary + * @param db database to copy + */ + TemplatedDatabase& operator=( + const TemplatedDatabase &db); + + /** + * Sets the vocabulary to use and clears the content of the database. + * @param T class inherited from TemplatedVocabulary + * @param voc vocabulary to copy + */ + template + inline void setVocabulary(const T &voc); + + /** + * Sets the vocabulary to use and the direct index parameters, and clears + * the content of the database + * @param T class inherited from TemplatedVocabulary + * @param voc vocabulary to copy + * @param use_di a direct index is used to store feature indexes + * @param di_levels levels to go up the vocabulary tree to select the + * node id to store in the direct index when adding images + */ + template + void setVocabulary(const T& voc, bool use_di, int di_levels = 0); + + /** + * Returns a pointer to the vocabulary used + * @return vocabulary + */ + inline const TemplatedVocabulary* getVocabulary() const; + + /** + * Allocates some memory for the direct and inverted indexes + * @param nd number of expected image entries in the database + * @param ni number of expected words per image + * @note Use 0 to ignore a parameter + */ + void allocate(int nd = 0, int ni = 0); + + /** + * Adds an entry to the database and returns its index + * @param features features of the new entry + * @param bowvec if given, the bow vector of these features is returned + * @param fvec if given, the vector of nodes and feature indexes is returned + * @return id of new entry + */ + EntryId add(const 
std::vector &features, + BowVector *bowvec = NULL, FeatureVector *fvec = NULL); + + /** + * Adss an entry to the database and returns its index + * @param vec bow vector + * @param fec feature vector to add the entry. Only necessary if using the + * direct index + * @return id of new entry + */ + EntryId add(const BowVector &vec, + const FeatureVector &fec = FeatureVector() ); + + void delete_entry(const EntryId entry_id); + + /** + * Empties the database + */ + inline void clear(); + + /** + * Returns the number of entries in the database + * @return number of entries in the database + */ + inline unsigned int size() const; + + /** + * Checks if the direct index is being used + * @return true iff using direct index + */ + inline bool usingDirectIndex() const; + + /** + * Returns the di levels when using direct index + * @return di levels + */ + inline int getDirectIndexLevels() const; + + /** + * Queries the database with some features + * @param features query features + * @param ret (out) query results + * @param max_results number of results to return. <= 0 means all + * @param max_id only entries with id <= max_id are returned in ret. + * < 0 means all + */ + void query(const std::vector &features, QueryResults &ret, + int max_results = 1, int max_id = -1) const; + + /** + * Queries the database with a vector + * @param vec bow vector already normalized + * @param ret results + * @param max_results number of results to return. <= 0 means all + * @param max_id only entries with id <= max_id are returned in ret. 
+ * < 0 means all + */ + void query(const BowVector &vec, QueryResults &ret, + int max_results = 1, int max_id = -1) const; + + /** + * Returns the a feature vector associated with a database entry + * @param id entry id (must be < size()) + * @return const reference to map of nodes and their associated features in + * the given entry + */ + const FeatureVector& retrieveFeatures(EntryId id) const; + + /** + * Stores the database in a file + * @param filename + */ + void save(const std::string &filename) const; + + /** + * Loads the database from a file + * @param filename + */ + void load(const std::string &filename); + + /** + * Stores the database in the given file storage structure + * @param fs + * @param name node name + */ + virtual void save(cv::FileStorage &fs, + const std::string &name = "database") const; + + /** + * Loads the database from the given file storage structure + * @param fs + * @param name node name + */ + virtual void load(const cv::FileStorage &fs, + const std::string &name = "database"); + +protected: + + /// Query with L1 scoring + void queryL1(const BowVector &vec, QueryResults &ret, + int max_results, int max_id) const; + + /// Query with L2 scoring + void queryL2(const BowVector &vec, QueryResults &ret, + int max_results, int max_id) const; + + /// Query with Chi square scoring + void queryChiSquare(const BowVector &vec, QueryResults &ret, + int max_results, int max_id) const; + + /// Query with Bhattacharyya scoring + void queryBhattacharyya(const BowVector &vec, QueryResults &ret, + int max_results, int max_id) const; + + /// Query with KL divergence scoring + void queryKL(const BowVector &vec, QueryResults &ret, + int max_results, int max_id) const; + + /// Query with dot product scoring + void queryDotProduct(const BowVector &vec, QueryResults &ret, + int max_results, int max_id) const; + +protected: + + /* Inverted file declaration */ + + /// Item of IFRow + struct IFPair + { + /// Entry id + EntryId entry_id; + + /// Word weight 
in this entry + WordValue word_weight; + + /** + * Creates an empty pair + */ + IFPair(){} + + /** + * Creates an inverted file pair + * @param eid entry id + * @param wv word weight + */ + IFPair(EntryId eid, WordValue wv): entry_id(eid), word_weight(wv) {} + + /** + * Compares the entry ids + * @param eid + * @return true iff this entry id is the same as eid + */ + inline bool operator==(EntryId eid) const { return entry_id == eid; } + }; + + /// Row of InvertedFile + typedef std::list IFRow; + // IFRows are sorted in ascending entry_id order + + /// Inverted index + typedef std::vector InvertedFile; + // InvertedFile[word_id] --> inverted file of that word + + /* Direct file declaration */ + + /// Direct index + typedef std::vector DirectFile; + // DirectFile[entry_id] --> [ directentry, ... ] + +protected: + + /// Associated vocabulary + TemplatedVocabulary *m_voc; + + /// Flag to use direct index + bool m_use_di; + + /// Levels to go up the vocabulary tree to select nodes to store + /// in the direct index + int m_dilevels; + + /// Inverted file (must have size() == |words|) + InvertedFile m_ifile; + + + /// Direct file (resized for allocation) + DirectFile m_dfile; + + std::vector m_dBowfile; + + /// Number of valid entries in m_dfile + int m_nentries; + +}; + +// -------------------------------------------------------------------------- + +template +TemplatedDatabase::TemplatedDatabase + (bool use_di, int di_levels) + : m_voc(NULL), m_use_di(use_di), m_dilevels(di_levels), m_nentries(0) +{ +} + +// -------------------------------------------------------------------------- + +template +template +TemplatedDatabase::TemplatedDatabase + (const T &voc, bool use_di, int di_levels) + : m_voc(NULL), m_use_di(use_di), m_dilevels(di_levels) +{ + setVocabulary(voc); + clear(); +} + +// -------------------------------------------------------------------------- + +template +TemplatedDatabase::TemplatedDatabase + (const TemplatedDatabase &db) + : m_voc(NULL) +{ + *this = 
db; +} + +// -------------------------------------------------------------------------- + +template +TemplatedDatabase::TemplatedDatabase + (const std::string &filename) + : m_voc(NULL) +{ + load(filename); +} + +// -------------------------------------------------------------------------- + +template +TemplatedDatabase::TemplatedDatabase + (const char *filename) + : m_voc(NULL) +{ + load(filename); +} + +// -------------------------------------------------------------------------- + +template +TemplatedDatabase::~TemplatedDatabase(void) +{ + delete m_voc; +} + +// -------------------------------------------------------------------------- + +template +TemplatedDatabase& TemplatedDatabase::operator= + (const TemplatedDatabase &db) +{ + if(this != &db) + { + m_dfile = db.m_dfile; + m_dBowfile = db.m_dBowfile; + m_dilevels = db.m_dilevels; + m_ifile = db.m_ifile; + m_nentries = db.m_nentries; + m_use_di = db.m_use_di; + setVocabulary(*db.m_voc); + } + return *this; +} + +// -------------------------------------------------------------------------- + +template +EntryId TemplatedDatabase::add( + const std::vector &features, + BowVector *bowvec, FeatureVector *fvec) +{ + BowVector aux; + BowVector& v = (bowvec ? 
*bowvec : aux); + + if(m_use_di && fvec != NULL) + { + m_voc->transform(features, v, *fvec, m_dilevels); // with features + return add(v, *fvec); + } + else if(m_use_di) + { + FeatureVector fv; + m_voc->transform(features, v, fv, m_dilevels); // with features + return add(v, fv); + } + else if(fvec != NULL) + { + m_voc->transform(features, v, *fvec, m_dilevels); // with features + return add(v); + } + else + { + m_voc->transform(features, v); // with features + return add(v); + } +} + +// --------------------------------------------------------------------------- + +template +EntryId TemplatedDatabase::add(const BowVector &v, + const FeatureVector &fv) +{ + EntryId entry_id = m_nentries++; + + BowVector::const_iterator vit; + std::vector::const_iterator iit; + + if(m_use_di) + { + // update direct file + if(entry_id == m_dfile.size()) + { + m_dfile.push_back(fv); + m_dBowfile.push_back(v); + } + else + { + m_dfile[entry_id] = fv; + m_dBowfile[entry_id] = v; + } + } + + // update inverted file + for(vit = v.begin(); vit != v.end(); ++vit) + { + const WordId& word_id = vit->first; + const WordValue& word_weight = vit->second; + + IFRow& ifrow = m_ifile[word_id]; + ifrow.push_back(IFPair(entry_id, word_weight)); + } + + return entry_id; +} + +// --------------------------------------------------------------------------- + +template +void TemplatedDatabase::delete_entry(const EntryId entry_id) +{ + BowVector v = m_dBowfile[entry_id]; + + BowVector::const_iterator vit; + + for (vit = v.begin(); vit != v.end(); ++vit) + { + const WordId& word_id = vit->first; + IFRow& ifrow = m_ifile[word_id]; + typename IFRow::iterator rit; + for (rit = ifrow.begin(); rit != ifrow.end(); ++rit) + { + if (rit->entry_id == entry_id) + { + ifrow.erase(rit); + break; + } + } + } + m_dBowfile[entry_id].clear(); + m_dfile[entry_id].clear(); +} + + +// -------------------------------------------------------------------------- + +template +template +inline void TemplatedDatabase::setVocabulary 
+ (const T& voc) +{ + delete m_voc; + m_voc = new T(voc); + clear(); +} + +// -------------------------------------------------------------------------- + +template +template +inline void TemplatedDatabase::setVocabulary + (const T& voc, bool use_di, int di_levels) +{ + m_use_di = use_di; + m_dilevels = di_levels; + delete m_voc; + m_voc = new T(voc); + clear(); +} + +// -------------------------------------------------------------------------- + +template +inline const TemplatedVocabulary* +TemplatedDatabase::getVocabulary() const +{ + return m_voc; +} + +// -------------------------------------------------------------------------- + +template +inline void TemplatedDatabase::clear() +{ + // resize vectors + m_ifile.resize(0); + m_ifile.resize(m_voc->size()); + m_dfile.resize(0); + m_dBowfile.resize(0); + m_nentries = 0; +} + +// -------------------------------------------------------------------------- + +template +void TemplatedDatabase::allocate(int nd, int ni) +{ + // m_ifile already contains |words| items + if(ni > 0) + { + typename std::vector::iterator rit; + for(rit = m_ifile.begin(); rit != m_ifile.end(); ++rit) + { + int n = (int)rit->size(); + if(ni > n) + { + rit->resize(ni); + rit->resize(n); + } + } + } + + if(m_use_di && (int)m_dfile.size() < nd) + { + m_dfile.resize(nd); + m_dBowfile.resize(nd); + } +} + +// -------------------------------------------------------------------------- + +template +inline unsigned int TemplatedDatabase::size() const +{ + return m_nentries; +} + +// -------------------------------------------------------------------------- + +template +inline bool TemplatedDatabase::usingDirectIndex() const +{ + return m_use_di; +} + +// -------------------------------------------------------------------------- + +template +inline int TemplatedDatabase::getDirectIndexLevels() const +{ + return m_dilevels; +} + +// -------------------------------------------------------------------------- + +template +void TemplatedDatabase::query( + 
const std::vector &features, + QueryResults &ret, int max_results, int max_id) const +{ + BowVector vec; + m_voc->transform(features, vec); + query(vec, ret, max_results, max_id); +} + +// -------------------------------------------------------------------------- + +template +void TemplatedDatabase::query( + const BowVector &vec, + QueryResults &ret, int max_results, int max_id) const +{ + ret.resize(0); + + switch(m_voc->getScoringType()) + { + case L1_NORM: + queryL1(vec, ret, max_results, max_id); + break; + + case L2_NORM: + queryL2(vec, ret, max_results, max_id); + break; + + case CHI_SQUARE: + queryChiSquare(vec, ret, max_results, max_id); + break; + + case KL: + queryKL(vec, ret, max_results, max_id); + break; + + case BHATTACHARYYA: + queryBhattacharyya(vec, ret, max_results, max_id); + break; + + case DOT_PRODUCT: + queryDotProduct(vec, ret, max_results, max_id); + break; + } +} + +// -------------------------------------------------------------------------- + +template +void TemplatedDatabase::queryL1(const BowVector &vec, + QueryResults &ret, int max_results, int max_id) const +{ + BowVector::const_iterator vit; + typename IFRow::const_iterator rit; + + std::map pairs; + std::map::iterator pit; + + for(vit = vec.begin(); vit != vec.end(); ++vit) + { + const WordId word_id = vit->first; + const WordValue& qvalue = vit->second; + + const IFRow& row = m_ifile[word_id]; + + // IFRows are sorted in ascending entry_id order + + for(rit = row.begin(); rit != row.end(); ++rit) + { + const EntryId entry_id = rit->entry_id; + const WordValue& dvalue = rit->word_weight; + + if((int)entry_id < max_id || max_id == -1 || (int)entry_id == m_nentries - 1) + { + double value = fabs(qvalue - dvalue) - fabs(qvalue) - fabs(dvalue); + + pit = pairs.lower_bound(entry_id); + if(pit != pairs.end() && !(pairs.key_comp()(entry_id, pit->first))) + { + pit->second += value; + } + else + { + pairs.insert(pit, + std::map::value_type(entry_id, value)); + } + } + + } // for each 
inverted row + } // for each query word + + // move to vector + ret.reserve(pairs.size()); + for(pit = pairs.begin(); pit != pairs.end(); ++pit) + { + ret.push_back(Result(pit->first, pit->second)); + } + + // resulting "scores" are now in [-2 best .. 0 worst] + + // sort vector in ascending order of score + std::sort(ret.begin(), ret.end()); + // (ret is inverted now --the lower the better--) + + // cut vector + if(max_results > 0 && (int)ret.size() > max_results) + ret.resize(max_results); + + // complete and scale score to [0 worst .. 1 best] + // ||v - w||_{L1} = 2 + Sum(|v_i - w_i| - |v_i| - |w_i|) + // for all i | v_i != 0 and w_i != 0 + // (Nister, 2006) + // scaled_||v - w||_{L1} = 1 - 0.5 * ||v - w||_{L1} + QueryResults::iterator qit; + for(qit = ret.begin(); qit != ret.end(); qit++) + qit->Score = -qit->Score/2.0; +} + +// -------------------------------------------------------------------------- + +template +void TemplatedDatabase::queryL2(const BowVector &vec, + QueryResults &ret, int max_results, int max_id) const +{ + BowVector::const_iterator vit; + typename IFRow::const_iterator rit; + + std::map pairs; + std::map::iterator pit; + + //map counters; + //map::iterator cit; + + for(vit = vec.begin(); vit != vec.end(); ++vit) + { + const WordId word_id = vit->first; + const WordValue& qvalue = vit->second; + + const IFRow& row = m_ifile[word_id]; + + // IFRows are sorted in ascending entry_id order + + for(rit = row.begin(); rit != row.end(); ++rit) + { + const EntryId entry_id = rit->entry_id; + const WordValue& dvalue = rit->word_weight; + + if((int)entry_id < max_id || max_id == -1) + { + double value = - qvalue * dvalue; // minus sign for sorting trick + + pit = pairs.lower_bound(entry_id); + //cit = counters.lower_bound(entry_id); + if(pit != pairs.end() && !(pairs.key_comp()(entry_id, pit->first))) + { + pit->second += value; + //cit->second += 1; + } + else + { + pairs.insert(pit, + std::map::value_type(entry_id, value)); + + 
//counters.insert(cit, + // map::value_type(entry_id, 1)); + } + } + + } // for each inverted row + } // for each query word + + // move to vector + ret.reserve(pairs.size()); + //cit = counters.begin(); + for(pit = pairs.begin(); pit != pairs.end(); ++pit)//, ++cit) + { + ret.push_back(Result(pit->first, pit->second));// / cit->second)); + } + + // resulting "scores" are now in [-1 best .. 0 worst] + + // sort vector in ascending order of score + std::sort(ret.begin(), ret.end()); + // (ret is inverted now --the lower the better--) + + // cut vector + if(max_results > 0 && (int)ret.size() > max_results) + ret.resize(max_results); + + // complete and scale score to [0 worst .. 1 best] + // ||v - w||_{L2} = sqrt( 2 - 2 * Sum(v_i * w_i) + // for all i | v_i != 0 and w_i != 0 ) + // (Nister, 2006) + QueryResults::iterator qit; + for(qit = ret.begin(); qit != ret.end(); qit++) + { + if(qit->Score <= -1.0) // rounding error + qit->Score = 1.0; + else + qit->Score = 1.0 - sqrt(1.0 + qit->Score); // [0..1] + // the + sign is ok, it is due to - sign in + // value = - qvalue * dvalue + } + +} + +// -------------------------------------------------------------------------- + +template +void TemplatedDatabase::queryChiSquare(const BowVector &vec, + QueryResults &ret, int max_results, int max_id) const +{ + BowVector::const_iterator vit; + typename IFRow::const_iterator rit; + + std::map > pairs; + std::map >::iterator pit; + + std::map > sums; // < sum vi, sum wi > + std::map >::iterator sit; + + // In the current implementation, we suppose vec is not normalized + + //map expected; + //map::iterator eit; + + for(vit = vec.begin(); vit != vec.end(); ++vit) + { + const WordId word_id = vit->first; + const WordValue& qvalue = vit->second; + + const IFRow& row = m_ifile[word_id]; + + // IFRows are sorted in ascending entry_id order + + for(rit = row.begin(); rit != row.end(); ++rit) + { + const EntryId entry_id = rit->entry_id; + const WordValue& dvalue = rit->word_weight; + + 
if((int)entry_id < max_id || max_id == -1) + { + // (v-w)^2/(v+w) - v - w = -4 vw/(v+w) + // we move the 4 out + double value = 0; + if(qvalue + dvalue != 0.0) // words may have weight zero + value = - qvalue * dvalue / (qvalue + dvalue); + + pit = pairs.lower_bound(entry_id); + sit = sums.lower_bound(entry_id); + //eit = expected.lower_bound(entry_id); + if(pit != pairs.end() && !(pairs.key_comp()(entry_id, pit->first))) + { + pit->second.first += value; + pit->second.second += 1; + //eit->second += dvalue; + sit->second.first += qvalue; + sit->second.second += dvalue; + } + else + { + pairs.insert(pit, + std::map >::value_type(entry_id, + std::make_pair(value, 1) )); + //expected.insert(eit, + // map::value_type(entry_id, dvalue)); + + sums.insert(sit, + std::map >::value_type(entry_id, + std::make_pair(qvalue, dvalue) )); + } + } + + } // for each inverted row + } // for each query word + + // move to vector + ret.reserve(pairs.size()); + sit = sums.begin(); + for(pit = pairs.begin(); pit != pairs.end(); ++pit, ++sit) + { + if(pit->second.second >= MIN_COMMON_WORDS) + { + ret.push_back(Result(pit->first, pit->second.first)); + ret.back().nWords = pit->second.second; + ret.back().sumCommonVi = sit->second.first; + ret.back().sumCommonWi = sit->second.second; + ret.back().expectedChiScore = + 2 * sit->second.second / (1 + sit->second.second); + } + + //ret.push_back(Result(pit->first, pit->second)); + } + + // resulting "scores" are now in [-2 best .. 0 worst] + // we have to add +2 to the scores to obtain the chi square score + + // sort vector in ascending order of score + std::sort(ret.begin(), ret.end()); + // (ret is inverted now --the lower the better--) + + // cut vector + if(max_results > 0 && (int)ret.size() > max_results) + ret.resize(max_results); + + // complete and scale score to [0 worst .. 1 best] + QueryResults::iterator qit; + for(qit = ret.begin(); qit != ret.end(); qit++) + { + // this takes the 4 into account + qit->Score = - 2. 
* qit->Score; // [0..1] + + qit->chiScore = qit->Score; + } + +} + +// -------------------------------------------------------------------------- + +template +void TemplatedDatabase::queryKL(const BowVector &vec, + QueryResults &ret, int max_results, int max_id) const +{ + BowVector::const_iterator vit; + typename IFRow::const_iterator rit; + + std::map pairs; + std::map::iterator pit; + + for(vit = vec.begin(); vit != vec.end(); ++vit) + { + const WordId word_id = vit->first; + const WordValue& vi = vit->second; + + const IFRow& row = m_ifile[word_id]; + + // IFRows are sorted in ascending entry_id order + + for(rit = row.begin(); rit != row.end(); ++rit) + { + const EntryId entry_id = rit->entry_id; + const WordValue& wi = rit->word_weight; + + if((int)entry_id < max_id || max_id == -1) + { + double value = 0; + if(vi != 0 && wi != 0) value = vi * log(vi/wi); + + pit = pairs.lower_bound(entry_id); + if(pit != pairs.end() && !(pairs.key_comp()(entry_id, pit->first))) + { + pit->second += value; + } + else + { + pairs.insert(pit, + std::map::value_type(entry_id, value)); + } + } + + } // for each inverted row + } // for each query word + + // resulting "scores" are now in [-X worst .. 0 best .. X worst] + // but we cannot make sure which ones are better without calculating + // the complete score + + // complete scores and move to vector + ret.reserve(pairs.size()); + for(pit = pairs.begin(); pit != pairs.end(); ++pit) + { + EntryId eid = pit->first; + double value = 0.0; + + for(vit = vec.begin(); vit != vec.end(); ++vit) + { + const WordValue &vi = vit->second; + const IFRow& row = m_ifile[vit->first]; + + if(vi != 0) + { + if(row.end() == find(row.begin(), row.end(), eid )) + { + value += vi * (log(vi) - GeneralScoring::LOG_EPS); + } + } + } + + pit->second += value; + + // to vector + ret.push_back(Result(pit->first, pit->second)); + } + + // real scores are now in [0 best .. 
X worst] + + // sort vector in ascending order + // (scores are inverted now --the lower the better--) + std::sort(ret.begin(), ret.end()); + + // cut vector + if(max_results > 0 && (int)ret.size() > max_results) + ret.resize(max_results); + + // cannot scale scores + +} + +// -------------------------------------------------------------------------- + +template +void TemplatedDatabase::queryBhattacharyya( + const BowVector &vec, QueryResults &ret, int max_results, int max_id) const +{ + BowVector::const_iterator vit; + typename IFRow::const_iterator rit; + + //map pairs; + //map::iterator pit; + + std::map > pairs; // > + std::map >::iterator pit; + + for(vit = vec.begin(); vit != vec.end(); ++vit) + { + const WordId word_id = vit->first; + const WordValue& qvalue = vit->second; + + const IFRow& row = m_ifile[word_id]; + + // IFRows are sorted in ascending entry_id order + + for(rit = row.begin(); rit != row.end(); ++rit) + { + const EntryId entry_id = rit->entry_id; + const WordValue& dvalue = rit->word_weight; + + if((int)entry_id < max_id || max_id == -1) + { + double value = sqrt(qvalue * dvalue); + + pit = pairs.lower_bound(entry_id); + if(pit != pairs.end() && !(pairs.key_comp()(entry_id, pit->first))) + { + pit->second.first += value; + pit->second.second += 1; + } + else + { + pairs.insert(pit, + std::map >::value_type(entry_id, + std::make_pair(value, 1))); + } + } + + } // for each inverted row + } // for each query word + + // move to vector + ret.reserve(pairs.size()); + for(pit = pairs.begin(); pit != pairs.end(); ++pit) + { + if(pit->second.second >= MIN_COMMON_WORDS) + { + ret.push_back(Result(pit->first, pit->second.first)); + ret.back().nWords = pit->second.second; + ret.back().bhatScore = pit->second.first; + } + } + + // scores are already in [0..1] + + // sort vector in descending order + std::sort(ret.begin(), ret.end(), Result::gt); + + // cut vector + if(max_results > 0 && (int)ret.size() > max_results) + ret.resize(max_results); + +} + +// 
--------------------------------------------------------------------------- + +template +void TemplatedDatabase::queryDotProduct( + const BowVector &vec, QueryResults &ret, int max_results, int max_id) const +{ + BowVector::const_iterator vit; + typename IFRow::const_iterator rit; + + std::map pairs; + std::map::iterator pit; + + for(vit = vec.begin(); vit != vec.end(); ++vit) + { + const WordId word_id = vit->first; + const WordValue& qvalue = vit->second; + + const IFRow& row = m_ifile[word_id]; + + // IFRows are sorted in ascending entry_id order + + for(rit = row.begin(); rit != row.end(); ++rit) + { + const EntryId entry_id = rit->entry_id; + const WordValue& dvalue = rit->word_weight; + + if((int)entry_id < max_id || max_id == -1) + { + double value; + if(this->m_voc->getWeightingType() == BINARY) + value = 1; + else + value = qvalue * dvalue; + + pit = pairs.lower_bound(entry_id); + if(pit != pairs.end() && !(pairs.key_comp()(entry_id, pit->first))) + { + pit->second += value; + } + else + { + pairs.insert(pit, + std::map::value_type(entry_id, value)); + } + } + + } // for each inverted row + } // for each query word + + // move to vector + ret.reserve(pairs.size()); + for(pit = pairs.begin(); pit != pairs.end(); ++pit) + { + ret.push_back(Result(pit->first, pit->second)); + } + + // scores are the greater the better + + // sort vector in descending order + std::sort(ret.begin(), ret.end(), Result::gt); + + // cut vector + if(max_results > 0 && (int)ret.size() > max_results) + ret.resize(max_results); + + // these scores cannot be scaled +} + +// --------------------------------------------------------------------------- + +template +const FeatureVector& TemplatedDatabase::retrieveFeatures + (EntryId id) const +{ + assert(id < size()); + return m_dfile[id]; +} + +// -------------------------------------------------------------------------- + +template +void TemplatedDatabase::save(const std::string &filename) const +{ + cv::FileStorage fs(filename.c_str(), 
cv::FileStorage::WRITE); + if(!fs.isOpened()) throw std::string("Could not open file ") + filename; + + save(fs); +} + +// -------------------------------------------------------------------------- + +template +void TemplatedDatabase::save(cv::FileStorage &fs, + const std::string &name) const +{ + // Format YAML: + // vocabulary { ... see TemplatedVocabulary::save } + // database + // { + // nEntries: + // usingDI: + // diLevels: + // invertedIndex + // [ + // [ + // { + // imageId: + // weight: + // } + // ] + // ] + // directIndex + // [ + // [ + // { + // nodeId: + // features: [ ] + // } + // ] + // ] + + // invertedIndex[i] is for the i-th word + // directIndex[i] is for the i-th entry + // directIndex may be empty if not using direct index + // + // imageId's and nodeId's must be stored in ascending order + // (according to the construction of the indexes) + + m_voc->save(fs); + + fs << name << "{"; + + fs << "nEntries" << m_nentries; + fs << "usingDI" << (m_use_di ? 1 : 0); + fs << "diLevels" << m_dilevels; + + fs << "invertedIndex" << "["; + + typename InvertedFile::const_iterator iit; + typename IFRow::const_iterator irit; + for(iit = m_ifile.begin(); iit != m_ifile.end(); ++iit) + { + fs << "["; // word of IF + for(irit = iit->begin(); irit != iit->end(); ++irit) + { + fs << "{:" + << "imageId" << (int)irit->entry_id + << "weight" << irit->word_weight + << "}"; + } + fs << "]"; // word of IF + } + + fs << "]"; // invertedIndex + + fs << "directIndex" << "["; + + typename DirectFile::const_iterator dit; + typename FeatureVector::const_iterator drit; + for(dit = m_dfile.begin(); dit != m_dfile.end(); ++dit) + { + fs << "["; // entry of DF + + for(drit = dit->begin(); drit != dit->end(); ++drit) + { + NodeId nid = drit->first; + const std::vector& features = drit->second; + + // save info of last_nid + fs << "{"; + fs << "nodeId" << (int)nid; + // msvc++ 2010 with opencv 2.3.1 does not allow FileStorage::operator<< + // with vectors of unsigned int + fs << 
"features" << "[" + << *(const std::vector*)(&features) << "]"; + fs << "}"; + } + + fs << "]"; // entry of DF + } + + fs << "]"; // directIndex + + fs << "}"; // database +} + +// -------------------------------------------------------------------------- + +template +void TemplatedDatabase::load(const std::string &filename) +{ + cv::FileStorage fs(filename.c_str(), cv::FileStorage::READ); + if(!fs.isOpened()) throw std::string("Could not open file ") + filename; + + load(fs); +} + +// -------------------------------------------------------------------------- + +template +void TemplatedDatabase::load(const cv::FileStorage &fs, + const std::string &name) +{ + // load voc first + // subclasses must instantiate m_voc before calling this ::load + if(!m_voc) m_voc = new TemplatedVocabulary; + + m_voc->load(fs); + + // load database now + clear(); // resizes inverted file + + cv::FileNode fdb = fs[name]; + + m_nentries = (int)fdb["nEntries"]; + m_use_di = (int)fdb["usingDI"] != 0; + m_dilevels = (int)fdb["diLevels"]; + + cv::FileNode fn = fdb["invertedIndex"]; + for(WordId wid = 0; wid < fn.size(); ++wid) + { + cv::FileNode fw = fn[wid]; + + for(unsigned int i = 0; i < fw.size(); ++i) + { + EntryId eid = (int)fw[i]["imageId"]; + WordValue v = fw[i]["weight"]; + + m_ifile[wid].push_back(IFPair(eid, v)); + } + } + + if(m_use_di) + { + fn = fdb["directIndex"]; + + m_dfile.resize(fn.size()); + m_dBowfile.resize(fn.size()); + assert(m_nentries == (int)fn.size()); + + FeatureVector::iterator dit; + for(EntryId eid = 0; eid < fn.size(); ++eid) + { + cv::FileNode fe = fn[eid]; + + m_dfile[eid].clear(); + m_dBowfile[eid].clear(); + for(unsigned int i = 0; i < fe.size(); ++i) + { + NodeId nid = (int)fe[i]["nodeId"]; + + dit = m_dfile[eid].insert(m_dfile[eid].end(), + make_pair(nid, std::vector() )); + + // this failed to compile with some opencv versions (2.3.1) + //fe[i]["features"] >> dit->second; + + // this was ok until OpenCV 2.4.1 + //std::vector aux; + //fe[i]["features"] 
>> aux; // OpenCV < 2.4.1 + //dit->second.resize(aux.size()); + //std::copy(aux.begin(), aux.end(), dit->second.begin()); + + cv::FileNode ff = fe[i]["features"][0]; + dit->second.reserve(ff.size()); + + cv::FileNodeIterator ffit; + for(ffit = ff.begin(); ffit != ff.end(); ++ffit) + { + dit->second.push_back((int)*ffit); + } + } + } // for each entry + } // if use_id + +} + +// -------------------------------------------------------------------------- + +/** + * Writes printable information of the database + * @param os stream to write to + * @param db + */ +template +std::ostream& operator<<(std::ostream &os, + const TemplatedDatabase &db) +{ + os << "Database: Entries = " << db.size() << ", " + "Using direct index = " << (db.usingDirectIndex() ? "yes" : "no"); + + if(db.usingDirectIndex()) + os << ", Direct index levels = " << db.getDirectIndexLevels(); + + os << ". " << *db.getVocabulary(); + return os; +} + +// -------------------------------------------------------------------------- + +} // namespace DBoW2 + +#endif diff --git a/loop_fusion/src/ThirdParty/DBoW/TemplatedVocabulary.h b/loop_fusion/src/ThirdParty/DBoW/TemplatedVocabulary.h new file mode 100644 index 0000000..1cd2e5b --- /dev/null +++ b/loop_fusion/src/ThirdParty/DBoW/TemplatedVocabulary.h @@ -0,0 +1,1606 @@ +/** + * File: TemplatedVocabulary.h + * Date: February 2011 + * Author: Dorian Galvez-Lopez + * Description: templated vocabulary + * License: see the LICENSE.txt file + * + */ + +#ifndef __D_T_TEMPLATED_VOCABULARY__ +#define __D_T_TEMPLATED_VOCABULARY__ + +#include + +#include +#include +#include +#include +#include +#include + +#include "FeatureVector.h" +#include "BowVector.h" +#include "ScoringObject.h" + +#include "../DUtils/DUtils.h" + +// Added by VINS [[[ +#include "../VocabularyBinary.hpp" +#include +// Added by VINS ]]] + +namespace DBoW2 { + +/// @param TDescriptor class of descriptor +/// @param F class of descriptor functions +template +/// Generic Vocabulary +class 
TemplatedVocabulary +{ +public: + + /** + * Initiates an empty vocabulary + * @param k branching factor + * @param L depth levels + * @param weighting weighting type + * @param scoring scoring type + */ + TemplatedVocabulary(int k = 10, int L = 5, + WeightingType weighting = TF_IDF, ScoringType scoring = L1_NORM); + + /** + * Creates the vocabulary by loading a file + * @param filename + */ + TemplatedVocabulary(const std::string &filename); + + /** + * Creates the vocabulary by loading a file + * @param filename + */ + TemplatedVocabulary(const char *filename); + + /** + * Copy constructor + * @param voc + */ + TemplatedVocabulary(const TemplatedVocabulary &voc); + + /** + * Destructor + */ + virtual ~TemplatedVocabulary(); + + /** + * Assigns the given vocabulary to this by copying its data and removing + * all the data contained by this vocabulary before + * @param voc + * @return reference to this vocabulary + */ + TemplatedVocabulary& operator=( + const TemplatedVocabulary &voc); + + /** + * Creates a vocabulary from the training features with the already + * defined parameters + * @param training_features + */ + virtual void create + (const std::vector > &training_features); + + /** + * Creates a vocabulary from the training features, setting the branching + * factor and the depth levels of the tree + * @param training_features + * @param k branching factor + * @param L depth levels + */ + virtual void create + (const std::vector > &training_features, + int k, int L); + + /** + * Creates a vocabulary from the training features, setting the branching + * factor nad the depth levels of the tree, and the weighting and scoring + * schemes + */ + virtual void create + (const std::vector > &training_features, + int k, int L, WeightingType weighting, ScoringType scoring); + + /** + * Returns the number of words in the vocabulary + * @return number of words + */ + virtual inline unsigned int size() const; + + /** + * Returns whether the vocabulary is empty (i.e. 
it has not been trained) + * @return true iff the vocabulary is empty + */ + virtual inline bool empty() const; + + /** + * Transforms a set of descriptores into a bow vector + * @param features + * @param v (out) bow vector of weighted words + */ + virtual void transform(const std::vector& features, BowVector &v) + const; + + /** + * Transform a set of descriptors into a bow vector and a feature vector + * @param features + * @param v (out) bow vector + * @param fv (out) feature vector of nodes and feature indexes + * @param levelsup levels to go up the vocabulary tree to get the node index + */ + virtual void transform(const std::vector& features, + BowVector &v, FeatureVector &fv, int levelsup) const; + + /** + * Transforms a single feature into a word (without weight) + * @param feature + * @return word id + */ + virtual WordId transform(const TDescriptor& feature) const; + + /** + * Returns the score of two vectors + * @param a vector + * @param b vector + * @return score between vectors + * @note the vectors must be already sorted and normalized if necessary + */ + inline double score(const BowVector &a, const BowVector &b) const; + + /** + * Returns the id of the node that is "levelsup" levels from the word given + * @param wid word id + * @param levelsup 0..L + * @return node id. 
if levelsup is 0, returns the node id associated to the + * word id + */ + virtual NodeId getParentNode(WordId wid, int levelsup) const; + + /** + * Returns the ids of all the words that are under the given node id, + * by traversing any of the branches that goes down from the node + * @param nid starting node id + * @param words ids of words + */ + void getWordsFromNode(NodeId nid, std::vector &words) const; + + /** + * Returns the branching factor of the tree (k) + * @return k + */ + inline int getBranchingFactor() const { return m_k; } + + /** + * Returns the depth levels of the tree (L) + * @return L + */ + inline int getDepthLevels() const { return m_L; } + + /** + * Returns the real depth levels of the tree on average + * @return average of depth levels of leaves + */ + float getEffectiveLevels() const; + + /** + * Returns the descriptor of a word + * @param wid word id + * @return descriptor + */ + virtual inline TDescriptor getWord(WordId wid) const; + + /** + * Returns the weight of a word + * @param wid word id + * @return weight + */ + virtual inline WordValue getWordWeight(WordId wid) const; + + /** + * Returns the weighting method + * @return weighting method + */ + inline WeightingType getWeightingType() const { return m_weighting; } + + /** + * Returns the scoring method + * @return scoring method + */ + inline ScoringType getScoringType() const { return m_scoring; } + + /** + * Changes the weighting method + * @param type new weighting type + */ + inline void setWeightingType(WeightingType type); + + /** + * Changes the scoring method + * @param type new scoring type + */ + void setScoringType(ScoringType type); + + /** + * Saves the vocabulary into a file + * @param filename + */ + void save(const std::string &filename) const; + + /** + * Loads the vocabulary from a file + * @param filename + */ + void load(const std::string &filename); + + /** + * Saves the vocabulary to a file storage structure + * @param fn node in file storage + */ + virtual 
void save(cv::FileStorage &fs, + const std::string &name = "vocabulary") const; + + /** + * Loads the vocabulary from a file storage node + * @param fn first node + * @param subname name of the child node of fn where the tree is stored. + * If not given, the fn node is used instead + */ + virtual void load(const cv::FileStorage &fs, + const std::string &name = "vocabulary"); + + // Added by VINS [[[ + virtual void loadBin(const std::string &filename); + // Added by VINS ]]] + + /** + * Stops those words whose weight is below minWeight. + * Words are stopped by setting their weight to 0. There are not returned + * later when transforming image features into vectors. + * Note that when using IDF or TF_IDF, the weight is the idf part, which + * is equivalent to -log(f), where f is the frequency of the word + * (f = Ni/N, Ni: number of training images where the word is present, + * N: number of training images). + * Note that the old weight is forgotten, and subsequent calls to this + * function with a lower minWeight have no effect. 
+ * @return number of words stopped now + */ + virtual int stopWords(double minWeight); + +protected: + + /// Pointer to descriptor + typedef const TDescriptor *pDescriptor; + + /// Tree node + struct Node + { + /// Node id + NodeId id; + /// Weight if the node is a word + WordValue weight; + /// Children + std::vector children; + /// Parent node (undefined in case of root) + NodeId parent; + /// Node descriptor + TDescriptor descriptor; + + /// Word id if the node is a word + WordId word_id; + + /** + * Empty constructor + */ + Node(): id(0), weight(0), parent(0), word_id(0){} + + /** + * Constructor + * @param _id node id + */ + Node(NodeId _id): id(_id), weight(0), parent(0), word_id(0){} + + /** + * Returns whether the node is a leaf node + * @return true iff the node is a leaf + */ + inline bool isLeaf() const { return children.empty(); } + }; + +protected: + + /** + * Creates an instance of the scoring object accoring to m_scoring + */ + void createScoringObject(); + + /** + * Returns a set of pointers to descriptores + * @param training_features all the features + * @param features (out) pointers to the training features + */ + void getFeatures( + const std::vector > &training_features, + std::vector &features) const; + + /** + * Returns the word id associated to a feature + * @param feature + * @param id (out) word id + * @param weight (out) word weight + * @param nid (out) if given, id of the node "levelsup" levels up + * @param levelsup + */ + virtual void transform(const TDescriptor &feature, + WordId &id, WordValue &weight, NodeId* nid = NULL, int levelsup = 0) const; + + /** + * Returns the word id associated to a feature + * @param feature + * @param id (out) word id + */ + virtual void transform(const TDescriptor &feature, WordId &id) const; + + /** + * Creates a level in the tree, under the parent, by running kmeans with + * a descriptor set, and recursively creates the subsequent levels too + * @param parent_id id of parent node + * @param 
descriptors descriptors to run the kmeans on + * @param current_level current level in the tree + */ + void HKmeansStep(NodeId parent_id, const std::vector &descriptors, + int current_level); + + /** + * Creates k clusters from the given descriptors with some seeding algorithm. + * @note In this class, kmeans++ is used, but this function should be + * overriden by inherited classes. + */ + virtual void initiateClusters(const std::vector &descriptors, + std::vector &clusters) const; + + /** + * Creates k clusters from the given descriptor sets by running the + * initial step of kmeans++ + * @param descriptors + * @param clusters resulting clusters + */ + void initiateClustersKMpp(const std::vector &descriptors, + std::vector &clusters) const; + + /** + * Create the words of the vocabulary once the tree has been built + */ + void createWords(); + + /** + * Sets the weights of the nodes of tree according to the given features. + * Before calling this function, the nodes and the words must be already + * created (by calling HKmeansStep and createWords) + * @param features + */ + void setNodeWeights(const std::vector > &features); + +protected: + + /// Branching factor + int m_k; + + /// Depth levels + int m_L; + + /// Weighting method + WeightingType m_weighting; + + /// Scoring method + ScoringType m_scoring; + + /// Object for computing scores + GeneralScoring* m_scoring_object; + + /// Tree nodes + std::vector m_nodes; + + /// Words of the vocabulary (tree leaves) + /// this condition holds: m_words[wid]->word_id == wid + std::vector m_words; + +}; + +// -------------------------------------------------------------------------- + +template +TemplatedVocabulary::TemplatedVocabulary + (int k, int L, WeightingType weighting, ScoringType scoring) + : m_k(k), m_L(L), m_weighting(weighting), m_scoring(scoring), + m_scoring_object(NULL) +{ + //printf("loop start load bin\n"); + createScoringObject(); +} + +// 
-------------------------------------------------------------------------- + +template +TemplatedVocabulary::TemplatedVocabulary + (const std::string &filename): m_scoring_object(NULL) +{ + //m_scoring = KL; + // Changed by VINS [[[ + //printf("loop start load bin\n"); + loadBin(filename); + // Changed by VINS ]]] +} + +// -------------------------------------------------------------------------- + +template +TemplatedVocabulary::TemplatedVocabulary + (const char *filename): m_scoring_object(NULL) +{ + //m_scoring = KL; + // Changed by VINS [[[ + //printf("loop start load bin\n"); + loadBin(filename); + // Changed by VINS ]]] +} + +// -------------------------------------------------------------------------- + +template +void TemplatedVocabulary::createScoringObject() +{ + delete m_scoring_object; + m_scoring_object = NULL; + + switch(m_scoring) + { + case L1_NORM: + m_scoring_object = new L1Scoring; + break; + + case L2_NORM: + m_scoring_object = new L2Scoring; + break; + + case CHI_SQUARE: + m_scoring_object = new ChiSquareScoring; + break; + + case KL: + m_scoring_object = new KLScoring; + break; + + case BHATTACHARYYA: + m_scoring_object = new BhattacharyyaScoring; + break; + + case DOT_PRODUCT: + m_scoring_object = new DotProductScoring; + break; + + } +} + +// -------------------------------------------------------------------------- + +template +void TemplatedVocabulary::setScoringType(ScoringType type) +{ + m_scoring = type; + createScoringObject(); +} + +// -------------------------------------------------------------------------- + +template +void TemplatedVocabulary::setWeightingType(WeightingType type) +{ + this->m_weighting = type; +} + +// -------------------------------------------------------------------------- + +template +TemplatedVocabulary::TemplatedVocabulary( + const TemplatedVocabulary &voc) + : m_scoring_object(NULL) +{ + printf("loop start load vocabulary\n"); + *this = voc; +} + +// 
-------------------------------------------------------------------------- + +template +TemplatedVocabulary::~TemplatedVocabulary() +{ + delete m_scoring_object; +} + +// -------------------------------------------------------------------------- + +template +TemplatedVocabulary& +TemplatedVocabulary::operator= + (const TemplatedVocabulary &voc) +{ + this->m_k = voc.m_k; + this->m_L = voc.m_L; + this->m_scoring = voc.m_scoring; + this->m_weighting = voc.m_weighting; + + this->createScoringObject(); + + this->m_nodes.clear(); + this->m_words.clear(); + + this->m_nodes = voc.m_nodes; + this->createWords(); + + return *this; +} + +// -------------------------------------------------------------------------- + +template +void TemplatedVocabulary::create( + const std::vector > &training_features) +{ + m_nodes.clear(); + m_words.clear(); + + // expected_nodes = Sum_{i=0..L} ( k^i ) + int expected_nodes = + (int)((pow((double)m_k, (double)m_L + 1) - 1)/(m_k - 1)); + + m_nodes.reserve(expected_nodes); // avoid allocations when creating the tree + + + std::vector features; + getFeatures(training_features, features); + + + // create root + m_nodes.push_back(Node(0)); // root + + // create the tree + HKmeansStep(0, features, 1); + + // create the words + createWords(); + + // and set the weight of each node of the tree + setNodeWeights(training_features); + +} + +// -------------------------------------------------------------------------- + +template +void TemplatedVocabulary::create( + const std::vector > &training_features, + int k, int L) +{ + m_k = k; + m_L = L; + + create(training_features); +} + +// -------------------------------------------------------------------------- + +template +void TemplatedVocabulary::create( + const std::vector > &training_features, + int k, int L, WeightingType weighting, ScoringType scoring) +{ + m_k = k; + m_L = L; + m_weighting = weighting; + m_scoring = scoring; + createScoringObject(); + + create(training_features); +} + +// 
-------------------------------------------------------------------------- + +template +void TemplatedVocabulary::getFeatures( + const std::vector > &training_features, + std::vector &features) const +{ + features.resize(0); + + typename std::vector >::const_iterator vvit; + typename std::vector::const_iterator vit; + for(vvit = training_features.begin(); vvit != training_features.end(); ++vvit) + { + features.reserve(features.size() + vvit->size()); + for(vit = vvit->begin(); vit != vvit->end(); ++vit) + { + features.push_back(&(*vit)); + } + } +} + +// -------------------------------------------------------------------------- + +template +void TemplatedVocabulary::HKmeansStep(NodeId parent_id, + const std::vector &descriptors, int current_level) +{ + if(descriptors.empty()) return; + + // features associated to each cluster + std::vector clusters; + std::vector > groups; // groups[i] = [j1, j2, ...] + // j1, j2, ... indices of descriptors associated to cluster i + + clusters.reserve(m_k); + groups.reserve(m_k); + + //const int msizes[] = { m_k, descriptors.size() }; + //cv::SparseMat assoc(2, msizes, CV_8U); + //cv::SparseMat last_assoc(2, msizes, CV_8U); + //// assoc.row(cluster_idx).col(descriptor_idx) = 1 iif associated + + if((int)descriptors.size() <= m_k) + { + // trivial case: one cluster per feature + groups.resize(descriptors.size()); + + for(unsigned int i = 0; i < descriptors.size(); i++) + { + groups[i].push_back(i); + clusters.push_back(*descriptors[i]); + } + } + else + { + // select clusters and groups with kmeans + + bool first_time = true; + bool goon = true; + + // to check if clusters move after iterations + std::vector last_association, current_association; + + while(goon) + { + // 1. 
Calculate clusters + + if(first_time) + { + // random sample + initiateClusters(descriptors, clusters); + } + else + { + // calculate cluster centres + + for(unsigned int c = 0; c < clusters.size(); ++c) + { + std::vector cluster_descriptors; + cluster_descriptors.reserve(groups[c].size()); + + /* + for(unsigned int d = 0; d < descriptors.size(); ++d) + { + if( assoc.find(c, d) ) + { + cluster_descriptors.push_back(descriptors[d]); + } + } + */ + + std::vector::const_iterator vit; + for(vit = groups[c].begin(); vit != groups[c].end(); ++vit) + { + cluster_descriptors.push_back(descriptors[*vit]); + } + + + F::meanValue(cluster_descriptors, clusters[c]); + } + + } // if(!first_time) + + // 2. Associate features with clusters + + // calculate distances to cluster centers + groups.clear(); + groups.resize(clusters.size(), std::vector()); + current_association.resize(descriptors.size()); + + //assoc.clear(); + + typename std::vector::const_iterator fit; + //unsigned int d = 0; + for(fit = descriptors.begin(); fit != descriptors.end(); ++fit)//, ++d) + { + double best_dist = F::distance(*(*fit), clusters[0]); + unsigned int icluster = 0; + + for(unsigned int c = 1; c < clusters.size(); ++c) + { + double dist = F::distance(*(*fit), clusters[c]); + if(dist < best_dist) + { + best_dist = dist; + icluster = c; + } + } + + //assoc.ref(icluster, d) = 1; + + groups[icluster].push_back(fit - descriptors.begin()); + current_association[ fit - descriptors.begin() ] = icluster; + } + + // kmeans++ ensures all the clusters has any feature associated with them + + // 3. 
check convergence + if(first_time) + { + first_time = false; + } + else + { + //goon = !eqUChar(last_assoc, assoc); + + goon = false; + for(unsigned int i = 0; i < current_association.size(); i++) + { + if(current_association[i] != last_association[i]){ + goon = true; + break; + } + } + } + + if(goon) + { + // copy last feature-cluster association + last_association = current_association; + //last_assoc = assoc.clone(); + } + + } // while(goon) + + } // if must run kmeans + + // create nodes + for(unsigned int i = 0; i < clusters.size(); ++i) + { + NodeId id = m_nodes.size(); + m_nodes.push_back(Node(id)); + m_nodes.back().descriptor = clusters[i]; + m_nodes.back().parent = parent_id; + m_nodes[parent_id].children.push_back(id); + } + + // go on with the next level + if(current_level < m_L) + { + // iterate again with the resulting clusters + const std::vector &children_ids = m_nodes[parent_id].children; + for(unsigned int i = 0; i < clusters.size(); ++i) + { + NodeId id = children_ids[i]; + + std::vector child_features; + child_features.reserve(groups[i].size()); + + std::vector::const_iterator vit; + for(vit = groups[i].begin(); vit != groups[i].end(); ++vit) + { + child_features.push_back(descriptors[*vit]); + } + + if(child_features.size() > 1) + { + HKmeansStep(id, child_features, current_level + 1); + } + } + } +} + +// -------------------------------------------------------------------------- + +template +void TemplatedVocabulary::initiateClusters + (const std::vector &descriptors, + std::vector &clusters) const +{ + initiateClustersKMpp(descriptors, clusters); +} + +// -------------------------------------------------------------------------- + +template +void TemplatedVocabulary::initiateClustersKMpp( + const std::vector &pfeatures, + std::vector &clusters) const +{ + // Implements kmeans++ seeding algorithm + // Algorithm: + // 1. Choose one center uniformly at random from among the data points. + // 2. 
For each data point x, compute D(x), the distance between x and the nearest + // center that has already been chosen. + // 3. Add one new data point as a center. Each point x is chosen with probability + // proportional to D(x)^2. + // 4. Repeat Steps 2 and 3 until k centers have been chosen. + // 5. Now that the initial centers have been chosen, proceed using standard k-means + // clustering. + + DUtils::Random::SeedRandOnce(); + + clusters.resize(0); + clusters.reserve(m_k); + std::vector min_dists(pfeatures.size(), std::numeric_limits::max()); + + // 1. + + int ifeature = DUtils::Random::RandomInt(0, pfeatures.size()-1); + + // create first cluster + clusters.push_back(*pfeatures[ifeature]); + + // compute the initial distances + typename std::vector::const_iterator fit; + std::vector::iterator dit; + dit = min_dists.begin(); + for(fit = pfeatures.begin(); fit != pfeatures.end(); ++fit, ++dit) + { + *dit = F::distance(*(*fit), clusters.back()); + } + + while((int)clusters.size() < m_k) + { + // 2. + dit = min_dists.begin(); + for(fit = pfeatures.begin(); fit != pfeatures.end(); ++fit, ++dit) + { + if(*dit > 0) + { + double dist = F::distance(*(*fit), clusters.back()); + if(dist < *dit) *dit = dist; + } + } + + // 3. 
+ double dist_sum = std::accumulate(min_dists.begin(), min_dists.end(), 0.0); + + if(dist_sum > 0) + { + double cut_d; + do + { + cut_d = DUtils::Random::RandomValue(0, dist_sum); + } while(cut_d == 0.0); + + double d_up_now = 0; + for(dit = min_dists.begin(); dit != min_dists.end(); ++dit) + { + d_up_now += *dit; + if(d_up_now >= cut_d) break; + } + + if(dit == min_dists.end()) + ifeature = pfeatures.size()-1; + else + ifeature = dit - min_dists.begin(); + + clusters.push_back(*pfeatures[ifeature]); + + } // if dist_sum > 0 + else + break; + + } // while(used_clusters < m_k) + +} + +// -------------------------------------------------------------------------- + +template +void TemplatedVocabulary::createWords() +{ + m_words.resize(0); + + if(!m_nodes.empty()) + { + m_words.reserve( (int)pow((double)m_k, (double)m_L) ); + + typename std::vector::iterator nit; + + nit = m_nodes.begin(); // ignore root + for(++nit; nit != m_nodes.end(); ++nit) + { + if(nit->isLeaf()) + { + nit->word_id = m_words.size(); + m_words.push_back( &(*nit) ); + } + } + } +} + +// -------------------------------------------------------------------------- + +template +void TemplatedVocabulary::setNodeWeights + (const std::vector > &training_features) +{ + const unsigned int NWords = m_words.size(); + const unsigned int NDocs = training_features.size(); + + if(m_weighting == TF || m_weighting == BINARY) + { + // idf part must be 1 always + for(unsigned int i = 0; i < NWords; i++) + m_words[i]->weight = 1; + } + else if(m_weighting == IDF || m_weighting == TF_IDF) + { + // IDF and TF-IDF: we calculte the idf path now + + // Note: this actually calculates the idf part of the tf-idf score. 
+ // The complete tf-idf score is calculated in ::transform + + std::vector Ni(NWords, 0); + std::vector counted(NWords, false); + + typename std::vector >::const_iterator mit; + typename std::vector::const_iterator fit; + + for(mit = training_features.begin(); mit != training_features.end(); ++mit) + { + fill(counted.begin(), counted.end(), false); + + for(fit = mit->begin(); fit < mit->end(); ++fit) + { + WordId word_id; + transform(*fit, word_id); + + if(!counted[word_id]) + { + Ni[word_id]++; + counted[word_id] = true; + } + } + } + + // set ln(N/Ni) + for(unsigned int i = 0; i < NWords; i++) + { + if(Ni[i] > 0) + { + m_words[i]->weight = log((double)NDocs / (double)Ni[i]); + }// else // This cannot occur if using kmeans++ + } + + } + +} + +// -------------------------------------------------------------------------- + +template +inline unsigned int TemplatedVocabulary::size() const +{ + return m_words.size(); +} + +// -------------------------------------------------------------------------- + +template +inline bool TemplatedVocabulary::empty() const +{ + return m_words.empty(); +} + +// -------------------------------------------------------------------------- + +template +float TemplatedVocabulary::getEffectiveLevels() const +{ + long sum = 0; + typename std::vector::const_iterator wit; + for(wit = m_words.begin(); wit != m_words.end(); ++wit) + { + const Node *p = *wit; + + for(; p->id != 0; sum++) p = &m_nodes[p->parent]; + } + + return (float)((double)sum / (double)m_words.size()); +} + +// -------------------------------------------------------------------------- + +template +TDescriptor TemplatedVocabulary::getWord(WordId wid) const +{ + return m_words[wid]->descriptor; +} + +// -------------------------------------------------------------------------- + +template +WordValue TemplatedVocabulary::getWordWeight(WordId wid) const +{ + return m_words[wid]->weight; +} + +// -------------------------------------------------------------------------- + 
+template +WordId TemplatedVocabulary::transform + (const TDescriptor& feature) const +{ + if(empty()) + { + return 0; + } + + WordId wid; + transform(feature, wid); + return wid; +} + +// -------------------------------------------------------------------------- + +template +void TemplatedVocabulary::transform( + const std::vector& features, BowVector &v) const +{ + v.clear(); + + if(empty()) + { + return; + } + + // normalize + LNorm norm; + bool must = m_scoring_object->mustNormalize(norm); + + typename std::vector::const_iterator fit; + + if(m_weighting == TF || m_weighting == TF_IDF) + { + for(fit = features.begin(); fit < features.end(); ++fit) + { + WordId id; + WordValue w; + // w is the idf value if TF_IDF, 1 if TF + + transform(*fit, id, w); + + // not stopped + if(w > 0) v.addWeight(id, w); + } + + if(!v.empty() && !must) + { + // unnecessary when normalizing + const double nd = v.size(); + for(BowVector::iterator vit = v.begin(); vit != v.end(); vit++) + vit->second /= nd; + } + + } + else // IDF || BINARY + { + for(fit = features.begin(); fit < features.end(); ++fit) + { + WordId id; + WordValue w; + // w is idf if IDF, or 1 if BINARY + + transform(*fit, id, w); + + // not stopped + if(w > 0) v.addIfNotExist(id, w); + + } // if add_features + } // if m_weighting == ... 
+ + if(must) v.normalize(norm); +} + +// -------------------------------------------------------------------------- + +template +void TemplatedVocabulary::transform( + const std::vector& features, + BowVector &v, FeatureVector &fv, int levelsup) const +{ + v.clear(); + fv.clear(); + + if(empty()) // safe for subclasses + { + return; + } + + // normalize + LNorm norm; + bool must = m_scoring_object->mustNormalize(norm); + + typename std::vector::const_iterator fit; + + if(m_weighting == TF || m_weighting == TF_IDF) + { + unsigned int i_feature = 0; + for(fit = features.begin(); fit < features.end(); ++fit, ++i_feature) + { + WordId id; + NodeId nid; + WordValue w; + // w is the idf value if TF_IDF, 1 if TF + + transform(*fit, id, w, &nid, levelsup); + + if(w > 0) // not stopped + { + v.addWeight(id, w); + fv.addFeature(nid, i_feature); + } + } + + if(!v.empty() && !must) + { + // unnecessary when normalizing + const double nd = v.size(); + for(BowVector::iterator vit = v.begin(); vit != v.end(); vit++) + vit->second /= nd; + } + + } + else // IDF || BINARY + { + unsigned int i_feature = 0; + for(fit = features.begin(); fit < features.end(); ++fit, ++i_feature) + { + WordId id; + NodeId nid; + WordValue w; + // w is idf if IDF, or 1 if BINARY + + transform(*fit, id, w, &nid, levelsup); + + if(w > 0) // not stopped + { + v.addIfNotExist(id, w); + fv.addFeature(nid, i_feature); + } + } + } // if m_weighting == ... 
+ + if(must) v.normalize(norm); +} + +// -------------------------------------------------------------------------- + +template +inline double TemplatedVocabulary::score + (const BowVector &v1, const BowVector &v2) const +{ + return m_scoring_object->score(v1, v2); +} + +// -------------------------------------------------------------------------- + +template +void TemplatedVocabulary::transform + (const TDescriptor &feature, WordId &id) const +{ + WordValue weight; + transform(feature, id, weight); +} + +// -------------------------------------------------------------------------- + +template +void TemplatedVocabulary::transform(const TDescriptor &feature, + WordId &word_id, WordValue &weight, NodeId *nid, int levelsup) const +{ + // propagate the feature down the tree + std::vector nodes; + typename std::vector::const_iterator nit; + + // level at which the node must be stored in nid, if given + const int nid_level = m_L - levelsup; + if(nid_level <= 0 && nid != NULL) *nid = 0; // root + + NodeId final_id = 0; // root + int current_level = 0; + + do + { + ++current_level; + nodes = m_nodes[final_id].children; + final_id = nodes[0]; + + double best_d = F::distance(feature, m_nodes[final_id].descriptor); + + for(nit = nodes.begin() + 1; nit != nodes.end(); ++nit) + { + NodeId id = *nit; + double d = F::distance(feature, m_nodes[id].descriptor); + if(d < best_d) + { + best_d = d; + final_id = id; + } + } + + if(nid != NULL && current_level == nid_level) + *nid = final_id; + + } while( !m_nodes[final_id].isLeaf() ); + + // turn node id into word id + word_id = m_nodes[final_id].word_id; + weight = m_nodes[final_id].weight; +} + +// -------------------------------------------------------------------------- + +template +NodeId TemplatedVocabulary::getParentNode + (WordId wid, int levelsup) const +{ + NodeId ret = m_words[wid]->id; // node id + while(levelsup > 0 && ret != 0) // ret == 0 --> root + { + --levelsup; + ret = m_nodes[ret].parent; + } + return ret; +} + +// 
-------------------------------------------------------------------------- + +template +void TemplatedVocabulary::getWordsFromNode + (NodeId nid, std::vector &words) const +{ + words.clear(); + + if(m_nodes[nid].isLeaf()) + { + words.push_back(m_nodes[nid].word_id); + } + else + { + words.reserve(m_k); // ^1, ^2, ... + + std::vector parents; + parents.push_back(nid); + + while(!parents.empty()) + { + NodeId parentid = parents.back(); + parents.pop_back(); + + const std::vector &child_ids = m_nodes[parentid].children; + std::vector::const_iterator cit; + + for(cit = child_ids.begin(); cit != child_ids.end(); ++cit) + { + const Node &child_node = m_nodes[*cit]; + + if(child_node.isLeaf()) + words.push_back(child_node.word_id); + else + parents.push_back(*cit); + + } // for each child + } // while !parents.empty + } +} + +// -------------------------------------------------------------------------- + +template +int TemplatedVocabulary::stopWords(double minWeight) +{ + int c = 0; + typename std::vector::iterator wit; + for(wit = m_words.begin(); wit != m_words.end(); ++wit) + { + if((*wit)->weight < minWeight) + { + ++c; + (*wit)->weight = 0; + } + } + return c; +} + +// -------------------------------------------------------------------------- + +template +void TemplatedVocabulary::save(const std::string &filename) const +{ + cv::FileStorage fs(filename.c_str(), cv::FileStorage::WRITE); + if(!fs.isOpened()) throw std::string("Could not open file ") + filename; + + save(fs); +} + +// -------------------------------------------------------------------------- + +template +void TemplatedVocabulary::load(const std::string &filename) +{ + cv::FileStorage fs(filename.c_str(), cv::FileStorage::READ); + if(!fs.isOpened()) throw std::string("Could not open file ") + filename; + + this->load(fs); +} + +// -------------------------------------------------------------------------- + +template +void TemplatedVocabulary::save(cv::FileStorage &f, + const std::string &name) const +{ + 
// Format YAML: + // vocabulary + // { + // k: + // L: + // scoringType: + // weightingType: + // nodes + // [ + // { + // nodeId: + // parentId: + // weight: + // descriptor: + // } + // ] + // words + // [ + // { + // wordId: + // nodeId: + // } + // ] + // } + // + // The root node (index 0) is not included in the node vector + // + + f << name << "{"; + + f << "k" << m_k; + f << "L" << m_L; + f << "scoringType" << m_scoring; + f << "weightingType" << m_weighting; + + // tree + f << "nodes" << "["; + std::vector parents, children; + std::vector::const_iterator pit; + + parents.push_back(0); // root + + while(!parents.empty()) + { + NodeId pid = parents.back(); + parents.pop_back(); + + const Node& parent = m_nodes[pid]; + children = parent.children; + + for(pit = children.begin(); pit != children.end(); pit++) + { + const Node& child = m_nodes[*pit]; + + // save node data + f << "{:"; + f << "nodeId" << (int)child.id; + f << "parentId" << (int)pid; + f << "weight" << (double)child.weight; + f << "descriptor" << F::toString(child.descriptor); + f << "}"; + + // add to parent list + if(!child.isLeaf()) + { + parents.push_back(*pit); + } + } + } + + f << "]"; // nodes + + // words + f << "words" << "["; + + typename std::vector::const_iterator wit; + for(wit = m_words.begin(); wit != m_words.end(); wit++) + { + WordId id = wit - m_words.begin(); + f << "{:"; + f << "wordId" << (int)id; + f << "nodeId" << (int)(*wit)->id; + f << "}"; + } + + f << "]"; // words + + f << "}"; + +} + +// -------------------------------------------------------------------------- + +template +void TemplatedVocabulary::load(const cv::FileStorage &fs, + const std::string &name) +{ + m_words.clear(); + m_nodes.clear(); + + cv::FileNode fvoc = fs[name]; + + m_k = (int)fvoc["k"]; + m_L = (int)fvoc["L"]; + m_scoring = (ScoringType)((int)fvoc["scoringType"]); + m_weighting = (WeightingType)((int)fvoc["weightingType"]); + + createScoringObject(); + + // nodes + cv::FileNode fn = fvoc["nodes"]; + 
+ m_nodes.resize(fn.size() + 1); // +1 to include root + m_nodes[0].id = 0; + + for(unsigned int i = 0; i < fn.size(); ++i) + { + NodeId nid = (int)fn[i]["nodeId"]; + NodeId pid = (int)fn[i]["parentId"]; + WordValue weight = (WordValue)fn[i]["weight"]; + std::string d = (std::string)fn[i]["descriptor"]; + + m_nodes[nid].id = nid; + m_nodes[nid].parent = pid; + m_nodes[nid].weight = weight; + m_nodes[pid].children.push_back(nid); + + F::fromString(m_nodes[nid].descriptor, d); + } + + // words + fn = fvoc["words"]; + + m_words.resize(fn.size()); + + for(unsigned int i = 0; i < fn.size(); ++i) + { + NodeId wid = (int)fn[i]["wordId"]; + NodeId nid = (int)fn[i]["nodeId"]; + + m_nodes[nid].word_id = wid; + m_words[wid] = &m_nodes[nid]; + } +} + +// Added by VINS [[[ +template +void TemplatedVocabulary::loadBin(const std::string &filename) { + + m_words.clear(); + m_nodes.clear(); + //printf("loop load bin\n"); + std::ifstream ifStream(filename); + VINSLoop::Vocabulary voc; + voc.deserialize(ifStream); + ifStream.close(); + + m_k = voc.k; + m_L = voc.L; + m_scoring = (ScoringType)voc.scoringType; + m_weighting = (WeightingType)voc.weightingType; + + createScoringObject(); + + m_nodes.resize(voc.nNodes + 1); // +1 to include root + m_nodes[0].id = 0; + + for(unsigned int i = 0; i < voc.nNodes; ++i) + { + NodeId nid = voc.nodes[i].nodeId; + NodeId pid = voc.nodes[i].parentId; + WordValue weight = voc.nodes[i].weight; + + m_nodes[nid].id = nid; + m_nodes[nid].parent = pid; + m_nodes[nid].weight = weight; + m_nodes[pid].children.push_back(nid); + + // Sorry to break template here + m_nodes[nid].descriptor = boost::dynamic_bitset<>(voc.nodes[i].descriptor, voc.nodes[i].descriptor + 4); + + if (i < 5) { + std::string test; + boost::to_string(m_nodes[nid].descriptor, test); + //cout << "descriptor[" << i << "] = " << test << endl; + } + } + + // words + m_words.resize(voc.nWords); + + for(unsigned int i = 0; i < voc.nWords; ++i) + { + NodeId wid = (int)voc.words[i].wordId; + 
NodeId nid = (int)voc.words[i].nodeId; + + m_nodes[nid].word_id = wid; + m_words[wid] = &m_nodes[nid]; + } +} + +// Added by VINS ]]] + +// -------------------------------------------------------------------------- + +/** + * Writes printable information of the vocabulary + * @param os stream to write to + * @param voc + */ +template +std::ostream& operator<<(std::ostream &os, + const TemplatedVocabulary &voc) +{ + os << "Vocabulary: k = " << voc.getBranchingFactor() + << ", L = " << voc.getDepthLevels() + << ", Weighting = "; + + switch(voc.getWeightingType()) + { + case TF_IDF: os << "tf-idf"; break; + case TF: os << "tf"; break; + case IDF: os << "idf"; break; + case BINARY: os << "binary"; break; + } + + os << ", Scoring = "; + switch(voc.getScoringType()) + { + case L1_NORM: os << "L1-norm"; break; + case L2_NORM: os << "L2-norm"; break; + case CHI_SQUARE: os << "Chi square distance"; break; + case KL: os << "KL-divergence"; break; + case BHATTACHARYYA: os << "Bhattacharyya coefficient"; break; + case DOT_PRODUCT: os << "Dot product"; break; + } + + os << ", Number of words = " << voc.size(); + + return os; +} + +} // namespace DBoW2 + +#endif diff --git a/loop_fusion/src/ThirdParty/DUtils/DException.h b/loop_fusion/src/ThirdParty/DUtils/DException.h new file mode 100644 index 0000000..c8e7450 --- /dev/null +++ b/loop_fusion/src/ThirdParty/DUtils/DException.h @@ -0,0 +1,65 @@ +/* + * File: DException.h + * Project: DUtils library + * Author: Dorian Galvez-Lopez + * Date: October 6, 2009 + * Description: general exception of the library + * License: see the LICENSE.txt file + * + */ + +#pragma once + +#ifndef __D_EXCEPTION__ +#define __D_EXCEPTION__ + +#include +#include +using namespace std; + +namespace DUtils { + +/// General exception +class DException : + public exception +{ +public: + /** + * Creates an exception with a general error message + */ + DException(void) throw(): m_message("DUtils exception"){} + + /** + * Creates an exception with a custom 
error message + * @param msg: message + */ + DException(const char *msg) throw(): m_message(msg){} + + /** + * Creates an exception with a custom error message + * @param msg: message + */ + DException(const string &msg) throw(): m_message(msg){} + + /** + * Destructor + */ + virtual ~DException(void) throw(){} + + /** + * Returns the exception message + */ + virtual const char* what() const throw() + { + return m_message.c_str(); + } + +protected: + /// Error message + string m_message; +}; + +} + +#endif + diff --git a/loop_fusion/src/ThirdParty/DUtils/DUtils.h b/loop_fusion/src/ThirdParty/DUtils/DUtils.h new file mode 100644 index 0000000..7d1829c --- /dev/null +++ b/loop_fusion/src/ThirdParty/DUtils/DUtils.h @@ -0,0 +1,48 @@ +/* + * File: DUtils.h + * Project: DUtils library + * Author: Dorian Galvez-Lopez + * Date: October 6, 2009 + * Description: include file for including all the library functionalities + * License: see the LICENSE.txt file + * + */ + +/*! \mainpage DUtils Library + * + * DUtils library for C++: + * Collection of classes with general utilities for C++ applications. + * + * Written by Dorian Galvez-Lopez, + * University of Zaragoza + * + * Check my website to obtain updates: http://webdiis.unizar.es/~dorian + * + * \section license License + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License (LGPL) as + * published by the Free Software Foundation, either version 3 of the License, + * or any later version. 
+ * + */ + + +#pragma once + +#ifndef __D_UTILS__ +#define __D_UTILS__ + +/// Several utilities for C++ programs +namespace DUtils +{ +} + +// Exception +#include "DException.h" + +#include "Timestamp.h" +// Random numbers +#include "Random.h" + + +#endif diff --git a/loop_fusion/src/ThirdParty/DUtils/Random.cpp b/loop_fusion/src/ThirdParty/DUtils/Random.cpp new file mode 100644 index 0000000..79df2a6 --- /dev/null +++ b/loop_fusion/src/ThirdParty/DUtils/Random.cpp @@ -0,0 +1,129 @@ +/* + * File: Random.cpp + * Project: DUtils library + * Author: Dorian Galvez-Lopez + * Date: April 2010 + * Description: manages pseudo-random numbers + * License: see the LICENSE.txt file + * + */ + +#include "Random.h" +#include "Timestamp.h" +#include +using namespace std; + +bool DUtils::Random::m_already_seeded = false; + +void DUtils::Random::SeedRand(){ + Timestamp time; + time.setToCurrentTime(); + srand((unsigned)time.getFloatTime()); +} + +void DUtils::Random::SeedRandOnce() +{ + if(!m_already_seeded) + { + DUtils::Random::SeedRand(); + m_already_seeded = true; + } +} + +void DUtils::Random::SeedRand(int seed) +{ + srand(seed); +} + +void DUtils::Random::SeedRandOnce(int seed) +{ + if(!m_already_seeded) + { + DUtils::Random::SeedRand(seed); + m_already_seeded = true; + } +} + +int DUtils::Random::RandomInt(int min, int max){ + int d = max - min + 1; + return int(((double)rand()/((double)RAND_MAX + 1.0)) * d) + min; +} + +// --------------------------------------------------------------------------- +// --------------------------------------------------------------------------- + +DUtils::Random::UnrepeatedRandomizer::UnrepeatedRandomizer(int min, int max) +{ + if(min <= max) + { + m_min = min; + m_max = max; + } + else + { + m_min = max; + m_max = min; + } + + createValues(); +} + +// --------------------------------------------------------------------------- + +DUtils::Random::UnrepeatedRandomizer::UnrepeatedRandomizer + (const DUtils::Random::UnrepeatedRandomizer& rnd) +{ 
+ *this = rnd; +} + +// --------------------------------------------------------------------------- + +int DUtils::Random::UnrepeatedRandomizer::get() +{ + if(empty()) createValues(); + + DUtils::Random::SeedRandOnce(); + + int k = DUtils::Random::RandomInt(0, m_values.size()-1); + int ret = m_values[k]; + m_values[k] = m_values.back(); + m_values.pop_back(); + + return ret; +} + +// --------------------------------------------------------------------------- + +void DUtils::Random::UnrepeatedRandomizer::createValues() +{ + int n = m_max - m_min + 1; + + m_values.resize(n); + for(int i = 0; i < n; ++i) m_values[i] = m_min + i; +} + +// --------------------------------------------------------------------------- + +void DUtils::Random::UnrepeatedRandomizer::reset() +{ + if((int)m_values.size() != m_max - m_min + 1) createValues(); +} + +// --------------------------------------------------------------------------- + +DUtils::Random::UnrepeatedRandomizer& +DUtils::Random::UnrepeatedRandomizer::operator= + (const DUtils::Random::UnrepeatedRandomizer& rnd) +{ + if(this != &rnd) + { + this->m_min = rnd.m_min; + this->m_max = rnd.m_max; + this->m_values = rnd.m_values; + } + return *this; +} + +// --------------------------------------------------------------------------- + + diff --git a/loop_fusion/src/ThirdParty/DUtils/Random.h b/loop_fusion/src/ThirdParty/DUtils/Random.h new file mode 100644 index 0000000..5115dc4 --- /dev/null +++ b/loop_fusion/src/ThirdParty/DUtils/Random.h @@ -0,0 +1,184 @@ +/* + * File: Random.h + * Project: DUtils library + * Author: Dorian Galvez-Lopez + * Date: April 2010, November 2011 + * Description: manages pseudo-random numbers + * License: see the LICENSE.txt file + * + */ + +#pragma once +#ifndef __D_RANDOM__ +#define __D_RANDOM__ + +#include +#include + +namespace DUtils { + +/// Functions to generate pseudo-random numbers +class Random +{ +public: + class UnrepeatedRandomizer; + +public: + /** + * Sets the random number seed to the 
current time + */ + static void SeedRand(); + + /** + * Sets the random number seed to the current time only the first + * time this function is called + */ + static void SeedRandOnce(); + + /** + * Sets the given random number seed + * @param seed + */ + static void SeedRand(int seed); + + /** + * Sets the given random number seed only the first time this function + * is called + * @param seed + */ + static void SeedRandOnce(int seed); + + /** + * Returns a random number in the range [0..1] + * @return random T number in [0..1] + */ + template + static T RandomValue(){ + return (T)rand()/(T)RAND_MAX; + } + + /** + * Returns a random number in the range [min..max] + * @param min + * @param max + * @return random T number in [min..max] + */ + template + static T RandomValue(T min, T max){ + return Random::RandomValue() * (max - min) + min; + } + + /** + * Returns a random int in the range [min..max] + * @param min + * @param max + * @return random int in [min..max] + */ + static int RandomInt(int min, int max); + + /** + * Returns a random number from a gaussian distribution + * @param mean + * @param sigma standard deviation + */ + template + static T RandomGaussianValue(T mean, T sigma) + { + // Box-Muller transformation + T x1, x2, w, y1; + + do { + x1 = (T)2. * RandomValue() - (T)1.; + x2 = (T)2. * RandomValue() - (T)1.; + w = x1 * x1 + x2 * x2; + } while ( w >= (T)1. || w == (T)0. 
); + + w = sqrt( ((T)-2.0 * log( w ) ) / w ); + y1 = x1 * w; + + return( mean + y1 * sigma ); + } + +private: + + /// If SeedRandOnce() or SeedRandOnce(int) have already been called + static bool m_already_seeded; + +}; + +// --------------------------------------------------------------------------- + +/// Provides pseudo-random numbers with no repetitions +class Random::UnrepeatedRandomizer +{ +public: + + /** + * Creates a randomizer that returns numbers in the range [min, max] + * @param min + * @param max + */ + UnrepeatedRandomizer(int min, int max); + ~UnrepeatedRandomizer(){} + + /** + * Copies a randomizer + * @param rnd + */ + UnrepeatedRandomizer(const UnrepeatedRandomizer& rnd); + + /** + * Copies a randomizer + * @param rnd + */ + UnrepeatedRandomizer& operator=(const UnrepeatedRandomizer& rnd); + + /** + * Returns a random number not given before. If all the possible values + * were already given, the process starts again + * @return unrepeated random number + */ + int get(); + + /** + * Returns whether all the possible values between min and max were + * already given. 
If get() is called when empty() is true, the behaviour + * is the same than after creating the randomizer + * @return true iff all the values were returned + */ + inline bool empty() const { return m_values.empty(); } + + /** + * Returns the number of values still to be returned + * @return amount of values to return + */ + inline unsigned int left() const { return m_values.size(); } + + /** + * Resets the randomizer as it were just created + */ + void reset(); + +protected: + + /** + * Creates the vector with available values + */ + void createValues(); + +protected: + + /// Min of range of values + int m_min; + /// Max of range of values + int m_max; + + /// Available values + std::vector m_values; + +}; + +} + +#endif + diff --git a/loop_fusion/src/ThirdParty/DUtils/Timestamp.cpp b/loop_fusion/src/ThirdParty/DUtils/Timestamp.cpp new file mode 100644 index 0000000..4551839 --- /dev/null +++ b/loop_fusion/src/ThirdParty/DUtils/Timestamp.cpp @@ -0,0 +1,246 @@ +/* + * File: Timestamp.cpp + * Author: Dorian Galvez-Lopez + * Date: March 2009 + * Description: timestamping functions + * + * Note: in windows, this class has a 1ms resolution + * + * License: see the LICENSE.txt file + * + */ + +#include +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#ifndef WIN32 +#define WIN32 +#endif +#endif + +#ifdef WIN32 +#include +#define sprintf sprintf_s +#else +#include +#endif + +#include "Timestamp.h" + +using namespace std; + +using namespace DUtils; + +Timestamp::Timestamp(Timestamp::tOptions option) +{ + if(option & CURRENT_TIME) + setToCurrentTime(); + else if(option & ZERO) + setTime(0.); +} + +Timestamp::~Timestamp(void) +{ +} + +bool Timestamp::empty() const +{ + return m_secs == 0 && m_usecs == 0; +} + +void Timestamp::setToCurrentTime(){ + +#ifdef WIN32 + struct __timeb32 timebuffer; + _ftime32_s( &timebuffer ); // C4996 + // Note: _ftime is deprecated; consider using _ftime_s instead + m_secs = timebuffer.time; + m_usecs = timebuffer.millitm * 
1000; +#else + struct timeval now; + gettimeofday(&now, NULL); + m_secs = now.tv_sec; + m_usecs = now.tv_usec; +#endif + +} + +void Timestamp::setTime(const string &stime){ + string::size_type p = stime.find('.'); + if(p == string::npos){ + m_secs = atol(stime.c_str()); + m_usecs = 0; + }else{ + m_secs = atol(stime.substr(0, p).c_str()); + + string s_usecs = stime.substr(p+1, 6); + m_usecs = atol(stime.substr(p+1).c_str()); + m_usecs *= (unsigned long)pow(10.0, double(6 - s_usecs.length())); + } +} + +void Timestamp::setTime(double s) +{ + m_secs = (unsigned long)s; + m_usecs = (s - (double)m_secs) * 1e6; +} + +double Timestamp::getFloatTime() const { + return double(m_secs) + double(m_usecs)/1000000.0; +} + +string Timestamp::getStringTime() const { + char buf[32]; + sprintf(buf, "%.6lf", this->getFloatTime()); + return string(buf); +} + +double Timestamp::operator- (const Timestamp &t) const { + return this->getFloatTime() - t.getFloatTime(); +} + +Timestamp& Timestamp::operator+= (double s) +{ + *this = *this + s; + return *this; +} + +Timestamp& Timestamp::operator-= (double s) +{ + *this = *this - s; + return *this; +} + +Timestamp Timestamp::operator+ (double s) const +{ + unsigned long secs = (long)floor(s); + unsigned long usecs = (long)((s - (double)secs) * 1e6); + + return this->plus(secs, usecs); +} + +Timestamp Timestamp::plus(unsigned long secs, unsigned long usecs) const +{ + Timestamp t; + + const unsigned long max = 1000000ul; + + if(m_usecs + usecs >= max) + t.setTime(m_secs + secs + 1, m_usecs + usecs - max); + else + t.setTime(m_secs + secs, m_usecs + usecs); + + return t; +} + +Timestamp Timestamp::operator- (double s) const +{ + unsigned long secs = (long)floor(s); + unsigned long usecs = (long)((s - (double)secs) * 1e6); + + return this->minus(secs, usecs); +} + +Timestamp Timestamp::minus(unsigned long secs, unsigned long usecs) const +{ + Timestamp t; + + const unsigned long max = 1000000ul; + + if(m_usecs < usecs) + t.setTime(m_secs - secs 
- 1, max - (usecs - m_usecs)); + else + t.setTime(m_secs - secs, m_usecs - usecs); + + return t; +} + +bool Timestamp::operator> (const Timestamp &t) const +{ + if(m_secs > t.m_secs) return true; + else if(m_secs == t.m_secs) return m_usecs > t.m_usecs; + else return false; +} + +bool Timestamp::operator>= (const Timestamp &t) const +{ + if(m_secs > t.m_secs) return true; + else if(m_secs == t.m_secs) return m_usecs >= t.m_usecs; + else return false; +} + +bool Timestamp::operator< (const Timestamp &t) const +{ + if(m_secs < t.m_secs) return true; + else if(m_secs == t.m_secs) return m_usecs < t.m_usecs; + else return false; +} + +bool Timestamp::operator<= (const Timestamp &t) const +{ + if(m_secs < t.m_secs) return true; + else if(m_secs == t.m_secs) return m_usecs <= t.m_usecs; + else return false; +} + +bool Timestamp::operator== (const Timestamp &t) const +{ + return(m_secs == t.m_secs && m_usecs == t.m_usecs); +} + + +string Timestamp::Format(bool machine_friendly) const +{ + struct tm tm_time; + + time_t t = (time_t)getFloatTime(); + +#ifdef WIN32 + localtime_s(&tm_time, &t); +#else + localtime_r(&t, &tm_time); +#endif + + char buffer[128]; + + if(machine_friendly) + { + strftime(buffer, 128, "%Y%m%d_%H%M%S", &tm_time); + } + else + { + strftime(buffer, 128, "%c", &tm_time); // Thu Aug 23 14:55:02 2001 + } + + return string(buffer); +} + +string Timestamp::Format(double s) { + int days = int(s / (24. * 3600.0)); + s -= days * (24. * 3600.0); + int hours = int(s / 3600.0); + s -= hours * 3600; + int minutes = int(s / 60.0); + s -= minutes * 60; + int seconds = int(s); + int ms = int((s - seconds)*1e6); + + stringstream ss; + ss.fill('0'); + bool b; + if((b = (days > 0))) ss << days << "d "; + if((b = (b || hours > 0))) ss << setw(2) << hours << ":"; + if((b = (b || minutes > 0))) ss << setw(2) << minutes << ":"; + if(b) ss << setw(2); + ss << seconds; + if(!b) ss << "." 
<< setw(6) << ms; + + return ss.str(); +} + + diff --git a/loop_fusion/src/ThirdParty/DUtils/Timestamp.h b/loop_fusion/src/ThirdParty/DUtils/Timestamp.h new file mode 100644 index 0000000..b92f89f --- /dev/null +++ b/loop_fusion/src/ThirdParty/DUtils/Timestamp.h @@ -0,0 +1,204 @@ +/* + * File: Timestamp.h + * Author: Dorian Galvez-Lopez + * Date: March 2009 + * Description: timestamping functions + * License: see the LICENSE.txt file + * + */ + +#ifndef __D_TIMESTAMP__ +#define __D_TIMESTAMP__ + +#include +using namespace std; + +namespace DUtils { + +/// Timestamp +class Timestamp +{ +public: + + /// Options to initiate a timestamp + enum tOptions + { + NONE = 0, + CURRENT_TIME = 0x1, + ZERO = 0x2 + }; + +public: + + /** + * Creates a timestamp + * @param option option to set the initial time stamp + */ + Timestamp(Timestamp::tOptions option = NONE); + + /** + * Destructor + */ + virtual ~Timestamp(void); + + /** + * Says if the timestamp is "empty": seconds and usecs are both 0, as + * when initiated with the ZERO flag + * @return true iif secs == usecs == 0 + */ + bool empty() const; + + /** + * Sets this instance to the current time + */ + void setToCurrentTime(); + + /** + * Sets the timestamp from seconds and microseconds + * @param secs: seconds + * @param usecs: microseconds + */ + inline void setTime(unsigned long secs, unsigned long usecs){ + m_secs = secs; + m_usecs = usecs; + } + + /** + * Returns the timestamp in seconds and microseconds + * @param secs seconds + * @param usecs microseconds + */ + inline void getTime(unsigned long &secs, unsigned long &usecs) const + { + secs = m_secs; + usecs = m_usecs; + } + + /** + * Sets the timestamp from a string with the time in seconds + * @param stime: string such as "1235603336.036609" + */ + void setTime(const string &stime); + + /** + * Sets the timestamp from a number of seconds from the epoch + * @param s seconds from the epoch + */ + void setTime(double s); + + /** + * Returns this timestamp as the 
number of seconds in (long) float format + */ + double getFloatTime() const; + + /** + * Returns this timestamp as the number of seconds in fixed length string format + */ + string getStringTime() const; + + /** + * Returns the difference in seconds between this timestamp (greater) and t (smaller) + * If the order is swapped, a negative number is returned + * @param t: timestamp to subtract from this timestamp + * @return difference in seconds + */ + double operator- (const Timestamp &t) const; + + /** + * Returns a copy of this timestamp + s seconds + us microseconds + * @param s seconds + * @param us microseconds + */ + Timestamp plus(unsigned long s, unsigned long us) const; + + /** + * Returns a copy of this timestamp - s seconds - us microseconds + * @param s seconds + * @param us microseconds + */ + Timestamp minus(unsigned long s, unsigned long us) const; + + /** + * Adds s seconds to this timestamp and returns a reference to itself + * @param s seconds + * @return reference to this timestamp + */ + Timestamp& operator+= (double s); + + /** + * Substracts s seconds to this timestamp and returns a reference to itself + * @param s seconds + * @return reference to this timestamp + */ + Timestamp& operator-= (double s); + + /** + * Returns a copy of this timestamp + s seconds + * @param s: seconds + */ + Timestamp operator+ (double s) const; + + /** + * Returns a copy of this timestamp - s seconds + * @param s: seconds + */ + Timestamp operator- (double s) const; + + /** + * Returns whether this timestamp is at the future of t + * @param t + */ + bool operator> (const Timestamp &t) const; + + /** + * Returns whether this timestamp is at the future of (or is the same as) t + * @param t + */ + bool operator>= (const Timestamp &t) const; + + /** + * Returns whether this timestamp and t represent the same instant + * @param t + */ + bool operator== (const Timestamp &t) const; + + /** + * Returns whether this timestamp is at the past of t + * @param t + */ + bool 
operator< (const Timestamp &t) const; + + /** + * Returns whether this timestamp is at the past of (or is the same as) t + * @param t + */ + bool operator<= (const Timestamp &t) const; + + /** + * Returns the timestamp in a human-readable string + * @param machine_friendly if true, the returned string is formatted + * to yyyymmdd_hhmmss, without weekday or spaces + * @note This has not been tested under Windows + * @note The timestamp is truncated to seconds + */ + string Format(bool machine_friendly = false) const; + + /** + * Returns a string version of the elapsed time in seconds, with the format + * xd hh:mm:ss, hh:mm:ss, mm:ss or s.us + * @param s: elapsed seconds (given by getFloatTime) to format + */ + static string Format(double s); + + +protected: + /// Seconds + unsigned long m_secs; // seconds + /// Microseconds + unsigned long m_usecs; // microseconds +}; + +} + +#endif + diff --git a/loop_fusion/src/ThirdParty/DVision/BRIEF.cpp b/loop_fusion/src/ThirdParty/DVision/BRIEF.cpp new file mode 100644 index 0000000..49940d1 --- /dev/null +++ b/loop_fusion/src/ThirdParty/DVision/BRIEF.cpp @@ -0,0 +1,174 @@ +/** + * File: BRIEF.cpp + * Author: Dorian Galvez + * Date: September 2010 + * Description: implementation of BRIEF (Binary Robust Independent + * Elementary Features) descriptor by + * Michael Calonder, Vincent Lepetitand Pascal Fua + * + close binary tests (by Dorian Galvez-Lopez) + * License: see the LICENSE.txt file + * + */ + +#include "BRIEF.h" +#include "../DUtils/DUtils.h" +#include +#include + +using namespace std; +using namespace DVision; + +// ---------------------------------------------------------------------------- + +BRIEF::BRIEF(int nbits, int patch_size, Type type): + m_bit_length(nbits), m_patch_size(patch_size), m_type(type) +{ + assert(patch_size > 1); + assert(nbits > 0); + generateTestPoints(); +} + +// ---------------------------------------------------------------------------- + +BRIEF::~BRIEF() +{ +} + +// 
--------------------------------------------------------------------------- + +void BRIEF::compute(const cv::Mat &image, + const std::vector &points, + vector &descriptors, + bool treat_image) const +{ + const float sigma = 2.f; + const cv::Size ksize(9, 9); + + cv::Mat im; + if(treat_image) + { + cv::Mat aux; + if(image.depth() == 3) + { + cv::cvtColor(image, aux, cv::COLOR_RGB2GRAY); + } + else + { + aux = image; + } + + cv::GaussianBlur(aux, im, ksize, sigma, sigma); + + } + else + { + im = image; + } + + assert(im.type() == CV_8UC1); + assert(im.isContinuous()); + + // use im now + const int W = im.cols; + const int H = im.rows; + + descriptors.resize(points.size()); + std::vector::iterator dit; + + std::vector::const_iterator kit; + + int x1, y1, x2, y2; + + dit = descriptors.begin(); + for(kit = points.begin(); kit != points.end(); ++kit, ++dit) + { + dit->resize(m_bit_length); + dit->reset(); + + for(unsigned int i = 0; i < m_x1.size(); ++i) + { + x1 = (int)(kit->pt.x + m_x1[i]); + y1 = (int)(kit->pt.y + m_y1[i]); + x2 = (int)(kit->pt.x + m_x2[i]); + y2 = (int)(kit->pt.y + m_y2[i]); + + if(x1 >= 0 && x1 < W && y1 >= 0 && y1 < H + && x2 >= 0 && x2 < W && y2 >= 0 && y2 < H) + { + if( im.ptr(y1)[x1] < im.ptr(y2)[x2] ) + { + dit->set(i); + } + } // if (x,y)_1 and (x,y)_2 are in the image + + } // for each (x,y) + } // for each keypoint +} + +// --------------------------------------------------------------------------- + +void BRIEF::generateTestPoints() +{ + m_x1.resize(m_bit_length); + m_y1.resize(m_bit_length); + m_x2.resize(m_bit_length); + m_y2.resize(m_bit_length); + + const float g_mean = 0.f; + const float g_sigma = 0.2f * (float)m_patch_size; + const float c_sigma = 0.08f * (float)m_patch_size; + + float sigma2; + if(m_type == RANDOM) + sigma2 = g_sigma; + else + sigma2 = c_sigma; + + const int max_v = m_patch_size / 2; + + DUtils::Random::SeedRandOnce(); + + for(int i = 0; i < m_bit_length; ++i) + { + int x1, y1, x2, y2; + + do + { + x1 = 
DUtils::Random::RandomGaussianValue(g_mean, g_sigma); + } while( x1 > max_v || x1 < -max_v); + + do + { + y1 = DUtils::Random::RandomGaussianValue(g_mean, g_sigma); + } while( y1 > max_v || y1 < -max_v); + + float meanx, meany; + if(m_type == RANDOM) + meanx = meany = g_mean; + else + { + meanx = x1; + meany = y1; + } + + do + { + x2 = DUtils::Random::RandomGaussianValue(meanx, sigma2); + } while( x2 > max_v || x2 < -max_v); + + do + { + y2 = DUtils::Random::RandomGaussianValue(meany, sigma2); + } while( y2 > max_v || y2 < -max_v); + + m_x1[i] = x1; + m_y1[i] = y1; + m_x2[i] = x2; + m_y2[i] = y2; + } + +} + +// ---------------------------------------------------------------------------- + + diff --git a/loop_fusion/src/ThirdParty/DVision/BRIEF.h b/loop_fusion/src/ThirdParty/DVision/BRIEF.h new file mode 100644 index 0000000..78d9fbf --- /dev/null +++ b/loop_fusion/src/ThirdParty/DVision/BRIEF.h @@ -0,0 +1,196 @@ +/** + * File: BRIEF.h + * Author: Dorian Galvez-Lopez + * Date: March 2011 + * Description: implementation of BRIEF (Binary Robust Independent + * Elementary Features) descriptor by + * Michael Calonder, Vincent Lepetit and Pascal Fua + * + close binary tests (by Dorian Galvez-Lopez) + * + * If you use this code with the RANDOM_CLOSE descriptor version, please cite: + @INPROCEEDINGS{GalvezIROS11, + author={Galvez-Lopez, Dorian and Tardos, Juan D.}, + booktitle={Intelligent Robots and Systems (IROS), 2011 IEEE/RSJ International Conference on}, + title={Real-time loop detection with bags of binary words}, + year={2011}, + month={sept.}, + volume={}, + number={}, + pages={51 -58}, + keywords={}, + doi={10.1109/IROS.2011.6094885}, + ISSN={2153-0858} + } + * + * License: see the LICENSE.txt file + * + */ + +#ifndef __D_BRIEF__ +#define __D_BRIEF__ + +#include +#include +#include + +namespace DVision { + +/// BRIEF descriptor +class BRIEF +{ +public: + + /// Bitset type + typedef boost::dynamic_bitset<> bitset; + + /// Type of pairs + enum Type + { + RANDOM, // 
random pairs (Calonder's original version) + RANDOM_CLOSE, // random but close pairs (used in GalvezIROS11) + }; + +public: + + /** + * Creates the BRIEF a priori data for descriptors of nbits length + * @param nbits descriptor length in bits + * @param patch_size + * @param type type of pairs to generate + */ + BRIEF(int nbits = 256, int patch_size = 48, Type type = RANDOM_CLOSE); + virtual ~BRIEF(); + + /** + * Returns the descriptor length in bits + * @return descriptor length in bits + */ + inline int getDescriptorLengthInBits() const + { + return m_bit_length; + } + + /** + * Returns the type of classifier + */ + inline Type getType() const + { + return m_type; + } + + /** + * Returns the size of the patch + */ + inline int getPatchSize() const + { + return m_patch_size; + } + + /** + * Returns the BRIEF descriptors of the given keypoints in the given image + * @param image + * @param points + * @param descriptors + * @param treat_image (default: true) if true, the image is converted to + * grayscale if needed and smoothed. If not, it is assumed the image has + * been treated by the user + * @note this function is similar to BRIEF::compute + */ + inline void operator() (const cv::Mat &image, + const std::vector &points, + std::vector &descriptors, + bool treat_image = true) const + { + compute(image, points, descriptors, treat_image); + } + + /** + * Returns the BRIEF descriptors of the given keypoints in the given image + * @param image + * @param points + * @param descriptors + * @param treat_image (default: true) if true, the image is converted to + * grayscale if needed and smoothed. 
If not, it is assumed the image has + * been treated by the user + * @note this function is similar to BRIEF::operator() + */ + void compute(const cv::Mat &image, + const std::vector &points, + std::vector &descriptors, + bool treat_image = true) const; + + /** + * Exports the test pattern + * @param x1 x1 coordinates of pairs + * @param y1 y1 coordinates of pairs + * @param x2 x2 coordinates of pairs + * @param y2 y2 coordinates of pairs + */ + inline void exportPairs(std::vector &x1, std::vector &y1, + std::vector &x2, std::vector &y2) const + { + x1 = m_x1; + y1 = m_y1; + x2 = m_x2; + y2 = m_y2; + } + + /** + * Sets the test pattern + * @param x1 x1 coordinates of pairs + * @param y1 y1 coordinates of pairs + * @param x2 x2 coordinates of pairs + * @param y2 y2 coordinates of pairs + */ + inline void importPairs(const std::vector &x1, + const std::vector &y1, const std::vector &x2, + const std::vector &y2) + { + m_x1 = x1; + m_y1 = y1; + m_x2 = x2; + m_y2 = y2; + m_bit_length = x1.size(); + } + + /** + * Returns the Hamming distance between two descriptors + * @param a first descriptor vector + * @param b second descriptor vector + * @return hamming distance + */ + inline static int distance(const bitset &a, const bitset &b) + { + return (a^b).count(); + } + +protected: + + /** + * Generates random points in the patch coordinates, according to + * m_patch_size and m_bit_length + */ + void generateTestPoints(); + +protected: + + /// Descriptor length in bits + int m_bit_length; + + /// Patch size + int m_patch_size; + + /// Type of pairs + Type m_type; + + /// Coordinates of test points relative to the center of the patch + std::vector m_x1, m_x2; + std::vector m_y1, m_y2; + +}; + +} // namespace DVision + +#endif + + diff --git a/loop_fusion/src/ThirdParty/DVision/DVision.h b/loop_fusion/src/ThirdParty/DVision/DVision.h new file mode 100644 index 0000000..486d2b3 --- /dev/null +++ b/loop_fusion/src/ThirdParty/DVision/DVision.h @@ -0,0 +1,43 @@ +/* + * File: 
DVision.h + * Project: DVision library + * Author: Dorian Galvez-Lopez + * Date: October 4, 2010 + * Description: several functions for computer vision + * License: see the LICENSE.txt file + * + */ + +/*! \mainpage DVision Library + * + * DVision library for C++ and OpenCV: + * Collection of classes with computer vision functionality + * + * Written by Dorian Galvez-Lopez, + * University of Zaragoza + * + * Check my website to obtain updates: http://webdiis.unizar.es/~dorian + * + * \section requirements Requirements + * This library requires the DUtils and DUtilsCV libraries and the OpenCV library. + * + * \section license License + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License (LGPL) as + * published by the Free Software Foundation, either version 3 of the License, + * or any later version. + * + */ + +#ifndef __D_VISION__ +#define __D_VISION__ + + +/// Computer vision functions +namespace DVision +{ +} + +#include "BRIEF.h" + +#endif diff --git a/loop_fusion/src/ThirdParty/VocabularyBinary.cpp b/loop_fusion/src/ThirdParty/VocabularyBinary.cpp new file mode 100644 index 0000000..89b7d6c --- /dev/null +++ b/loop_fusion/src/ThirdParty/VocabularyBinary.cpp @@ -0,0 +1,35 @@ +#include "VocabularyBinary.hpp" +#include +using namespace std; + +VINSLoop::Vocabulary::Vocabulary() +: nNodes(0), nodes(nullptr), nWords(0), words(nullptr) { +} + +VINSLoop::Vocabulary::~Vocabulary() { + if (nodes != nullptr) { + delete [] nodes; + nodes = nullptr; + } + + if (words != nullptr) { + delete [] words; + words = nullptr; + } +} + +void VINSLoop::Vocabulary::serialize(ofstream& stream) { + stream.write((const char *)this, staticDataSize()); + stream.write((const char *)nodes, sizeof(Node) * nNodes); + stream.write((const char *)words, sizeof(Word) * nWords); +} + +void VINSLoop::Vocabulary::deserialize(ifstream& stream) { + stream.read((char *)this, staticDataSize()); + + nodes = new 
Node[nNodes]; + stream.read((char *)nodes, sizeof(Node) * nNodes); + + words = new Word[nWords]; + stream.read((char *)words, sizeof(Word) * nWords); +} diff --git a/loop_fusion/src/ThirdParty/VocabularyBinary.hpp b/loop_fusion/src/ThirdParty/VocabularyBinary.hpp new file mode 100644 index 0000000..912ba82 --- /dev/null +++ b/loop_fusion/src/ThirdParty/VocabularyBinary.hpp @@ -0,0 +1,47 @@ +#ifndef VocabularyBinary_hpp +#define VocabularyBinary_hpp + +#include +#include +#include + +namespace VINSLoop { + +struct Node { + int32_t nodeId; + int32_t parentId; + double weight; + uint64_t descriptor[4]; +}; + +struct Word { + int32_t nodeId; + int32_t wordId; +}; + +struct Vocabulary { + int32_t k; + int32_t L; + int32_t scoringType; + int32_t weightingType; + + int32_t nNodes; + int32_t nWords; + + Node* nodes; + Word* words; + + Vocabulary(); + ~Vocabulary(); + + void serialize(std::ofstream& stream); + void deserialize(std::ifstream& stream); + + inline static size_t staticDataSize() { + return sizeof(Vocabulary) - sizeof(Node*) - sizeof(Word*); + } +}; + +} + +#endif /* VocabularyBinary_hpp */ diff --git a/loop_fusion/src/keyframe.cpp b/loop_fusion/src/keyframe.cpp new file mode 100644 index 0000000..eb7d1e0 --- /dev/null +++ b/loop_fusion/src/keyframe.cpp @@ -0,0 +1,623 @@ +/******************************************************* + * Copyright (C) 2019, Aerial Robotics Group, Hong Kong University of Science and Technology + * + * This file is part of VINS. + * + * Licensed under the GNU General Public License v3.0; + * you may not use this file except in compliance with the License. 
+ * + * Author: Qin Tong (qintonguav@gmail.com) + *******************************************************/ + +#include "keyframe.h" + +template +static void reduceVector(vector &v, vector status) +{ + // changed by PI + int j = 0; + int range = 0; + if ( int(status.size()) <= int(v.size()) ){ + range = (int)status.size(); + } + else{ + range = (int)v.size(); + } + for (int i = 0; i < range; i++) + if (status[i]) + v[j++] = v[i]; + v.resize(j); +} + +// create keyframe online +KeyFrame::KeyFrame(double _time_stamp, int _index, Vector3d &_vio_T_w_i, Matrix3d &_vio_R_w_i, cv::Mat &_image, + vector &_point_3d, vector &_point_2d_uv, vector &_point_2d_norm, + vector &_point_id, int _sequence) +{ + time_stamp = _time_stamp; + index = _index; + vio_T_w_i = _vio_T_w_i; + vio_R_w_i = _vio_R_w_i; + T_w_i = vio_T_w_i; + R_w_i = vio_R_w_i; + origin_vio_T = vio_T_w_i; + origin_vio_R = vio_R_w_i; + image = _image.clone(); + cv::resize(image, thumbnail, cv::Size(80, 60)); + point_3d = _point_3d; + point_2d_uv = _point_2d_uv; + point_2d_norm = _point_2d_norm; + point_id = _point_id; + has_loop = false; + loop_index = -1; + has_fast_point = false; + loop_info << 0, 0, 0, 0, 0, 0, 0, 0; + sequence = _sequence; + computeWindowBRIEFPoint(); + computeBRIEFPoint(); + if(!DEBUG_IMAGE) + image.release(); +} + +// load previous keyframe +KeyFrame::KeyFrame(double _time_stamp, int _index, Vector3d &_vio_T_w_i, Matrix3d &_vio_R_w_i, Vector3d &_T_w_i, Matrix3d &_R_w_i, + cv::Mat &_image, int _loop_index, Eigen::Matrix &_loop_info, + vector &_keypoints, vector &_keypoints_norm, vector &_brief_descriptors) +{ + time_stamp = _time_stamp; + index = _index; + //vio_T_w_i = _vio_T_w_i; + //vio_R_w_i = _vio_R_w_i; + vio_T_w_i = _T_w_i; + vio_R_w_i = _R_w_i; + T_w_i = _T_w_i; + R_w_i = _R_w_i; + if (DEBUG_IMAGE) + { + image = _image.clone(); + cv::resize(image, thumbnail, cv::Size(80, 60)); + } + if (_loop_index != -1) + has_loop = true; + else + has_loop = false; + loop_index = _loop_index; + 
loop_info = _loop_info; + has_fast_point = false; + sequence = 0; + keypoints = _keypoints; + keypoints_norm = _keypoints_norm; + brief_descriptors = _brief_descriptors; +} + + +void KeyFrame::computeWindowBRIEFPoint() +{ + BriefExtractor extractor(BRIEF_PATTERN_FILE.c_str()); + for(int i = 0; i < (int)point_2d_uv.size(); i++) + { + cv::KeyPoint key; + key.pt = point_2d_uv[i]; + window_keypoints.push_back(key); + } + extractor(image, window_keypoints, window_brief_descriptors); +} + +void KeyFrame::computeBRIEFPoint() +{ + BriefExtractor extractor(BRIEF_PATTERN_FILE.c_str()); + const int fast_th = 10; // corner detector response threshold + if(1) + { + //cv::FAST(image, keypoints, fast_th, true); + Grider_FAST::perform_griding(image, keypoints, 200, 1, 1, fast_th, true); + } + else + { + vector tmp_pts; + cv::goodFeaturesToTrack(image, tmp_pts, 500, 0.01, 10); + for(int i = 0; i < (int)tmp_pts.size(); i++) + { + cv::KeyPoint key; + key.pt = tmp_pts[i]; + keypoints.push_back(key); + } + } + + // push back the uvs used in vio + for(int i = 0; i < (int)point_2d_uv.size(); i++) + { + cv::KeyPoint key; + key.pt = point_2d_uv[i]; + keypoints.push_back(key); + } + + // extract and save + extractor(image, keypoints, brief_descriptors); + for (int i = 0; i < (int)keypoints.size(); i++) + { + Eigen::Vector3d tmp_p; + m_camera->liftProjective(Eigen::Vector2d(keypoints[i].pt.x, keypoints[i].pt.y), tmp_p); + cv::KeyPoint tmp_norm; + tmp_norm.pt = cv::Point2f(tmp_p.x()/tmp_p.z(), tmp_p.y()/tmp_p.z()); + keypoints_norm.push_back(tmp_norm); + } +} + +void BriefExtractor::operator() (const cv::Mat &im, vector &keys, vector &descriptors) const +{ + m_brief.compute(im, keys, descriptors); +} + + +bool KeyFrame::searchInAera(const BRIEF::bitset window_descriptor, + const std::vector &descriptors_old, + const std::vector &keypoints_old, + const std::vector &keypoints_old_norm, + cv::Point2f &best_match, + cv::Point2f &best_match_norm) +{ + cv::Point2f best_pt; + int bestDist = 128; + 
int bestIndex = -1; + for(int i = 0; i < (int)descriptors_old.size(); i++) + { + + int dis = HammingDis(window_descriptor, descriptors_old[i]); + if(dis < bestDist) + { + bestDist = dis; + bestIndex = i; + } + } + //printf("[POSEGRAPH]: best dist %d", bestDist); + if (bestIndex != -1 && bestDist < 80) + { + best_match = keypoints_old[bestIndex].pt; + best_match_norm = keypoints_old_norm[bestIndex].pt; + return true; + } + else + return false; +} + +void KeyFrame::searchByBRIEFDes(std::vector &matched_2d_old, + std::vector &matched_2d_old_norm, + std::vector &status, + const std::vector &descriptors_old, + const std::vector &keypoints_old, + const std::vector &keypoints_old_norm) +{ + for(int i = 0; i < (int)window_brief_descriptors.size(); i++) + { + cv::Point2f pt(0.f, 0.f); + cv::Point2f pt_norm(0.f, 0.f); + if (searchInAera(window_brief_descriptors[i], descriptors_old, keypoints_old, keypoints_old_norm, pt, pt_norm)) + status.push_back(1); + else + status.push_back(0); + matched_2d_old.push_back(pt); + matched_2d_old_norm.push_back(pt_norm); + } + +} + + +void KeyFrame::FundmantalMatrixRANSAC(const std::vector &matched_2d_cur_norm, + const std::vector &matched_2d_old_norm, + vector &status) +{ + int n = (int)matched_2d_cur_norm.size(); + for (int i = 0; i < n; i++) + status.push_back(0); + if (n >= 8) + { + vector tmp_cur(n), tmp_old(n); + for (int i = 0; i < (int)matched_2d_cur_norm.size(); i++) + { + double FOCAL_LENGTH = 460.0; + double tmp_x, tmp_y; + tmp_x = FOCAL_LENGTH * matched_2d_cur_norm[i].x + COL / 2.0; + tmp_y = FOCAL_LENGTH * matched_2d_cur_norm[i].y + ROW / 2.0; + tmp_cur[i] = cv::Point2f(tmp_x, tmp_y); + + tmp_x = FOCAL_LENGTH * matched_2d_old_norm[i].x + COL / 2.0; + tmp_y = FOCAL_LENGTH * matched_2d_old_norm[i].y + ROW / 2.0; + tmp_old[i] = cv::Point2f(tmp_x, tmp_y); + } + cv::findFundamentalMat(tmp_cur, tmp_old, cv::FM_RANSAC, 3.0, 0.9, status); + } +} + +void KeyFrame::PnPRANSAC(const vector &matched_2d_old_norm, + const std::vector 
&matched_3d, + std::vector &status, + Eigen::Vector3d &PnP_T_old, Eigen::Matrix3d &PnP_R_old) +{ + //for (int i = 0; i < matched_3d.size(); i++) + // printf("[POSEGRAPH]: 3d x: %f, y: %f, z: %f\n",matched_3d[i].x, matched_3d[i].y, matched_3d[i].z ); + //printf("[POSEGRAPH]: match size %d \n", matched_3d.size()); + cv::Mat r, rvec, t, D, tmp_r; + cv::Mat K = (cv::Mat_(3, 3) << 1.0, 0, 0, 0, 1.0, 0, 0, 0, 1.0); + Matrix3d R_inital; + Vector3d P_inital; + Matrix3d R_w_c = origin_vio_R * qic; + Vector3d T_w_c = origin_vio_T + origin_vio_R * tic; + + R_inital = R_w_c.inverse(); + P_inital = -(R_inital * T_w_c); + + cv::eigen2cv(R_inital, tmp_r); + cv::Rodrigues(tmp_r, rvec); + cv::eigen2cv(P_inital, t); + + cv::Mat inliers; + TicToc t_pnp_ransac; + + int flags = cv::SOLVEPNP_EPNP; // SOLVEPNP_EPNP, SOLVEPNP_ITERATIVE + if (CV_MAJOR_VERSION < 3) + solvePnPRansac(matched_3d, matched_2d_old_norm, K, D, rvec, t, true, 200, PNP_INFLATION / max_focallength, 100, inliers, flags); + else + { + if (CV_MINOR_VERSION < 2) + solvePnPRansac(matched_3d, matched_2d_old_norm, K, D, rvec, t, true, 200, sqrt(PNP_INFLATION / max_focallength), 0.99, inliers, flags); + else + solvePnPRansac(matched_3d, matched_2d_old_norm, K, D, rvec, t, true, 200, PNP_INFLATION / max_focallength, 0.99, inliers, flags); + + } + + for (int i = 0; i < (int)matched_2d_old_norm.size(); i++) + status.push_back(0); + + for( int i = 0; i < inliers.rows; i++) + { + int n = inliers.at(i); + status[n] = 1; + } + + cv::Rodrigues(rvec, r); + Matrix3d R_pnp, R_w_c_old; + cv::cv2eigen(r, R_pnp); + R_w_c_old = R_pnp.transpose(); + Vector3d T_pnp, T_w_c_old; + cv::cv2eigen(t, T_pnp); + T_w_c_old = R_w_c_old * (-T_pnp); + + PnP_R_old = R_w_c_old * qic.transpose(); + PnP_T_old = T_w_c_old - PnP_R_old * tic; + +} + + +bool KeyFrame::findConnection(KeyFrame* old_kf) +{ + TicToc tmp_t; + //printf("[POSEGRAPH]: find Connection\n"); + vector matched_2d_cur, matched_2d_old; + vector matched_2d_cur_norm, matched_2d_old_norm; + 
vector matched_3d; + vector matched_id; + vector status; + + // re-undistort with the latest intrinsic values + for (int i = 0; i < (int)point_2d_uv.size(); i++) { + Eigen::Vector3d tmp_p; + m_camera->liftProjective(Eigen::Vector2d(point_2d_uv[i].x, point_2d_uv[i].y), tmp_p); + point_2d_norm.push_back(cv::Point2f(tmp_p.x()/tmp_p.z(), tmp_p.y()/tmp_p.z())); + } + old_kf->keypoints_norm.clear(); + for (int i = 0; i < (int)old_kf->keypoints.size(); i++) { + Eigen::Vector3d tmp_p; + m_camera->liftProjective(Eigen::Vector2d(old_kf->keypoints[i].pt.x, old_kf->keypoints[i].pt.y), tmp_p); + cv::KeyPoint tmp_norm; + tmp_norm.pt = cv::Point2f(tmp_p.x()/tmp_p.z(), tmp_p.y()/tmp_p.z()); + old_kf->keypoints_norm.push_back(tmp_norm); + } + + matched_3d = point_3d; + matched_2d_cur = point_2d_uv; + matched_id = point_id; + matched_2d_cur_norm = point_2d_norm; + + TicToc t_match; + #if 0 + if (DEBUG_IMAGE) + { + cv::Mat gray_img, loop_match_img; + cv::Mat old_img = old_kf->image; + cv::hconcat(image, old_img, gray_img); + cvtColor(gray_img, loop_match_img, CV_GRAY2RGB); + for(int i = 0; i< (int)point_2d_uv.size(); i++) + { + cv::Point2f cur_pt = point_2d_uv[i]; + cv::circle(loop_match_img, cur_pt, 5, cv::Scalar(0, 255, 0)); + } + for(int i = 0; i< (int)old_kf->keypoints.size(); i++) + { + cv::Point2f old_pt = old_kf->keypoints[i].pt; + old_pt.x += COL; + cv::circle(loop_match_img, old_pt, 5, cv::Scalar(0, 255, 0)); + } + ostringstream path; + path << "/home/tony-ws1/raw_data/loop_image/" + << index << "-" + << old_kf->index << "-" << "0raw_point.jpg"; + cv::imwrite( path.str().c_str(), loop_match_img); + } + #endif + //printf("[POSEGRAPH]: search by des\n"); + searchByBRIEFDes(matched_2d_old, matched_2d_old_norm, status, old_kf->brief_descriptors, old_kf->keypoints, old_kf->keypoints_norm); + reduceVector(matched_2d_cur, status); + reduceVector(matched_2d_old, status); + reduceVector(matched_2d_cur_norm, status); + reduceVector(matched_2d_old_norm, status); + 
reduceVector(matched_3d, status); + reduceVector(matched_id, status); + //printf("[POSEGRAPH]: search by des finish\n"); + + #if 0 + if (DEBUG_IMAGE) + { + int gap = 10; + cv::Mat gap_image(ROW, gap, CV_8UC1, cv::Scalar(255, 255, 255)); + cv::Mat gray_img, loop_match_img; + cv::Mat old_img = old_kf->image; + cv::hconcat(image, gap_image, gap_image); + cv::hconcat(gap_image, old_img, gray_img); + cvtColor(gray_img, loop_match_img, CV_GRAY2RGB); + for(int i = 0; i< (int)matched_2d_cur.size(); i++) + { + cv::Point2f cur_pt = matched_2d_cur[i]; + cv::circle(loop_match_img, cur_pt, 5, cv::Scalar(0, 255, 0)); + } + for(int i = 0; i< (int)matched_2d_old.size(); i++) + { + cv::Point2f old_pt = matched_2d_old[i]; + old_pt.x += (COL + gap); + cv::circle(loop_match_img, old_pt, 5, cv::Scalar(0, 255, 0)); + } + for (int i = 0; i< (int)matched_2d_cur.size(); i++) + { + cv::Point2f old_pt = matched_2d_old[i]; + old_pt.x += (COL + gap); + cv::line(loop_match_img, matched_2d_cur[i], old_pt, cv::Scalar(0, 255, 0), 1, 8, 0); + } + + ostringstream path, path1, path2; + path << "/home/tony-ws1/raw_data/loop_image/" + << index << "-" + << old_kf->index << "-" << "1descriptor_match.jpg"; + cv::imwrite( path.str().c_str(), loop_match_img); + /* + path1 << "/home/tony-ws1/raw_data/loop_image/" + << index << "-" + << old_kf->index << "-" << "1descriptor_match_1.jpg"; + cv::imwrite( path1.str().c_str(), image); + path2 << "/home/tony-ws1/raw_data/loop_image/" + << index << "-" + << old_kf->index << "-" << "1descriptor_match_2.jpg"; + cv::imwrite( path2.str().c_str(), old_img); + */ + + } + #endif + status.clear(); + /* + FundmantalMatrixRANSAC(matched_2d_cur_norm, matched_2d_old_norm, status); + reduceVector(matched_2d_cur, status); + reduceVector(matched_2d_old, status); + reduceVector(matched_2d_cur_norm, status); + reduceVector(matched_2d_old_norm, status); + reduceVector(matched_3d, status); + reduceVector(matched_id, status); + */ + #if 0 + if (DEBUG_IMAGE) + { + int gap = 10; + 
cv::Mat gap_image(ROW, gap, CV_8UC1, cv::Scalar(255, 255, 255)); + cv::Mat gray_img, loop_match_img; + cv::Mat old_img = old_kf->image; + cv::hconcat(image, gap_image, gap_image); + cv::hconcat(gap_image, old_img, gray_img); + cvtColor(gray_img, loop_match_img, CV_GRAY2RGB); + for(int i = 0; i< (int)matched_2d_cur.size(); i++) + { + cv::Point2f cur_pt = matched_2d_cur[i]; + cv::circle(loop_match_img, cur_pt, 5, cv::Scalar(0, 255, 0)); + } + for(int i = 0; i< (int)matched_2d_old.size(); i++) + { + cv::Point2f old_pt = matched_2d_old[i]; + old_pt.x += (COL + gap); + cv::circle(loop_match_img, old_pt, 5, cv::Scalar(0, 255, 0)); + } + for (int i = 0; i< (int)matched_2d_cur.size(); i++) + { + cv::Point2f old_pt = matched_2d_old[i]; + old_pt.x += (COL + gap) ; + cv::line(loop_match_img, matched_2d_cur[i], old_pt, cv::Scalar(0, 255, 0), 1, 8, 0); + } + + ostringstream path; + path << "/home/tony-ws1/raw_data/loop_image/" + << index << "-" + << old_kf->index << "-" << "2fundamental_match.jpg"; + cv::imwrite( path.str().c_str(), loop_match_img); + } + #endif + Eigen::Vector3d PnP_T_old; + Eigen::Matrix3d PnP_R_old; + Eigen::Vector3d relative_t; + Quaterniond relative_q; + double relative_yaw; + if ((int)matched_2d_cur.size() > MIN_LOOP_NUM) + { + status.clear(); + PnPRANSAC(matched_2d_old_norm, matched_3d, status, PnP_T_old, PnP_R_old); + reduceVector(matched_2d_cur, status); + reduceVector(matched_2d_old, status); + reduceVector(matched_2d_cur_norm, status); + reduceVector(matched_2d_old_norm, status); + reduceVector(matched_3d, status); + reduceVector(matched_id, status); + #if 1 + if (DEBUG_IMAGE) + { + int gap = 10; + cv::Mat gap_image(old_kf->image.rows, gap, CV_8UC1, cv::Scalar(255, 255, 255)); + cv::Mat gray_img, loop_match_img; + cv::Mat old_img = old_kf->image; + cv::hconcat(image, gap_image, gap_image); + cv::hconcat(gap_image, old_img, gray_img); + cvtColor(gray_img, loop_match_img, CV_GRAY2RGB); + for(int i = 0; i< (int)matched_2d_cur.size(); i++) + { + 
cv::Point2f cur_pt = matched_2d_cur[i]; + cv::circle(loop_match_img, cur_pt, 5, cv::Scalar(0, 255, 0)); + } + for(int i = 0; i< (int)matched_2d_old.size(); i++) + { + cv::Point2f old_pt = matched_2d_old[i]; + old_pt.x += (old_kf->image.cols + gap); + cv::circle(loop_match_img, old_pt, 5, cv::Scalar(0, 255, 0)); + } + for (int i = 0; i< (int)matched_2d_cur.size(); i++) + { + cv::Point2f old_pt = matched_2d_old[i]; + old_pt.x += (old_kf->image.cols + gap) ; + cv::line(loop_match_img, matched_2d_cur[i], old_pt, cv::Scalar(0, 255, 0), 2, 8, 0); + } + cv::Mat notation(50, old_kf->image.cols + gap + old_kf->image.cols, CV_8UC3, cv::Scalar(255, 255, 255)); + putText(notation, "current frame: " + to_string(index) + " sequence: " + to_string(sequence), cv::Point2f(20, 30), CV_FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(255), 3); + + putText(notation, "previous frame: " + to_string(old_kf->index) + " sequence: " + to_string(old_kf->sequence), cv::Point2f(20 + old_kf->image.cols + gap, 30), CV_FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(255), 3); + cv::vconcat(notation, loop_match_img, loop_match_img); + + /* + ostringstream path; + path << "/home/tony-ws1/raw_data/loop_image/" + << index << "-" + << old_kf->index << "-" << "3pnp_match.jpg"; + cv::imwrite( path.str().c_str(), loop_match_img); + */ + if ((int)matched_2d_cur.size() > MIN_LOOP_NUM) + { + /* + cv::imshow("loop connection",loop_match_img); + cv::waitKey(10); + */ + cv::Mat thumbimage; + cv::resize(loop_match_img, thumbimage, cv::Size(loop_match_img.cols / 2, loop_match_img.rows / 2)); + sensor_msgs::ImagePtr msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", thumbimage).toImageMsg(); + msg->header.stamp = ros::Time(time_stamp); + pub_match_img.publish(msg); + } + } + #endif + } + + if ((int)matched_2d_cur.size() > MIN_LOOP_NUM) + { + relative_t = PnP_R_old.transpose() * (origin_vio_T - PnP_T_old); + relative_q = PnP_R_old.transpose() * origin_vio_R; + relative_yaw = Utility::normalizeAngle(Utility::R2ypr(origin_vio_R).x() - 
Utility::R2ypr(PnP_R_old).x()); + //printf("[POSEGRAPH]: PNP relative\n"); + //cout << "pnp relative_t " << relative_t.transpose() << endl; + //cout << "pnp relative_yaw " << relative_yaw << endl; + if (abs(relative_yaw) < MAX_THETA_DIFF && relative_t.norm() < MAX_POS_DIFF) + { + + has_loop = true; + loop_index = old_kf->index; + loop_info << relative_t.x(), relative_t.y(), relative_t.z(), + relative_q.w(), relative_q.x(), relative_q.y(), relative_q.z(), + relative_yaw; + //cout << "pnp relative_t " << relative_t.transpose() << endl; + //cout << "pnp relative_q " << relative_q.w() << " " << relative_q.vec().transpose() << endl; + return true; + } + } + //printf("[POSEGRAPH]: loop final use num %d %lf--------------- \n", (int)matched_2d_cur.size(), t_match.toc()); + return false; +} + + +int KeyFrame::HammingDis(const BRIEF::bitset &a, const BRIEF::bitset &b) +{ + BRIEF::bitset xor_of_bitset = a ^ b; + int dis = xor_of_bitset.count(); + return dis; +} + +void KeyFrame::getVioPose(Eigen::Vector3d &_T_w_i, Eigen::Matrix3d &_R_w_i) +{ + _T_w_i = vio_T_w_i; + _R_w_i = vio_R_w_i; +} + +void KeyFrame::getPose(Eigen::Vector3d &_T_w_i, Eigen::Matrix3d &_R_w_i) +{ + _T_w_i = T_w_i; + _R_w_i = R_w_i; +} + +void KeyFrame::updatePose(const Eigen::Vector3d &_T_w_i, const Eigen::Matrix3d &_R_w_i) +{ + T_w_i = _T_w_i; + R_w_i = _R_w_i; +} + +void KeyFrame::updateVioPose(const Eigen::Vector3d &_T_w_i, const Eigen::Matrix3d &_R_w_i) +{ + vio_T_w_i = _T_w_i; + vio_R_w_i = _R_w_i; + T_w_i = vio_T_w_i; + R_w_i = vio_R_w_i; +} + +Eigen::Vector3d KeyFrame::getLoopRelativeT() +{ + return Eigen::Vector3d(loop_info(0), loop_info(1), loop_info(2)); +} + +Eigen::Quaterniond KeyFrame::getLoopRelativeQ() +{ + return Eigen::Quaterniond(loop_info(3), loop_info(4), loop_info(5), loop_info(6)); +} + +double KeyFrame::getLoopRelativeYaw() +{ + return loop_info(7); +} + +void KeyFrame::updateLoop(Eigen::Matrix &_loop_info) +{ + if (abs(_loop_info(7)) < 30.0 && Vector3d(_loop_info(0), _loop_info(1), 
_loop_info(2)).norm() < 20.0) + { + //printf("[POSEGRAPH]: update loop info\n"); + loop_info = _loop_info; + } +} + +BriefExtractor::BriefExtractor(const std::string &pattern_file) +{ + // The DVision::BRIEF extractor computes a random pattern by default when + // the object is created. + // We load the pattern that we used to build the vocabulary, to make + // the descriptors compatible with the predefined vocabulary + + // loads the pattern + cv::FileStorage fs(pattern_file.c_str(), cv::FileStorage::READ); + if(!fs.isOpened()) throw string("Could not open file ") + pattern_file; + + vector x1, y1, x2, y2; + fs["x1"] >> x1; + fs["x2"] >> x2; + fs["y1"] >> y1; + fs["y2"] >> y2; + + m_brief.importPairs(x1, y1, x2, y2); +} + + diff --git a/loop_fusion/src/keyframe.h b/loop_fusion/src/keyframe.h new file mode 100644 index 0000000..04836f7 --- /dev/null +++ b/loop_fusion/src/keyframe.h @@ -0,0 +1,114 @@ +/******************************************************* + * Copyright (C) 2019, Aerial Robotics Group, Hong Kong University of Science and Technology + * + * This file is part of VINS. + * + * Licensed under the GNU General Public License v3.0; + * you may not use this file except in compliance with the License. 
+ * + * Author: Qin Tong (qintonguav@gmail.com) + *******************************************************/ + +#pragma once + +#include +#include +#include +#include +#include "camodocal/camera_models/CameraFactory.h" +#include "camodocal/camera_models/CataCamera.h" +#include "camodocal/camera_models/PinholeCamera.h" +#include "utility/tic_toc.h" +#include "utility/utility.h" +#include "utility/Grider_FAST.h" +#include "parameters.h" +#include "ThirdParty/DBoW/DBoW2.h" +#include "ThirdParty/DVision/DVision.h" + +using namespace Eigen; +using namespace std; +using namespace DVision; + + +class BriefExtractor +{ +public: + virtual void operator()(const cv::Mat &im, vector &keys, vector &descriptors) const; + BriefExtractor(const std::string &pattern_file); + + DVision::BRIEF m_brief; +}; + +class KeyFrame +{ +public: + KeyFrame(double _time_stamp, int _index, Vector3d &_vio_T_w_i, Matrix3d &_vio_R_w_i, cv::Mat &_image, + vector &_point_3d, vector &_point_2d_uv, vector &_point_2d_normal, + vector &_point_id, int _sequence); + KeyFrame(double _time_stamp, int _index, Vector3d &_vio_T_w_i, Matrix3d &_vio_R_w_i, Vector3d &_T_w_i, Matrix3d &_R_w_i, + cv::Mat &_image, int _loop_index, Eigen::Matrix &_loop_info, + vector &_keypoints, vector &_keypoints_norm, vector &_brief_descriptors); + bool findConnection(KeyFrame* old_kf); + void computeWindowBRIEFPoint(); + void computeBRIEFPoint(); + //void extractBrief(); + int HammingDis(const BRIEF::bitset &a, const BRIEF::bitset &b); + bool searchInAera(const BRIEF::bitset window_descriptor, + const std::vector &descriptors_old, + const std::vector &keypoints_old, + const std::vector &keypoints_old_norm, + cv::Point2f &best_match, + cv::Point2f &best_match_norm); + void searchByBRIEFDes(std::vector &matched_2d_old, + std::vector &matched_2d_old_norm, + std::vector &status, + const std::vector &descriptors_old, + const std::vector &keypoints_old, + const std::vector &keypoints_old_norm); + void FundmantalMatrixRANSAC(const 
std::vector &matched_2d_cur_norm, + const std::vector &matched_2d_old_norm, + vector &status); + void PnPRANSAC(const vector &matched_2d_old_norm, + const std::vector &matched_3d, + std::vector &status, + Eigen::Vector3d &PnP_T_old, Eigen::Matrix3d &PnP_R_old); + void getVioPose(Eigen::Vector3d &_T_w_i, Eigen::Matrix3d &_R_w_i); + void getPose(Eigen::Vector3d &_T_w_i, Eigen::Matrix3d &_R_w_i); + void updatePose(const Eigen::Vector3d &_T_w_i, const Eigen::Matrix3d &_R_w_i); + void updateVioPose(const Eigen::Vector3d &_T_w_i, const Eigen::Matrix3d &_R_w_i); + void updateLoop(Eigen::Matrix &_loop_info); + + Eigen::Vector3d getLoopRelativeT(); + double getLoopRelativeYaw(); + Eigen::Quaterniond getLoopRelativeQ(); + + + + double time_stamp; + int index; + int local_index; + Eigen::Vector3d vio_T_w_i; + Eigen::Matrix3d vio_R_w_i; + Eigen::Vector3d T_w_i; + Eigen::Matrix3d R_w_i; + Eigen::Vector3d origin_vio_T; + Eigen::Matrix3d origin_vio_R; + cv::Mat image; + cv::Mat thumbnail; + vector point_3d; + vector point_2d_uv; + vector point_2d_norm; + vector point_id; + vector keypoints; + vector keypoints_norm; + vector window_keypoints; + vector brief_descriptors; + vector window_brief_descriptors; + bool has_fast_point; + int sequence; + + bool has_loop; + int loop_index; + Eigen::Matrix loop_info; +}; + diff --git a/loop_fusion/src/parameters.h b/loop_fusion/src/parameters.h new file mode 100644 index 0000000..7ee7b65 --- /dev/null +++ b/loop_fusion/src/parameters.h @@ -0,0 +1,44 @@ +/******************************************************* + * Copyright (C) 2019, Aerial Robotics Group, Hong Kong University of Science and Technology + * + * This file is part of VINS. + * + * Licensed under the GNU General Public License v3.0; + * you may not use this file except in compliance with the License. 
+ * + * Author: Qin Tong (qintonguav@gmail.com) + *******************************************************/ + +#pragma once + +#include "camodocal/camera_models/CameraFactory.h" +#include "camodocal/camera_models/CataCamera.h" +#include "camodocal/camera_models/PinholeCamera.h" +#include +#include +#include +#include +#include +#include + +extern camodocal::CameraPtr m_camera; +extern double max_focallength; +extern double MIN_SCORE; +extern double PNP_INFLATION; +extern int RECALL_IGNORE_RECENT_COUNT; +extern double MAX_THETA_DIFF; +extern double MAX_POS_DIFF; +extern int MIN_LOOP_NUM; +extern Eigen::Vector3d tic; +extern Eigen::Matrix3d qic; +extern ros::Publisher pub_match_img; +extern int VISUALIZATION_SHIFT_X; +extern int VISUALIZATION_SHIFT_Y; +extern std::string BRIEF_PATTERN_FILE; +extern std::string POSE_GRAPH_SAVE_PATH; +extern int ROW; +extern int COL; +extern std::string VINS_RESULT_PATH; +extern int DEBUG_IMAGE; + + diff --git a/loop_fusion/src/pose_graph.cpp b/loop_fusion/src/pose_graph.cpp new file mode 100644 index 0000000..934bfab --- /dev/null +++ b/loop_fusion/src/pose_graph.cpp @@ -0,0 +1,1133 @@ +/******************************************************* + * Copyright (C) 2019, Aerial Robotics Group, Hong Kong University of Science and Technology + * + * This file is part of VINS. + * + * Licensed under the GNU General Public License v3.0; + * you may not use this file except in compliance with the License. 
+ * + * Author: Qin Tong (qintonguav@gmail.com) + *******************************************************/ + +#include "pose_graph.h" + +PoseGraph::PoseGraph() +{ + posegraph_visualization = new CameraPoseVisualization(1.0, 0.0, 1.0, 1.0); + posegraph_visualization->setScale(0.1); + posegraph_visualization->setLineWidth(0.01); + earliest_loop_index = -1; + t_drift = Eigen::Vector3d(0, 0, 0); + yaw_drift = 0; + r_drift = Eigen::Matrix3d::Identity(); + w_t_vio = Eigen::Vector3d(0, 0, 0); + w_r_vio = Eigen::Matrix3d::Identity(); + global_index = 0; + sequence_cnt = 0; + sequence_loop.push_back(0); + base_sequence = 1; + use_imu = 0; +} + +PoseGraph::~PoseGraph() +{ + t_optimization.detach(); +} + +void PoseGraph::registerPub(ros::NodeHandle &n) +{ + pub_pg_path = n.advertise("pose_graph_path", 1000); + pub_base_path = n.advertise("base_path", 1000); + pub_pose_graph = n.advertise("pose_graph", 1000); + for (int i = 1; i < 10; i++) + pub_path[i] = n.advertise("path_" + to_string(i), 1000); +} + +void PoseGraph::setIMUFlag(bool _use_imu) +{ + use_imu = _use_imu; + if(use_imu) + { + printf("[POSEGRAPH]: VIO input, perfrom 4 DoF (x, y, z, yaw) pose graph optimization\n"); + t_optimization = std::thread(&PoseGraph::optimize4DoF, this); + } + else + { + printf("[POSEGRAPH]: VO input, perfrom 6 DoF pose graph optimization\n"); + t_optimization = std::thread(&PoseGraph::optimize6DoF, this); + } + +} + +void PoseGraph::loadVocabulary(std::string voc_path) +{ + voc = new BriefVocabulary(voc_path); + db.setVocabulary(*voc, false, 0); +} + +void PoseGraph::addKeyFrame(KeyFrame* cur_kf, bool flag_detect_loop) +{ + //shift to base frame + Vector3d vio_P_cur; + Matrix3d vio_R_cur; + if (sequence_cnt != cur_kf->sequence) + { + sequence_cnt++; + sequence_loop.push_back(0); + w_t_vio = Eigen::Vector3d(0, 0, 0); + w_r_vio = Eigen::Matrix3d::Identity(); + m_drift.lock(); + t_drift = Eigen::Vector3d(0, 0, 0); + r_drift = Eigen::Matrix3d::Identity(); + m_drift.unlock(); + } + + 
cur_kf->getVioPose(vio_P_cur, vio_R_cur); + vio_P_cur = w_r_vio * vio_P_cur + w_t_vio; + vio_R_cur = w_r_vio * vio_R_cur; + cur_kf->updateVioPose(vio_P_cur, vio_R_cur); + cur_kf->index = global_index; + global_index++; + int loop_index = -1; + if (flag_detect_loop) + { + TicToc tmp_t; + loop_index = detectLoop(cur_kf, cur_kf->index); + } + else + { + addKeyFrameIntoVoc(cur_kf); + } + if (loop_index != -1) + { + //printf("[POSEGRAPH]: %d detect loop with %d \n", cur_kf->index, loop_index); + KeyFrame* old_kf = getKeyFrame(loop_index); + + if ((cur_kf->has_loop && loop_index == old_kf->index) || cur_kf->findConnection(old_kf)) + { + if (earliest_loop_index > loop_index || earliest_loop_index == -1) + earliest_loop_index = loop_index; + + Vector3d w_P_old, w_P_cur, vio_P_cur; + Matrix3d w_R_old, w_R_cur, vio_R_cur; + old_kf->getVioPose(w_P_old, w_R_old); + cur_kf->getVioPose(vio_P_cur, vio_R_cur); + + Vector3d relative_t; + Quaterniond relative_q; + relative_t = cur_kf->getLoopRelativeT(); + relative_q = (cur_kf->getLoopRelativeQ()).toRotationMatrix(); + w_P_cur = w_R_old * relative_t + w_P_old; + w_R_cur = w_R_old * relative_q; + double shift_yaw; + Matrix3d shift_r; + Vector3d shift_t; + if(use_imu) + { + shift_yaw = Utility::R2ypr(w_R_cur).x() - Utility::R2ypr(vio_R_cur).x(); + shift_r = Utility::ypr2R(Vector3d(shift_yaw, 0, 0)); + } + else + shift_r = w_R_cur * vio_R_cur.transpose(); + shift_t = w_P_cur - w_R_cur * vio_R_cur.transpose() * vio_P_cur; + // shift vio pose of whole sequence to the world frame + if (old_kf->sequence != cur_kf->sequence && sequence_loop[cur_kf->sequence] == 0) + { + w_r_vio = shift_r; + w_t_vio = shift_t; + vio_P_cur = w_r_vio * vio_P_cur + w_t_vio; + vio_R_cur = w_r_vio * vio_R_cur; + cur_kf->updateVioPose(vio_P_cur, vio_R_cur); + list::iterator it = keyframelist.begin(); + for (; it != keyframelist.end(); it++) + { + if((*it)->sequence == cur_kf->sequence) + { + Vector3d vio_P_cur; + Matrix3d vio_R_cur; + (*it)->getVioPose(vio_P_cur, 
vio_R_cur); + vio_P_cur = w_r_vio * vio_P_cur + w_t_vio; + vio_R_cur = w_r_vio * vio_R_cur; + (*it)->updateVioPose(vio_P_cur, vio_R_cur); + } + } + sequence_loop[cur_kf->sequence] = 1; + } + m_optimize_buf.lock(); + optimize_buf.push(cur_kf->index); + m_optimize_buf.unlock(); + } + } + m_keyframelist.lock(); + Vector3d P; + Matrix3d R; + cur_kf->getVioPose(P, R); + P = r_drift * P + t_drift; + R = r_drift * R; + cur_kf->updatePose(P, R); + Quaterniond Q{R}; + geometry_msgs::PoseStamped pose_stamped; + pose_stamped.header.stamp = ros::Time(cur_kf->time_stamp); + pose_stamped.header.frame_id = "global"; + pose_stamped.pose.position.x = P.x() + VISUALIZATION_SHIFT_X; + pose_stamped.pose.position.y = P.y() + VISUALIZATION_SHIFT_Y; + pose_stamped.pose.position.z = P.z(); + pose_stamped.pose.orientation.x = Q.x(); + pose_stamped.pose.orientation.y = Q.y(); + pose_stamped.pose.orientation.z = Q.z(); + pose_stamped.pose.orientation.w = Q.w(); + path[sequence_cnt].poses.push_back(pose_stamped); + path[sequence_cnt].header = pose_stamped.header; + + if (SAVE_LOOP_PATH) + { + ofstream loop_path_file(VINS_RESULT_PATH, ios::app); + loop_path_file.setf(ios::fixed, ios::floatfield); + loop_path_file.precision(0); + loop_path_file << cur_kf->time_stamp * 1e9 << ","; + loop_path_file.precision(5); + loop_path_file << P.x() << "," + << P.y() << "," + << P.z() << "," + << Q.w() << "," + << Q.x() << "," + << Q.y() << "," + << Q.z() << "," + << endl; + loop_path_file.close(); + } + //draw local connection + if (SHOW_S_EDGE) + { + list::reverse_iterator rit = keyframelist.rbegin(); + for (int i = 0; i < 4; i++) + { + if (rit == keyframelist.rend()) + break; + Vector3d conncected_P; + Matrix3d connected_R; + if((*rit)->sequence == cur_kf->sequence) + { + (*rit)->getPose(conncected_P, connected_R); + posegraph_visualization->add_edge(P, conncected_P); + } + rit++; + } + } + if (SHOW_L_EDGE) + { + if (cur_kf->has_loop) + { + //printf("[POSEGRAPH]: has loop \n"); + KeyFrame* connected_KF = 
getKeyFrame(cur_kf->loop_index); + Vector3d connected_P,P0; + Matrix3d connected_R,R0; + connected_KF->getPose(connected_P, connected_R); + //cur_kf->getVioPose(P0, R0); + cur_kf->getPose(P0, R0); + if(cur_kf->sequence > 0) + { + //printf("[POSEGRAPH]: add loop into visual \n"); + posegraph_visualization->add_loopedge(P0, connected_P + Vector3d(VISUALIZATION_SHIFT_X, VISUALIZATION_SHIFT_Y, 0)); + } + + } + } + //posegraph_visualization->add_pose(P + Vector3d(VISUALIZATION_SHIFT_X, VISUALIZATION_SHIFT_Y, 0), Q); + + keyframelist.push_back(cur_kf); + publish(); + m_keyframelist.unlock(); +} + + +void PoseGraph::loadKeyFrame(KeyFrame* cur_kf, bool flag_detect_loop) +{ + cur_kf->index = global_index; + global_index++; + int loop_index = -1; + if (flag_detect_loop) + loop_index = detectLoop(cur_kf, cur_kf->index); + else + { + addKeyFrameIntoVoc(cur_kf); + } + if (loop_index != -1) + { + printf("[POSEGRAPH]: %d detect loop with %d \n", cur_kf->index, loop_index); + KeyFrame* old_kf = getKeyFrame(loop_index); + if (cur_kf->findConnection(old_kf)) + { + if (earliest_loop_index > loop_index || earliest_loop_index == -1) + earliest_loop_index = loop_index; + m_optimize_buf.lock(); + optimize_buf.push(cur_kf->index); + m_optimize_buf.unlock(); + } + } + m_keyframelist.lock(); + Vector3d P; + Matrix3d R; + cur_kf->getPose(P, R); + Quaterniond Q{R}; + geometry_msgs::PoseStamped pose_stamped; + pose_stamped.header.stamp = ros::Time(cur_kf->time_stamp); + pose_stamped.header.frame_id = "global"; + pose_stamped.pose.position.x = P.x() + VISUALIZATION_SHIFT_X; + pose_stamped.pose.position.y = P.y() + VISUALIZATION_SHIFT_Y; + pose_stamped.pose.position.z = P.z(); + pose_stamped.pose.orientation.x = Q.x(); + pose_stamped.pose.orientation.y = Q.y(); + pose_stamped.pose.orientation.z = Q.z(); + pose_stamped.pose.orientation.w = Q.w(); + base_path.poses.push_back(pose_stamped); + base_path.header = pose_stamped.header; + + //draw local connection + if (SHOW_S_EDGE) + { + 
list::reverse_iterator rit = keyframelist.rbegin(); + for (int i = 0; i < 1; i++) + { + if (rit == keyframelist.rend()) + break; + Vector3d conncected_P; + Matrix3d connected_R; + if((*rit)->sequence == cur_kf->sequence) + { + (*rit)->getPose(conncected_P, connected_R); + posegraph_visualization->add_edge(P, conncected_P); + } + rit++; + } + } + /* + if (cur_kf->has_loop) + { + KeyFrame* connected_KF = getKeyFrame(cur_kf->loop_index); + Vector3d connected_P; + Matrix3d connected_R; + connected_KF->getPose(connected_P, connected_R); + posegraph_visualization->add_loopedge(P, connected_P, SHIFT); + } + */ + + keyframelist.push_back(cur_kf); + //publish(); + m_keyframelist.unlock(); +} + +KeyFrame* PoseGraph::getKeyFrame(int index) +{ +// unique_lock lock(m_keyframelist); + list::iterator it = keyframelist.begin(); + for (; it != keyframelist.end(); it++) + { + if((*it)->index == index) + break; + } + if (it != keyframelist.end()) + return *it; + else + return NULL; +} + +int PoseGraph::detectLoop(KeyFrame* keyframe, int frame_index) +{ + // put image into image_pool; for visualization + cv::Mat compressed_image; + if (DEBUG_IMAGE) + { + int feature_num = keyframe->keypoints.size(); + cv::resize(keyframe->image, compressed_image, cv::Size(376, 240)); + putText(compressed_image, "feature_num:" + to_string(feature_num), cv::Point2f(10, 10), CV_FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(255)); + image_pool[frame_index] = compressed_image; + } + TicToc tmp_t; + //first query; then add this frame into database! + QueryResults ret; + TicToc t_query; + int max_frame_id_allowed = std::max(0, frame_index - RECALL_IGNORE_RECENT_COUNT); + db.query(keyframe->brief_descriptors, ret, 3, max_frame_id_allowed); + //printf("[POSEGRAPH]: query time: %f", t_query.toc()); + //cout << "Searching for Image " << frame_index << ". 
" << ret << endl; + + TicToc t_add; + db.add(keyframe->brief_descriptors); + //printf("[POSEGRAPH]: add feature time: %f", t_add.toc()); + // ret[0] is the nearest neighbour's score. threshold change with neighour score + bool find_loop = false; + cv::Mat loop_result; + if (DEBUG_IMAGE) + { + loop_result = compressed_image.clone(); + if (ret.size() > 0) + putText(loop_result, "neighbour score:" + to_string(ret[0].Score), cv::Point2f(10, 50), CV_FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(255)); + } + // visual loop result + if (DEBUG_IMAGE) + { + for (unsigned int i = 0; i < ret.size(); i++) + { + int tmp_index = ret[i].Id; + auto it = image_pool.find(tmp_index); + cv::Mat tmp_image = (it->second).clone(); + putText(tmp_image, "index: " + to_string(tmp_index) + "loop score:" + to_string(ret[i].Score), cv::Point2f(10, 50), CV_FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(255)); + cv::hconcat(loop_result, tmp_image, loop_result); + } + } + + //for (unsigned int i = 0; i < ret.size(); i++) + // cout << i << " - " << ret[i].Score << endl; + + // a good match with its nerghbour + //if (ret.size() >= 1 && ret[0].Score > MIN_SCORE) + { + for (unsigned int i = 0; i < ret.size(); i++) + { + //if (ret[i].Score > ret[0].Score * 0.3) + if (ret[i].Score > MIN_SCORE && ret[i].Id < max_frame_id_allowed) + { + find_loop = true; + int tmp_index = ret[i].Id; + if (DEBUG_IMAGE && 0) + { + auto it = image_pool.find(tmp_index); + cv::Mat tmp_image = (it->second).clone(); + putText(tmp_image, "loop score:" + to_string(ret[i].Score), cv::Point2f(10, 50), CV_FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(255)); + cv::hconcat(loop_result, tmp_image, loop_result); + } + } + + } + + } +/* + if (DEBUG_IMAGE) + { + cv::imshow("loop_result", loop_result); + cv::waitKey(20); + } +*/ + //if (find_loop && frame_index > 50) + if (frame_index < 50) + return -1; + + // Loop through all, and see if we have one that is a good match! 
+ std::vector done_ids; + while(done_ids.size() < ret.size()) + { + + // First find the oldest that we have not tried yet + int min_index = INFINITY; + bool has_min = false; + for (unsigned int i = 0; i < ret.size(); i++) + { + if (ret[i].Id < min_index && ret[i].Id < max_frame_id_allowed && ret[i].Score > MIN_SCORE && std::find(done_ids.begin(),done_ids.end(),ret[i].Id)==done_ids.end()) + { + min_index = ret[i].Id; + has_min = true; + } + } + + // Break out if we have not found a min + if(!has_min) + return -1; + + // Then try to see if we can loop close with it + KeyFrame* old_kf = getKeyFrame(min_index); + if(keyframe->findConnection(old_kf)) return min_index; + else done_ids.push_back(min_index); + + } + + // failure + return -1; + + +} + +void PoseGraph::addKeyFrameIntoVoc(KeyFrame* keyframe) +{ + // put image into image_pool; for visualization + cv::Mat compressed_image; + if (DEBUG_IMAGE) + { + int feature_num = keyframe->keypoints.size(); + cv::resize(keyframe->image, compressed_image, cv::Size(376, 240)); + putText(compressed_image, "feature_num:" + to_string(feature_num), cv::Point2f(10, 10), CV_FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(255)); + image_pool[keyframe->index] = compressed_image; + } + + db.add(keyframe->brief_descriptors); +} + +void PoseGraph::optimize4DoF() +{ + while(true) + { + int cur_index = -1; + int first_looped_index = -1; + m_optimize_buf.lock(); + while(!optimize_buf.empty()) + { + cur_index = optimize_buf.front(); + first_looped_index = earliest_loop_index; + optimize_buf.pop(); + } + m_optimize_buf.unlock(); + if (cur_index != -1) + { + //printf("[POSEGRAPH]: optimize pose graph \n"); + TicToc tmp_t1; + m_keyframelist.lock(); + KeyFrame* cur_kf = getKeyFrame(cur_index); + + int max_length = cur_index + 1; + + // w^t_i w^q_i + double t_array[max_length][3]; + Quaterniond q_array[max_length]; + double euler_array[max_length][3]; + double sequence_array[max_length]; + + ceres::Problem problem; + ceres::Solver::Options options; + 
options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY; + //options.minimizer_progress_to_stdout = true; + options.max_solver_time_in_seconds = 5; + options.max_num_iterations = 20; + options.num_threads = 1; + ceres::Solver::Summary summary; + ceres::LossFunction *loss_function; + loss_function = new ceres::HuberLoss(0.1); + //loss_function = new ceres::CauchyLoss(1.0); + ceres::LocalParameterization* angle_local_parameterization = AngleLocalParameterization::Create(); + + list::iterator it; + + int i = 0; + for (it = keyframelist.begin(); it != keyframelist.end(); it++) + { + if ((*it)->index < first_looped_index) + continue; + (*it)->local_index = i; + Quaterniond tmp_q; + Matrix3d tmp_r; + Vector3d tmp_t; + (*it)->getVioPose(tmp_t, tmp_r); + tmp_q = tmp_r; + t_array[i][0] = tmp_t(0); + t_array[i][1] = tmp_t(1); + t_array[i][2] = tmp_t(2); + q_array[i] = tmp_q; + + Vector3d euler_angle = Utility::R2ypr(tmp_q.toRotationMatrix()); + euler_array[i][0] = euler_angle.x(); + euler_array[i][1] = euler_angle.y(); + euler_array[i][2] = euler_angle.z(); + + sequence_array[i] = (*it)->sequence; + + problem.AddParameterBlock(euler_array[i], 1, angle_local_parameterization); + problem.AddParameterBlock(t_array[i], 3); + + if ((*it)->index == first_looped_index || (*it)->sequence == 0) + { + problem.SetParameterBlockConstant(euler_array[i]); + problem.SetParameterBlockConstant(t_array[i]); + } + + //add edge + for (int j = 1; j < 5; j++) + { + if (i - j >= 0 && sequence_array[i] == sequence_array[i-j]) + { + Vector3d euler_conncected = Utility::R2ypr(q_array[i-j].toRotationMatrix()); + Vector3d relative_t(t_array[i][0] - t_array[i-j][0], t_array[i][1] - t_array[i-j][1], t_array[i][2] - t_array[i-j][2]); + relative_t = q_array[i-j].inverse() * relative_t; + double relative_yaw = euler_array[i][0] - euler_array[i-j][0]; + ceres::CostFunction* cost_function = FourDOFError::Create( relative_t.x(), relative_t.y(), relative_t.z(), + relative_yaw, euler_conncected.y(), 
euler_conncected.z()); + problem.AddResidualBlock(cost_function, NULL, euler_array[i-j], + t_array[i-j], + euler_array[i], + t_array[i]); + } + } + + //add loop edge + if((*it)->has_loop) + { + assert((*it)->loop_index >= first_looped_index); + int connected_index = getKeyFrame((*it)->loop_index)->local_index; + Vector3d euler_conncected = Utility::R2ypr(q_array[connected_index].toRotationMatrix()); + Vector3d relative_t; + relative_t = (*it)->getLoopRelativeT(); + double relative_yaw = (*it)->getLoopRelativeYaw(); + ceres::CostFunction* cost_function = FourDOFWeightError::Create( relative_t.x(), relative_t.y(), relative_t.z(), + relative_yaw, euler_conncected.y(), euler_conncected.z()); + problem.AddResidualBlock(cost_function, loss_function, euler_array[connected_index], + t_array[connected_index], + euler_array[i], + t_array[i]); + + } + + if ((*it)->index == cur_index) + break; + i++; + } + m_keyframelist.unlock(); + double t_create = tmp_t1.toc(); + + + TicToc tmp_t2; + ceres::Solve(options, &problem, &summary); + double t_opt = tmp_t2.toc(); + //std::cout << summary.BriefReport() << "\n"; + + //printf("[POSEGRAPH]: pose optimization time: %f \n", tmp_t.toc()); + /* + for (int j = 0 ; j < i; j++) + { + printf("[POSEGRAPH]: optimize i: %d p: %f, %f, %f\n", j, t_array[j][0], t_array[j][1], t_array[j][2] ); + } + */ + TicToc tmp_t3; + m_keyframelist.lock(); + i = 0; + for (it = keyframelist.begin(); it != keyframelist.end(); it++) + { + if ((*it)->index < first_looped_index) + continue; + Quaterniond tmp_q; + tmp_q = Utility::ypr2R(Vector3d(euler_array[i][0], euler_array[i][1], euler_array[i][2])); + Vector3d tmp_t = Vector3d(t_array[i][0], t_array[i][1], t_array[i][2]); + Matrix3d tmp_r = tmp_q.toRotationMatrix(); + (*it)-> updatePose(tmp_t, tmp_r); + + if ((*it)->index == cur_index) + break; + i++; + } + + Vector3d cur_t, vio_t; + Matrix3d cur_r, vio_r; + cur_kf->getPose(cur_t, cur_r); + cur_kf->getVioPose(vio_t, vio_r); + m_drift.lock(); + yaw_drift = 
Utility::R2ypr(cur_r).x() - Utility::R2ypr(vio_r).x(); + r_drift = Utility::ypr2R(Vector3d(yaw_drift, 0, 0)); + t_drift = cur_t - r_drift * vio_t; + m_drift.unlock(); + //cout << "t_drift " << t_drift.transpose() << endl; + //cout << "r_drift " << Utility::R2ypr(r_drift).transpose() << endl; + //cout << "yaw drift " << yaw_drift << endl; + + it++; + for (; it != keyframelist.end(); it++) + { + Vector3d P; + Matrix3d R; + (*it)->getVioPose(P, R); + P = r_drift * P + t_drift; + R = r_drift * R; + (*it)->updatePose(P, R); + } + m_keyframelist.unlock(); + updatePath(); + double t_update = tmp_t3.toc(); + + // Nice debug print + printf("[POSEGRAPH]: creation %.3f ms | optimization %.3f ms | update %.3f ms | %.3f dyaw, %.3f dpos\n", t_create, t_opt, t_update, yaw_drift, t_drift.norm()); + + + } + + std::chrono::milliseconds dura(2000); + std::this_thread::sleep_for(dura); + } + return; +} + + +void PoseGraph::optimize6DoF() +{ + while(true) + { + int cur_index = -1; + int first_looped_index = -1; + m_optimize_buf.lock(); + while(!optimize_buf.empty()) + { + cur_index = optimize_buf.front(); + first_looped_index = earliest_loop_index; + optimize_buf.pop(); + } + m_optimize_buf.unlock(); + if (cur_index != -1) + { + //printf("[POSEGRAPH]: optimize pose graph \n"); + TicToc tmp_t; + m_keyframelist.lock(); + KeyFrame* cur_kf = getKeyFrame(cur_index); + + int max_length = cur_index + 1; + + // w^t_i w^q_i + double t_array[max_length][3]; + double q_array[max_length][4]; + double sequence_array[max_length]; + + ceres::Problem problem; + ceres::Solver::Options options; + options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY; + //ptions.minimizer_progress_to_stdout = true; + options.max_solver_time_in_seconds = 5; + options.max_num_iterations = 20; + options.num_threads = 1; + ceres::Solver::Summary summary; + ceres::LossFunction *loss_function; + loss_function = new ceres::HuberLoss(0.1); + //loss_function = new ceres::CauchyLoss(1.0); + ceres::LocalParameterization* 
local_parameterization = new ceres::QuaternionParameterization(); + + list::iterator it; + + int i = 0; + for (it = keyframelist.begin(); it != keyframelist.end(); it++) + { + if ((*it)->index < first_looped_index) + continue; + (*it)->local_index = i; + Quaterniond tmp_q; + Matrix3d tmp_r; + Vector3d tmp_t; + (*it)->getVioPose(tmp_t, tmp_r); + tmp_q = tmp_r; + t_array[i][0] = tmp_t(0); + t_array[i][1] = tmp_t(1); + t_array[i][2] = tmp_t(2); + q_array[i][0] = tmp_q.w(); + q_array[i][1] = tmp_q.x(); + q_array[i][2] = tmp_q.y(); + q_array[i][3] = tmp_q.z(); + + sequence_array[i] = (*it)->sequence; + + problem.AddParameterBlock(q_array[i], 4, local_parameterization); + problem.AddParameterBlock(t_array[i], 3); + + if ((*it)->index == first_looped_index || (*it)->sequence == 0) + { + problem.SetParameterBlockConstant(q_array[i]); + problem.SetParameterBlockConstant(t_array[i]); + } + + //add edge + for (int j = 1; j < 5; j++) + { + if (i - j >= 0 && sequence_array[i] == sequence_array[i-j]) + { + Vector3d relative_t(t_array[i][0] - t_array[i-j][0], t_array[i][1] - t_array[i-j][1], t_array[i][2] - t_array[i-j][2]); + Quaterniond q_i_j = Quaterniond(q_array[i-j][0], q_array[i-j][1], q_array[i-j][2], q_array[i-j][3]); + Quaterniond q_i = Quaterniond(q_array[i][0], q_array[i][1], q_array[i][2], q_array[i][3]); + relative_t = q_i_j.inverse() * relative_t; + Quaterniond relative_q = q_i_j.inverse() * q_i; + ceres::CostFunction* vo_function = RelativeRTError::Create(relative_t.x(), relative_t.y(), relative_t.z(), + relative_q.w(), relative_q.x(), relative_q.y(), relative_q.z(), + 0.1, 0.01); + problem.AddResidualBlock(vo_function, NULL, q_array[i-j], t_array[i-j], q_array[i], t_array[i]); + } + } + + //add loop edge + + if((*it)->has_loop) + { + assert((*it)->loop_index >= first_looped_index); + int connected_index = getKeyFrame((*it)->loop_index)->local_index; + Vector3d relative_t; + relative_t = (*it)->getLoopRelativeT(); + Quaterniond relative_q; + relative_q = 
(*it)->getLoopRelativeQ(); + ceres::CostFunction* loop_function = RelativeRTError::Create(relative_t.x(), relative_t.y(), relative_t.z(), + relative_q.w(), relative_q.x(), relative_q.y(), relative_q.z(), + 0.1, 0.01); + problem.AddResidualBlock(loop_function, loss_function, q_array[connected_index], t_array[connected_index], q_array[i], t_array[i]); + } + + if ((*it)->index == cur_index) + break; + i++; + } + m_keyframelist.unlock(); + + ceres::Solve(options, &problem, &summary); + //std::cout << summary.BriefReport() << "\n"; + + //printf("[POSEGRAPH]: pose optimization time: %f \n", tmp_t.toc()); + /* + for (int j = 0 ; j < i; j++) + { + printf("[POSEGRAPH]: optimize i: %d p: %f, %f, %f\n", j, t_array[j][0], t_array[j][1], t_array[j][2] ); + } + */ + m_keyframelist.lock(); + i = 0; + for (it = keyframelist.begin(); it != keyframelist.end(); it++) + { + if ((*it)->index < first_looped_index) + continue; + Quaterniond tmp_q(q_array[i][0], q_array[i][1], q_array[i][2], q_array[i][3]); + Vector3d tmp_t = Vector3d(t_array[i][0], t_array[i][1], t_array[i][2]); + Matrix3d tmp_r = tmp_q.toRotationMatrix(); + (*it)-> updatePose(tmp_t, tmp_r); + + if ((*it)->index == cur_index) + break; + i++; + } + + Vector3d cur_t, vio_t; + Matrix3d cur_r, vio_r; + cur_kf->getPose(cur_t, cur_r); + cur_kf->getVioPose(vio_t, vio_r); + m_drift.lock(); + r_drift = cur_r * vio_r.transpose(); + t_drift = cur_t - r_drift * vio_t; + m_drift.unlock(); + //cout << "t_drift " << t_drift.transpose() << endl; + //cout << "r_drift " << Utility::R2ypr(r_drift).transpose() << endl; + + it++; + for (; it != keyframelist.end(); it++) + { + Vector3d P; + Matrix3d R; + (*it)->getVioPose(P, R); + P = r_drift * P + t_drift; + R = r_drift * R; + (*it)->updatePose(P, R); + } + m_keyframelist.unlock(); + updatePath(); + + // Nice debug print + printf("[POSEGRAPH]: pose optimization in %.3f seconds | %.3f dori, %.3f dpos\n", tmp_t.toc(), Utility::R2ypr(r_drift).norm(), t_drift.norm()); + + } + + 
std::chrono::milliseconds dura(2000); + std::this_thread::sleep_for(dura); + } + return; +} + +void PoseGraph::updatePath() +{ + m_keyframelist.lock(); + list::iterator it; + for (int i = 1; i <= sequence_cnt; i++) + { + path[i].poses.clear(); + } + base_path.poses.clear(); + posegraph_visualization->reset(); + + if (SAVE_LOOP_PATH) + { + ofstream loop_path_file_tmp(VINS_RESULT_PATH, ios::out); + loop_path_file_tmp.close(); + } + + for (it = keyframelist.begin(); it != keyframelist.end(); it++) + { + Vector3d P; + Matrix3d R; + (*it)->getPose(P, R); + Quaterniond Q; + Q = R; +// printf("[POSEGRAPH]: path p: %f, %f, %f\n", P.x(), P.z(), P.y() ); + + geometry_msgs::PoseStamped pose_stamped; + pose_stamped.header.stamp = ros::Time((*it)->time_stamp); + pose_stamped.header.frame_id = "global"; + pose_stamped.pose.position.x = P.x() + VISUALIZATION_SHIFT_X; + pose_stamped.pose.position.y = P.y() + VISUALIZATION_SHIFT_Y; + pose_stamped.pose.position.z = P.z(); + pose_stamped.pose.orientation.x = Q.x(); + pose_stamped.pose.orientation.y = Q.y(); + pose_stamped.pose.orientation.z = Q.z(); + pose_stamped.pose.orientation.w = Q.w(); + if((*it)->sequence == 0) + { + base_path.poses.push_back(pose_stamped); + base_path.header = pose_stamped.header; + } + else + { + path[(*it)->sequence].poses.push_back(pose_stamped); + path[(*it)->sequence].header = pose_stamped.header; + } + + if (SAVE_LOOP_PATH) + { + ofstream loop_path_file(VINS_RESULT_PATH, ios::app); + loop_path_file.setf(ios::fixed, ios::floatfield); + loop_path_file.precision(0); + loop_path_file << (*it)->time_stamp * 1e9 << ","; + loop_path_file.precision(5); + loop_path_file << P.x() << "," + << P.y() << "," + << P.z() << "," + << Q.w() << "," + << Q.x() << "," + << Q.y() << "," + << Q.z() << "," + << endl; + loop_path_file.close(); + } + //draw local connection + if (SHOW_S_EDGE) + { + list::reverse_iterator rit = keyframelist.rbegin(); + list::reverse_iterator lrit; + for (; rit != keyframelist.rend(); rit++) + { + 
if ((*rit)->index == (*it)->index) + { + lrit = rit; + lrit++; + for (int i = 0; i < 4; i++) + { + if (lrit == keyframelist.rend()) + break; + if((*lrit)->sequence == (*it)->sequence) + { + Vector3d conncected_P; + Matrix3d connected_R; + (*lrit)->getPose(conncected_P, connected_R); + posegraph_visualization->add_edge(P, conncected_P); + } + lrit++; + } + break; + } + } + } + if (SHOW_L_EDGE) + { + if ((*it)->has_loop && (*it)->sequence == sequence_cnt) + { + + KeyFrame* connected_KF = getKeyFrame((*it)->loop_index); + Vector3d connected_P; + Matrix3d connected_R; + connected_KF->getPose(connected_P, connected_R); + //(*it)->getVioPose(P, R); + (*it)->getPose(P, R); + if((*it)->sequence > 0) + { + posegraph_visualization->add_loopedge(P, connected_P + Vector3d(VISUALIZATION_SHIFT_X, VISUALIZATION_SHIFT_Y, 0)); + } + } + } + + } + publish(); + m_keyframelist.unlock(); +} + + +void PoseGraph::savePoseGraph() +{ + m_keyframelist.lock(); + TicToc tmp_t; + FILE *pFile; + printf("[POSEGRAPH]: pose graph path: %s\n",POSE_GRAPH_SAVE_PATH.c_str()); + printf("[POSEGRAPH]: pose graph saving... 
\n"); + string file_path = POSE_GRAPH_SAVE_PATH + "pose_graph.txt"; + pFile = fopen (file_path.c_str(),"w"); + //fprintf(pFile, "index time_stamp Tx Ty Tz Qw Qx Qy Qz loop_index loop_info\n"); + list::iterator it; + for (it = keyframelist.begin(); it != keyframelist.end(); it++) + { + std::string image_path, descriptor_path, brief_path, keypoints_path; + if (DEBUG_IMAGE) + { + image_path = POSE_GRAPH_SAVE_PATH + to_string((*it)->index) + "_image.png"; + imwrite(image_path.c_str(), (*it)->image); + } + Quaterniond VIO_tmp_Q{(*it)->vio_R_w_i}; + Quaterniond PG_tmp_Q{(*it)->R_w_i}; + Vector3d VIO_tmp_T = (*it)->vio_T_w_i; + Vector3d PG_tmp_T = (*it)->T_w_i; + + fprintf (pFile, " %d %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %d %f %f %f %f %f %f %f %f %d\n",(*it)->index, (*it)->time_stamp, + VIO_tmp_T.x(), VIO_tmp_T.y(), VIO_tmp_T.z(), + PG_tmp_T.x(), PG_tmp_T.y(), PG_tmp_T.z(), + VIO_tmp_Q.w(), VIO_tmp_Q.x(), VIO_tmp_Q.y(), VIO_tmp_Q.z(), + PG_tmp_Q.w(), PG_tmp_Q.x(), PG_tmp_Q.y(), PG_tmp_Q.z(), + (*it)->loop_index, + (*it)->loop_info(0), (*it)->loop_info(1), (*it)->loop_info(2), (*it)->loop_info(3), + (*it)->loop_info(4), (*it)->loop_info(5), (*it)->loop_info(6), (*it)->loop_info(7), + (int)(*it)->keypoints.size()); + + // write keypoints, brief_descriptors vector keypoints vector brief_descriptors; + assert((*it)->keypoints.size() == (*it)->brief_descriptors.size()); + brief_path = POSE_GRAPH_SAVE_PATH + to_string((*it)->index) + "_briefdes.dat"; + std::ofstream brief_file(brief_path, std::ios::binary); + keypoints_path = POSE_GRAPH_SAVE_PATH + to_string((*it)->index) + "_keypoints.txt"; + FILE *keypoints_file; + keypoints_file = fopen(keypoints_path.c_str(), "w"); + for (int i = 0; i < (int)(*it)->keypoints.size(); i++) + { + brief_file << (*it)->brief_descriptors[i] << endl; + fprintf(keypoints_file, "%f %f %f %f\n", (*it)->keypoints[i].pt.x, (*it)->keypoints[i].pt.y, + (*it)->keypoints_norm[i].pt.x, (*it)->keypoints_norm[i].pt.y); + } + brief_file.close(); + 
fclose(keypoints_file); + } + fclose(pFile); + + printf("[POSEGRAPH]: save pose graph time: %f s\n", tmp_t.toc() / 1000); + m_keyframelist.unlock(); +} +void PoseGraph::loadPoseGraph() +{ + TicToc tmp_t; + FILE * pFile; + string file_path = POSE_GRAPH_SAVE_PATH + "pose_graph.txt"; + printf("[POSEGRAPH]: lode pose graph from: %s \n", file_path.c_str()); + printf("[POSEGRAPH]: pose graph loading...\n"); + pFile = fopen (file_path.c_str(),"r"); + if (pFile == NULL) + { + printf("[POSEGRAPH]: lode previous pose graph error: wrong previous pose graph path or no previous pose graph \n the system will start with new pose graph \n"); + return; + } + int index; + double time_stamp; + double VIO_Tx, VIO_Ty, VIO_Tz; + double PG_Tx, PG_Ty, PG_Tz; + double VIO_Qw, VIO_Qx, VIO_Qy, VIO_Qz; + double PG_Qw, PG_Qx, PG_Qy, PG_Qz; + double loop_info_0, loop_info_1, loop_info_2, loop_info_3; + double loop_info_4, loop_info_5, loop_info_6, loop_info_7; + int loop_index; + int keypoints_num; + Eigen::Matrix loop_info; + int cnt = 0; + while (fscanf(pFile,"%d %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %d %lf %lf %lf %lf %lf %lf %lf %lf %d", &index, &time_stamp, + &VIO_Tx, &VIO_Ty, &VIO_Tz, + &PG_Tx, &PG_Ty, &PG_Tz, + &VIO_Qw, &VIO_Qx, &VIO_Qy, &VIO_Qz, + &PG_Qw, &PG_Qx, &PG_Qy, &PG_Qz, + &loop_index, + &loop_info_0, &loop_info_1, &loop_info_2, &loop_info_3, + &loop_info_4, &loop_info_5, &loop_info_6, &loop_info_7, + &keypoints_num) != EOF) + { + /* + printf("[POSEGRAPH]: I read: %d %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %d %lf %lf %lf %lf %lf %lf %lf %lf %d\n", index, time_stamp, + VIO_Tx, VIO_Ty, VIO_Tz, + PG_Tx, PG_Ty, PG_Tz, + VIO_Qw, VIO_Qx, VIO_Qy, VIO_Qz, + PG_Qw, PG_Qx, PG_Qy, PG_Qz, + loop_index, + loop_info_0, loop_info_1, loop_info_2, loop_info_3, + loop_info_4, loop_info_5, loop_info_6, loop_info_7, + keypoints_num); + */ + cv::Mat image; + std::string image_path, descriptor_path; + if (DEBUG_IMAGE) + { + image_path = POSE_GRAPH_SAVE_PATH + 
to_string(index) + "_image.png"; + image = cv::imread(image_path.c_str(), 0); + } + + Vector3d VIO_T(VIO_Tx, VIO_Ty, VIO_Tz); + Vector3d PG_T(PG_Tx, PG_Ty, PG_Tz); + Quaterniond VIO_Q; + VIO_Q.w() = VIO_Qw; + VIO_Q.x() = VIO_Qx; + VIO_Q.y() = VIO_Qy; + VIO_Q.z() = VIO_Qz; + Quaterniond PG_Q; + PG_Q.w() = PG_Qw; + PG_Q.x() = PG_Qx; + PG_Q.y() = PG_Qy; + PG_Q.z() = PG_Qz; + Matrix3d VIO_R, PG_R; + VIO_R = VIO_Q.toRotationMatrix(); + PG_R = PG_Q.toRotationMatrix(); + Eigen::Matrix loop_info; + loop_info << loop_info_0, loop_info_1, loop_info_2, loop_info_3, loop_info_4, loop_info_5, loop_info_6, loop_info_7; + + if (loop_index != -1) + if (earliest_loop_index > loop_index || earliest_loop_index == -1) + { + earliest_loop_index = loop_index; + } + + // load keypoints, brief_descriptors + string brief_path = POSE_GRAPH_SAVE_PATH + to_string(index) + "_briefdes.dat"; + std::ifstream brief_file(brief_path, std::ios::binary); + string keypoints_path = POSE_GRAPH_SAVE_PATH + to_string(index) + "_keypoints.txt"; + FILE *keypoints_file; + keypoints_file = fopen(keypoints_path.c_str(), "r"); + vector keypoints; + vector keypoints_norm; + vector brief_descriptors; + for (int i = 0; i < keypoints_num; i++) + { + BRIEF::bitset tmp_des; + brief_file >> tmp_des; + brief_descriptors.push_back(tmp_des); + cv::KeyPoint tmp_keypoint; + cv::KeyPoint tmp_keypoint_norm; + double p_x, p_y, p_x_norm, p_y_norm; + if(!fscanf(keypoints_file,"%lf %lf %lf %lf", &p_x, &p_y, &p_x_norm, &p_y_norm)) + printf("[POSEGRAPH]: fail to load pose graph \n"); + tmp_keypoint.pt.x = p_x; + tmp_keypoint.pt.y = p_y; + tmp_keypoint_norm.pt.x = p_x_norm; + tmp_keypoint_norm.pt.y = p_y_norm; + keypoints.push_back(tmp_keypoint); + keypoints_norm.push_back(tmp_keypoint_norm); + } + brief_file.close(); + fclose(keypoints_file); + + KeyFrame* keyframe = new KeyFrame(time_stamp, index, VIO_T, VIO_R, PG_T, PG_R, image, loop_index, loop_info, keypoints, keypoints_norm, brief_descriptors); + loadKeyFrame(keyframe, 0); + 
if (cnt % 20 == 0) + { + publish(); + } + cnt++; + } + fclose (pFile); + printf("[POSEGRAPH]: load pose graph time: %f s\n", tmp_t.toc()/1000); + base_sequence = 0; +} + +void PoseGraph::publish() +{ + for (int i = 1; i <= sequence_cnt; i++) + { + //if (sequence_loop[i] == true || i == base_sequence) + if (1 || i == base_sequence) + { + pub_pg_path.publish(path[i]); + pub_path[i].publish(path[i]); + posegraph_visualization->publish_by(pub_pose_graph, path[sequence_cnt].header); + } + } + pub_base_path.publish(base_path); + //posegraph_visualization->publish_by(pub_pose_graph, path[sequence_cnt].header); +} diff --git a/loop_fusion/src/pose_graph.h b/loop_fusion/src/pose_graph.h new file mode 100644 index 0000000..ba73a11 --- /dev/null +++ b/loop_fusion/src/pose_graph.h @@ -0,0 +1,334 @@ +/******************************************************* + * Copyright (C) 2019, Aerial Robotics Group, Hong Kong University of Science and Technology + * + * This file is part of VINS. + * + * Licensed under the GNU General Public License v3.0; + * you may not use this file except in compliance with the License. 
+ * + * Author: Qin Tong (qintonguav@gmail.com) + *******************************************************/ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "keyframe.h" +#include "utility/tic_toc.h" +#include "utility/utility.h" +#include "utility/CameraPoseVisualization.h" +#include "utility/tic_toc.h" +#include "ThirdParty/DBoW/DBoW2.h" +#include "ThirdParty/DVision/DVision.h" +#include "ThirdParty/DBoW/TemplatedDatabase.h" +#include "ThirdParty/DBoW/TemplatedVocabulary.h" + + +#define SHOW_S_EDGE false +#define SHOW_L_EDGE true +#define SAVE_LOOP_PATH true + +using namespace DVision; +using namespace DBoW2; + +class PoseGraph +{ +public: + PoseGraph(); + ~PoseGraph(); + void registerPub(ros::NodeHandle &n); + void addKeyFrame(KeyFrame* cur_kf, bool flag_detect_loop); + void loadKeyFrame(KeyFrame* cur_kf, bool flag_detect_loop); + void loadVocabulary(std::string voc_path); + void setIMUFlag(bool _use_imu); + KeyFrame* getKeyFrame(int index); + nav_msgs::Path path[10]; + nav_msgs::Path base_path; + CameraPoseVisualization* posegraph_visualization; + void savePoseGraph(); + void loadPoseGraph(); + void publish(); + Vector3d t_drift; + double yaw_drift; + Matrix3d r_drift; + // world frame( base sequence or first sequence)<----> cur sequence frame + Vector3d w_t_vio; + Matrix3d w_r_vio; + + +private: + int detectLoop(KeyFrame* keyframe, int frame_index); + void addKeyFrameIntoVoc(KeyFrame* keyframe); + void optimize4DoF(); + void optimize6DoF(); + void updatePath(); + list keyframelist; + std::mutex m_keyframelist; + std::mutex m_optimize_buf; + std::mutex m_path; + std::mutex m_drift; + std::thread t_optimization; + std::queue optimize_buf; + + int global_index; + int sequence_cnt; + vector sequence_loop; + map image_pool; + int earliest_loop_index; + int base_sequence; + bool use_imu; + + BriefDatabase db; + BriefVocabulary* voc; + + 
ros::Publisher pub_pg_path; + ros::Publisher pub_base_path; + ros::Publisher pub_pose_graph; + ros::Publisher pub_path[10]; +}; + +template inline +void QuaternionInverse(const T q[4], T q_inverse[4]) +{ + q_inverse[0] = q[0]; + q_inverse[1] = -q[1]; + q_inverse[2] = -q[2]; + q_inverse[3] = -q[3]; +}; + +template +T NormalizeAngle(const T& angle_degrees) { + if (angle_degrees > T(180.0)) + return angle_degrees - T(360.0); + else if (angle_degrees < T(-180.0)) + return angle_degrees + T(360.0); + else + return angle_degrees; +}; + +class AngleLocalParameterization { + public: + + template + bool operator()(const T* theta_radians, const T* delta_theta_radians, + T* theta_radians_plus_delta) const { + *theta_radians_plus_delta = + NormalizeAngle(*theta_radians + *delta_theta_radians); + + return true; + } + + static ceres::LocalParameterization* Create() { + return (new ceres::AutoDiffLocalParameterization); + } +}; + +template +void YawPitchRollToRotationMatrix(const T yaw, const T pitch, const T roll, T R[9]) +{ + + T y = yaw / T(180.0) * T(M_PI); + T p = pitch / T(180.0) * T(M_PI); + T r = roll / T(180.0) * T(M_PI); + + + R[0] = cos(y) * cos(p); + R[1] = -sin(y) * cos(r) + cos(y) * sin(p) * sin(r); + R[2] = sin(y) * sin(r) + cos(y) * sin(p) * cos(r); + R[3] = sin(y) * cos(p); + R[4] = cos(y) * cos(r) + sin(y) * sin(p) * sin(r); + R[5] = -cos(y) * sin(r) + sin(y) * sin(p) * cos(r); + R[6] = -sin(p); + R[7] = cos(p) * sin(r); + R[8] = cos(p) * cos(r); +}; + +template +void RotationMatrixTranspose(const T R[9], T inv_R[9]) +{ + inv_R[0] = R[0]; + inv_R[1] = R[3]; + inv_R[2] = R[6]; + inv_R[3] = R[1]; + inv_R[4] = R[4]; + inv_R[5] = R[7]; + inv_R[6] = R[2]; + inv_R[7] = R[5]; + inv_R[8] = R[8]; +}; + +template +void RotationMatrixRotatePoint(const T R[9], const T t[3], T r_t[3]) +{ + r_t[0] = R[0] * t[0] + R[1] * t[1] + R[2] * t[2]; + r_t[1] = R[3] * t[0] + R[4] * t[1] + R[5] * t[2]; + r_t[2] = R[6] * t[0] + R[7] * t[1] + R[8] * t[2]; +}; + +struct FourDOFError +{ + 
FourDOFError(double t_x, double t_y, double t_z, double relative_yaw, double pitch_i, double roll_i) + :t_x(t_x), t_y(t_y), t_z(t_z), relative_yaw(relative_yaw), pitch_i(pitch_i), roll_i(roll_i){} + + template + bool operator()(const T* const yaw_i, const T* ti, const T* yaw_j, const T* tj, T* residuals) const + { + T t_w_ij[3]; + t_w_ij[0] = tj[0] - ti[0]; + t_w_ij[1] = tj[1] - ti[1]; + t_w_ij[2] = tj[2] - ti[2]; + + // euler to rotation + T w_R_i[9]; + YawPitchRollToRotationMatrix(yaw_i[0], T(pitch_i), T(roll_i), w_R_i); + // rotation transpose + T i_R_w[9]; + RotationMatrixTranspose(w_R_i, i_R_w); + // rotation matrix rotate point + T t_i_ij[3]; + RotationMatrixRotatePoint(i_R_w, t_w_ij, t_i_ij); + + residuals[0] = (t_i_ij[0] - T(t_x)); + residuals[1] = (t_i_ij[1] - T(t_y)); + residuals[2] = (t_i_ij[2] - T(t_z)); + residuals[3] = NormalizeAngle(yaw_j[0] - yaw_i[0] - T(relative_yaw)); + + return true; + } + + static ceres::CostFunction* Create(const double t_x, const double t_y, const double t_z, + const double relative_yaw, const double pitch_i, const double roll_i) + { + return (new ceres::AutoDiffCostFunction< + FourDOFError, 4, 1, 3, 1, 3>( + new FourDOFError(t_x, t_y, t_z, relative_yaw, pitch_i, roll_i))); + } + + double t_x, t_y, t_z; + double relative_yaw, pitch_i, roll_i; + +}; + +struct FourDOFWeightError +{ + FourDOFWeightError(double t_x, double t_y, double t_z, double relative_yaw, double pitch_i, double roll_i) + :t_x(t_x), t_y(t_y), t_z(t_z), relative_yaw(relative_yaw), pitch_i(pitch_i), roll_i(roll_i){ + weight = 1; + } + + template + bool operator()(const T* const yaw_i, const T* ti, const T* yaw_j, const T* tj, T* residuals) const + { + T t_w_ij[3]; + t_w_ij[0] = tj[0] - ti[0]; + t_w_ij[1] = tj[1] - ti[1]; + t_w_ij[2] = tj[2] - ti[2]; + + // euler to rotation + T w_R_i[9]; + YawPitchRollToRotationMatrix(yaw_i[0], T(pitch_i), T(roll_i), w_R_i); + // rotation transpose + T i_R_w[9]; + RotationMatrixTranspose(w_R_i, i_R_w); + // rotation matrix 
rotate point + T t_i_ij[3]; + RotationMatrixRotatePoint(i_R_w, t_w_ij, t_i_ij); + + residuals[0] = (t_i_ij[0] - T(t_x)) * T(weight); + residuals[1] = (t_i_ij[1] - T(t_y)) * T(weight); + residuals[2] = (t_i_ij[2] - T(t_z)) * T(weight); + residuals[3] = NormalizeAngle((yaw_j[0] - yaw_i[0] - T(relative_yaw))) * T(weight) / T(10.0); + + return true; + } + + static ceres::CostFunction* Create(const double t_x, const double t_y, const double t_z, + const double relative_yaw, const double pitch_i, const double roll_i) + { + return (new ceres::AutoDiffCostFunction< + FourDOFWeightError, 4, 1, 3, 1, 3>( + new FourDOFWeightError(t_x, t_y, t_z, relative_yaw, pitch_i, roll_i))); + } + + double t_x, t_y, t_z; + double relative_yaw, pitch_i, roll_i; + double weight; + +}; + +struct RelativeRTError +{ + RelativeRTError(double t_x, double t_y, double t_z, + double q_w, double q_x, double q_y, double q_z, + double t_var, double q_var) + :t_x(t_x), t_y(t_y), t_z(t_z), + q_w(q_w), q_x(q_x), q_y(q_y), q_z(q_z), + t_var(t_var), q_var(q_var){} + + template + bool operator()(const T* const w_q_i, const T* ti, const T* w_q_j, const T* tj, T* residuals) const + { + T t_w_ij[3]; + t_w_ij[0] = tj[0] - ti[0]; + t_w_ij[1] = tj[1] - ti[1]; + t_w_ij[2] = tj[2] - ti[2]; + + T i_q_w[4]; + QuaternionInverse(w_q_i, i_q_w); + + T t_i_ij[3]; + ceres::QuaternionRotatePoint(i_q_w, t_w_ij, t_i_ij); + + residuals[0] = (t_i_ij[0] - T(t_x)) / T(t_var); + residuals[1] = (t_i_ij[1] - T(t_y)) / T(t_var); + residuals[2] = (t_i_ij[2] - T(t_z)) / T(t_var); + + T relative_q[4]; + relative_q[0] = T(q_w); + relative_q[1] = T(q_x); + relative_q[2] = T(q_y); + relative_q[3] = T(q_z); + + T q_i_j[4]; + ceres::QuaternionProduct(i_q_w, w_q_j, q_i_j); + + T relative_q_inv[4]; + QuaternionInverse(relative_q, relative_q_inv); + + T error_q[4]; + ceres::QuaternionProduct(relative_q_inv, q_i_j, error_q); + + residuals[3] = T(2) * error_q[1] / T(q_var); + residuals[4] = T(2) * error_q[2] / T(q_var); + residuals[5] = T(2) * 
error_q[3] / T(q_var); + + return true; + } + + static ceres::CostFunction* Create(const double t_x, const double t_y, const double t_z, + const double q_w, const double q_x, const double q_y, const double q_z, + const double t_var, const double q_var) + { + return (new ceres::AutoDiffCostFunction< + RelativeRTError, 6, 4, 3, 4, 3>( + new RelativeRTError(t_x, t_y, t_z, q_w, q_x, q_y, q_z, t_var, q_var))); + } + + double t_x, t_y, t_z, t_norm; + double q_w, q_x, q_y, q_z; + double t_var, q_var; + +}; \ No newline at end of file diff --git a/loop_fusion/src/pose_graph_node.cpp b/loop_fusion/src/pose_graph_node.cpp new file mode 100644 index 0000000..12c83db --- /dev/null +++ b/loop_fusion/src/pose_graph_node.cpp @@ -0,0 +1,633 @@ +/******************************************************* + * Copyright (C) 2019, Aerial Robotics Group, Hong Kong University of Science and Technology + * + * This file is part of VINS. + * + * Licensed under the GNU General Public License v3.0; + * you may not use this file except in compliance with the License. 
+ * + * Author: Qin Tong (qintonguav@gmail.com) + *******************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "keyframe.h" +#include "utility/tic_toc.h" +#include "pose_graph.h" +#include "utility/CameraPoseVisualization.h" +#include "parameters.h" +#define SKIP_FIRST_CNT 2 +using namespace std; + +queue image_buf; +queue point_buf; +queue pose_buf; +queue odometry_buf; +std::mutex m_buf; +std::mutex m_process; +int frame_index = 0; +int sequence = 1; +PoseGraph posegraph; +int skip_first_cnt = 0; +int SKIP_CNT; +int skip_cnt = 0; +bool load_flag = 0; +bool start_flag = 0; +double SKIP_DIS = 0; +double MIN_SCORE = 0.015; +double PNP_INFLATION = 1.0; +int RECALL_IGNORE_RECENT_COUNT = 50; +double MAX_THETA_DIFF = 30.0; +double MAX_POS_DIFF = 20.0; +int MIN_LOOP_NUM = 25; + +int VISUALIZATION_SHIFT_X; +int VISUALIZATION_SHIFT_Y; +int ROW; +int COL; +int DEBUG_IMAGE; + +camodocal::CameraPtr m_camera; +double max_focallength = 460.0; +Eigen::Vector3d tic; +Eigen::Matrix3d qic; +ros::Publisher pub_match_img; +ros::Publisher pub_camera_pose_visual; +ros::Publisher pub_odometry_rect; +ros::Publisher pub_pose_rect; + +std::string BRIEF_PATTERN_FILE; +std::string POSE_GRAPH_SAVE_PATH; +std::string VINS_RESULT_PATH; +CameraPoseVisualization cameraposevisual(1, 0, 0, 1); +Eigen::Vector3d last_t(-100, -100, -100); +double last_image_time = -1; + +ros::Publisher pub_point_cloud, pub_margin_cloud; + +void new_sequence() +{ + printf("[POSEGRAPH]: new sequence\n"); + sequence++; + printf("[POSEGRAPH]: sequence cnt %d \n", sequence); + if (sequence > 5) + { + ROS_WARN("only support 5 sequences since it's boring to copy code for more sequences."); + ROS_BREAK(); + } + posegraph.posegraph_visualization->reset(); + posegraph.publish(); + m_buf.lock(); + while(!image_buf.empty()) + 
image_buf.pop(); + while(!point_buf.empty()) + point_buf.pop(); + while(!pose_buf.empty()) + pose_buf.pop(); + while(!odometry_buf.empty()) + odometry_buf.pop(); + m_buf.unlock(); +} + +void image_callback(const sensor_msgs::ImageConstPtr &image_msg) +{ +// ROS_INFO("entering image_callback"); + m_buf.lock(); + image_buf.push(image_msg); + m_buf.unlock(); + //printf("[POSEGRAPH]: image time %f \n", image_msg->header.stamp.toSec()); + + // detect unstable camera stream + if (last_image_time == -1) + last_image_time = image_msg->header.stamp.toSec(); + else if (image_msg->header.stamp.toSec() - last_image_time > 1.0 || image_msg->header.stamp.toSec() < last_image_time) + { + ROS_WARN("image discontinue! detect a new sequence!"); + new_sequence(); + } + last_image_time = image_msg->header.stamp.toSec(); +// ROS_INFO("exiting image_callback"); +} + +void point_callback(const sensor_msgs::PointCloudConstPtr &point_msg) +{ +// ROS_INFO("entering point_callback"); + m_buf.lock(); + point_buf.push(point_msg); + m_buf.unlock(); + /* + for (unsigned int i = 0; i < point_msg->points.size(); i++) + { + printf("[POSEGRAPH]: %d, 3D point: %f, %f, %f 2D point %f, %f \n",i , point_msg->points[i].x, + point_msg->points[i].y, + point_msg->points[i].z, + point_msg->channels[i].values[0], + point_msg->channels[i].values[1]); + } + */ + // for visualization + sensor_msgs::PointCloud point_cloud; + point_cloud.header = point_msg->header; + for (unsigned int i = 0; i < point_msg->points.size(); i++) + { + cv::Point3f p_3d; + p_3d.x = point_msg->points[i].x; + p_3d.y = point_msg->points[i].y; + p_3d.z = point_msg->points[i].z; + Eigen::Vector3d tmp = posegraph.r_drift * Eigen::Vector3d(p_3d.x, p_3d.y, p_3d.z) + posegraph.t_drift; + geometry_msgs::Point32 p; + p.x = tmp(0); + p.y = tmp(1); + p.z = tmp(2); + point_cloud.points.push_back(p); + } + pub_point_cloud.publish(point_cloud); +// ROS_INFO("exiting point_callback"); +} + +// only for visualization +void margin_point_callback(const 
sensor_msgs::PointCloudConstPtr &point_msg) +{ +// ROS_INFO("entering margin_point_callback"); + sensor_msgs::PointCloud point_cloud; + point_cloud.header = point_msg->header; + for (unsigned int i = 0; i < point_msg->points.size(); i++) + { + cv::Point3f p_3d; + p_3d.x = point_msg->points[i].x; + p_3d.y = point_msg->points[i].y; + p_3d.z = point_msg->points[i].z; + Eigen::Vector3d tmp = posegraph.r_drift * Eigen::Vector3d(p_3d.x, p_3d.y, p_3d.z) + posegraph.t_drift; + geometry_msgs::Point32 p; + p.x = tmp(0); + p.y = tmp(1); + p.z = tmp(2); + point_cloud.points.push_back(p); + } + pub_margin_cloud.publish(point_cloud); +// ROS_INFO("exiting margin_point_callback"); +} + +void pose_callback(const nav_msgs::Odometry::ConstPtr &pose_msg) +{ +// ROS_INFO("entering pose_callback"); + m_buf.lock(); + pose_buf.push(pose_msg); + m_buf.unlock(); + /* + printf("[POSEGRAPH]: pose t: %f, %f, %f q: %f, %f, %f %f \n", pose_msg->pose.pose.position.x, + pose_msg->pose.pose.position.y, + pose_msg->pose.pose.position.z, + pose_msg->pose.pose.orientation.w, + pose_msg->pose.pose.orientation.x, + pose_msg->pose.pose.orientation.y, + pose_msg->pose.pose.orientation.z); + */ +// ROS_INFO("exiting pose_callback"); +} + +void vio_callback(const nav_msgs::Odometry::ConstPtr &pose_msg) +{ +// ROS_INFO("entering vio_callback"); + Vector3d vio_t(pose_msg->pose.pose.position.x, pose_msg->pose.pose.position.y, pose_msg->pose.pose.position.z); + Quaterniond vio_q; + vio_q.w() = pose_msg->pose.pose.orientation.w; + vio_q.x() = pose_msg->pose.pose.orientation.x; + vio_q.y() = pose_msg->pose.pose.orientation.y; + vio_q.z() = pose_msg->pose.pose.orientation.z; + + vio_t = posegraph.w_r_vio * vio_t + posegraph.w_t_vio; + vio_q = posegraph.w_r_vio * vio_q; + + vio_t = posegraph.r_drift * vio_t + posegraph.t_drift; + vio_q = posegraph.r_drift * vio_q; + + nav_msgs::Odometry odometry; + odometry.header = pose_msg->header; + odometry.header.frame_id = "global"; + odometry.pose.pose.position.x = 
vio_t.x(); + odometry.pose.pose.position.y = vio_t.y(); + odometry.pose.pose.position.z = vio_t.z(); + odometry.pose.pose.orientation.x = vio_q.x(); + odometry.pose.pose.orientation.y = vio_q.y(); + odometry.pose.pose.orientation.z = vio_q.z(); + odometry.pose.pose.orientation.w = vio_q.w(); + odometry.twist = pose_msg->twist; + odometry.pose.covariance = pose_msg->pose.covariance; + pub_odometry_rect.publish(odometry); + + Vector3d vio_t_cam; + Quaterniond vio_q_cam; + vio_t_cam = vio_t + vio_q * tic; + vio_q_cam = vio_q * qic; + + cameraposevisual.reset(); + cameraposevisual.add_pose(vio_t_cam, vio_q_cam); + cameraposevisual.publish_by(pub_camera_pose_visual, pose_msg->header); +// ROS_INFO("exiting vio_callback"); +} + + +void vio_callback_pose(const geometry_msgs::PoseWithCovarianceStamped::ConstPtr &pose_msg) +{ +// ROS_INFO("entering vio_callback_pose"); + Vector3d vio_t(pose_msg->pose.pose.position.x, pose_msg->pose.pose.position.y, pose_msg->pose.pose.position.z); + Quaterniond vio_q; + vio_q.w() = pose_msg->pose.pose.orientation.w; + vio_q.x() = pose_msg->pose.pose.orientation.x; + vio_q.y() = pose_msg->pose.pose.orientation.y; + vio_q.z() = pose_msg->pose.pose.orientation.z; + + vio_t = posegraph.w_r_vio * vio_t + posegraph.w_t_vio; + vio_q = posegraph.w_r_vio * vio_q; + + vio_t = posegraph.r_drift * vio_t + posegraph.t_drift; + vio_q = posegraph.r_drift * vio_q; + + geometry_msgs::PoseWithCovarianceStamped odometry; + odometry.header = pose_msg->header; + odometry.header.frame_id = "global"; + odometry.pose.pose.position.x = vio_t.x(); + odometry.pose.pose.position.y = vio_t.y(); + odometry.pose.pose.position.z = vio_t.z(); + odometry.pose.pose.orientation.x = vio_q.x(); + odometry.pose.pose.orientation.y = vio_q.y(); + odometry.pose.pose.orientation.z = vio_q.z(); + odometry.pose.pose.orientation.w = vio_q.w(); + odometry.pose.covariance = pose_msg->pose.covariance; + pub_pose_rect.publish(odometry); + + Vector3d vio_t_cam; + Quaterniond vio_q_cam; + 
vio_t_cam = vio_t + vio_q * tic; + vio_q_cam = vio_q * qic; + + cameraposevisual.reset(); + cameraposevisual.add_pose(vio_t_cam, vio_q_cam); + cameraposevisual.publish_by(pub_camera_pose_visual, pose_msg->header); +// ROS_INFO("exiting vio_callback_pose"); +} + +void extrinsic_callback(const nav_msgs::Odometry::ConstPtr &pose_msg) +{ +// ROS_INFO("entering extrinsic_callback"); + m_process.lock(); + tic = Vector3d(pose_msg->pose.pose.position.x, + pose_msg->pose.pose.position.y, + pose_msg->pose.pose.position.z); + qic = Quaterniond(pose_msg->pose.pose.orientation.w, + pose_msg->pose.pose.orientation.x, + pose_msg->pose.pose.orientation.y, + pose_msg->pose.pose.orientation.z).toRotationMatrix(); + m_process.unlock(); +// ROS_INFO("exiting extrinsic_callback"); +} + + +void intrinsics_callback(const sensor_msgs::CameraInfo::ConstPtr &msg) +{ +// ROS_INFO("entering intrinsics_callback"); + m_process.lock(); + assert(msg->K.size()==9); + assert(msg->D.size()==4); + cv::Size imageSize(msg->width, msg->height); + if(msg->distortion_model == "plumb_bob") { + m_camera = camodocal::CameraFactory::instance()->generateCamera(camodocal::Camera::ModelType::PINHOLE, "cam0", imageSize); + std::vector parameters; + parameters.push_back(msg->D.at(0)); + parameters.push_back(msg->D.at(1)); + parameters.push_back(msg->D.at(2)); + parameters.push_back(msg->D.at(3)); + parameters.push_back(msg->K.at(0)); + parameters.push_back(msg->K.at(4)); + parameters.push_back(msg->K.at(2)); + parameters.push_back(msg->K.at(5)); + m_camera.get()->readParameters(parameters); + max_focallength = std::max(msg->K.at(0), msg->K.at(4)); + } else if(msg->distortion_model == "equidistant") { + m_camera = camodocal::CameraFactory::instance()->generateCamera(camodocal::Camera::ModelType::KANNALA_BRANDT, "cam0", imageSize); + std::vector parameters; + parameters.push_back(msg->D.at(0)); + parameters.push_back(msg->D.at(1)); + parameters.push_back(msg->D.at(2)); + parameters.push_back(msg->D.at(3)); + 
parameters.push_back(msg->K.at(0)); + parameters.push_back(msg->K.at(4)); + parameters.push_back(msg->K.at(2)); + parameters.push_back(msg->K.at(5)); + m_camera.get()->readParameters(parameters); + max_focallength = std::max(msg->K.at(0), msg->K.at(4)); + } else { + throw std::runtime_error("Invalid distorition model, unable to parse (plumb_bob, equidistant)"); + } + m_process.unlock(); +// ROS_INFO("exiting intrinsics_callback"); +} + +[[noreturn]] void process() +{ + while (true) + { + sensor_msgs::ImageConstPtr image_msg = NULL; + sensor_msgs::PointCloudConstPtr point_msg = NULL; + nav_msgs::Odometry::ConstPtr pose_msg = NULL; + + // find out the messages with same time stamp + m_buf.lock(); + if(!image_buf.empty() && !point_buf.empty() && !pose_buf.empty()) + { + if (image_buf.front()->header.stamp.toSec() > pose_buf.front()->header.stamp.toSec()) + { + pose_buf.pop(); + printf("[POSEGRAPH]: throw pose at beginning\n"); + } + else if (image_buf.front()->header.stamp.toSec() > point_buf.front()->header.stamp.toSec()) + { + point_buf.pop(); + printf("[POSEGRAPH]: throw point at beginning\n"); + } + else if (image_buf.back()->header.stamp.toSec() >= pose_buf.front()->header.stamp.toSec() + && point_buf.back()->header.stamp.toSec() >= pose_buf.front()->header.stamp.toSec()) + { + pose_msg = pose_buf.front(); + pose_buf.pop(); + while (!pose_buf.empty()) + pose_buf.pop(); + while (image_buf.front()->header.stamp.toSec() < pose_msg->header.stamp.toSec()) + image_buf.pop(); + image_msg = image_buf.front(); + image_buf.pop(); + + while (point_buf.front()->header.stamp.toSec() < pose_msg->header.stamp.toSec()) + point_buf.pop(); + point_msg = point_buf.front(); + point_buf.pop(); + } + } + m_buf.unlock(); + + if (pose_msg != NULL) + { + //printf("[POSEGRAPH]: pose time %f \n", pose_msg->header.stamp.toSec()); + //printf("[POSEGRAPH]: point time %f \n", point_msg->header.stamp.toSec()); + //printf("[POSEGRAPH]: image time %f \n", image_msg->header.stamp.toSec()); + // 
skip fisrt few + if (skip_first_cnt < SKIP_FIRST_CNT) + { + skip_first_cnt++; + continue; + } + + if (skip_cnt < SKIP_CNT) + { + skip_cnt++; + continue; + } + else + { + skip_cnt = 0; + } + + cv_bridge::CvImageConstPtr ptr; + if (image_msg->encoding == "8UC1") + { + sensor_msgs::Image img; + img.header = image_msg->header; + img.height = image_msg->height; + img.width = image_msg->width; + img.is_bigendian = image_msg->is_bigendian; + img.step = image_msg->step; + img.data = image_msg->data; + img.encoding = "mono8"; + ptr = cv_bridge::toCvCopy(img, sensor_msgs::image_encodings::MONO8); + } + else + ptr = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::MONO8); + + cv::Mat image = ptr->image; + //cv::equalizeHist(image, image); + + // build keyframe + Vector3d T = Vector3d(pose_msg->pose.pose.position.x, + pose_msg->pose.pose.position.y, + pose_msg->pose.pose.position.z); + Matrix3d R = Quaterniond(pose_msg->pose.pose.orientation.w, + pose_msg->pose.pose.orientation.x, + pose_msg->pose.pose.orientation.y, + pose_msg->pose.pose.orientation.z).toRotationMatrix(); + if((T - last_t).norm() > SKIP_DIS) + { + vector point_3d; + vector point_2d_uv; + vector point_2d_normal; + vector point_id; + + for (unsigned int i = 0; i < point_msg->points.size(); i++) + { + cv::Point3f p_3d; + p_3d.x = point_msg->points[i].x; + p_3d.y = point_msg->points[i].y; + p_3d.z = point_msg->points[i].z; + point_3d.push_back(p_3d); + + cv::Point2f p_2d_uv, p_2d_normal; + double p_id; + p_2d_normal.x = point_msg->channels[i].values[0]; + p_2d_normal.y = point_msg->channels[i].values[1]; + p_2d_uv.x = point_msg->channels[i].values[2]; + p_2d_uv.y = point_msg->channels[i].values[3]; + p_id = point_msg->channels[i].values[4]; + point_2d_normal.push_back(p_2d_normal); + point_2d_uv.push_back(p_2d_uv); + point_id.push_back(p_id); + + //printf("[POSEGRAPH]: u %f, v %f \n", p_2d_uv.x, p_2d_uv.y); + } + + KeyFrame* keyframe = new KeyFrame(pose_msg->header.stamp.toSec(), frame_index, T, R, 
image, + point_3d, point_2d_uv, point_2d_normal, point_id, sequence); + m_process.lock(); + start_flag = 1; + posegraph.addKeyFrame(keyframe, 1); + m_process.unlock(); + frame_index++; + last_t = T; + } + } + std::chrono::milliseconds dura(5); + std::this_thread::sleep_for(dura); + } +} + +[[noreturn]] void command() +{ + while(1) + { + char c = getchar(); + if (c == 's') + { + m_process.lock(); + posegraph.savePoseGraph(); + m_process.unlock(); + printf("[POSEGRAPH]: save pose graph finish\nyou can set 'load_previous_pose_graph' to 1 in the config file to reuse it next time\n"); + printf("[POSEGRAPH]: program shutting down...\n"); + ros::shutdown(); + } + if (c == 'n') + new_sequence(); + + std::chrono::milliseconds dura(5); + std::this_thread::sleep_for(dura); + } +} + +int main(int argc, char **argv) +{ + ros::init(argc, argv, "loop_fusion"); + ros::NodeHandle n("~"); + posegraph.registerPub(n); + + VISUALIZATION_SHIFT_X = 0; + VISUALIZATION_SHIFT_Y = 0; + SKIP_CNT = 0; + SKIP_DIS = 0; + + if(argc != 2) + { + printf("[POSEGRAPH]: please intput: rosrun loop_fusion loop_fusion_node [config file] \n" + "for example: rosrun loop_fusion loop_fusion_node " + "/home/tony-ws1/catkin_ws/src/VINS-Fusion/config/euroc/euroc_stereo_imu_config.yaml \n"); + return 0; + } + + string config_file = argv[1]; + printf("[POSEGRAPH]: config_file: %s\n", argv[1]); + + cv::FileStorage fsSettings(config_file, cv::FileStorage::READ); + if(!fsSettings.isOpened()) + { + std::cerr << "ERROR: Wrong path to settings" << std::endl; + } + + cameraposevisual.setScale(0.1); + cameraposevisual.setLineWidth(0.01); + + + std::string pkg_path = ros::package::getPath("loop_fusion"); + string vocabulary_file = pkg_path + "/../support_files/brief_k10L6.bin"; + cout << "vocabulary_file" << vocabulary_file << endl; + posegraph.loadVocabulary(vocabulary_file); + BRIEF_PATTERN_FILE = pkg_path + "/../support_files/brief_pattern.yml"; + cout << "BRIEF_PATTERN_FILE" << BRIEF_PATTERN_FILE << endl; + + + //ROW = 
fsSettings["image_height"]; + //COL = fsSettings["image_width"]; + //int pn = config_file.find_last_of('/'); + //std::string configPath = config_file.substr(0, pn); + //std::string cam0Calib; + //fsSettings["cam0_calib"] >> cam0Calib; + //std::string cam0Path = configPath + "/" + cam0Calib; + //printf("[POSEGRAPH]: cam calib path: %s\n", cam0Path.c_str()); + //m_camera = camodocal::CameraFactory::instance()->generateCameraFromYamlFile(cam0Path.c_str()); + + + fsSettings["pose_graph_save_path"] >> POSE_GRAPH_SAVE_PATH; + fsSettings["output_path"] >> VINS_RESULT_PATH; + fsSettings["save_image"] >> DEBUG_IMAGE; + fsSettings["skip_dist"] >> SKIP_DIS; + fsSettings["skip_cnt"] >> SKIP_CNT; + fsSettings["min_score"] >> MIN_SCORE; + fsSettings["pnp_inflation"] >> PNP_INFLATION; + fsSettings["recall_ignore_recent_ct"] >> RECALL_IGNORE_RECENT_COUNT; + fsSettings["max_theta_diff"] >> MAX_THETA_DIFF; + fsSettings["max_pos_diff"] >> MAX_POS_DIFF; + fsSettings["min_loop_feat_num"] >> MIN_LOOP_NUM; + + int LOAD_PREVIOUS_POSE_GRAPH; + LOAD_PREVIOUS_POSE_GRAPH = fsSettings["load_previous_pose_graph"]; + VINS_RESULT_PATH = VINS_RESULT_PATH + "/vio_loop.csv"; + std::ofstream fout(VINS_RESULT_PATH, std::ios::out); + fout.close(); + + int USE_IMU = fsSettings["imu"]; + posegraph.setIMUFlag(USE_IMU); + fsSettings.release(); + + if (LOAD_PREVIOUS_POSE_GRAPH) + { + printf("[POSEGRAPH]: load pose graph\n"); + m_process.lock(); + posegraph.loadPoseGraph(); + m_process.unlock(); + printf("[POSEGRAPH]: load pose graph finish\n"); + load_flag = 1; + } + else + { + printf("[POSEGRAPH]: no previous pose graph\n"); + load_flag = 1; + } + + + // Get camera information + printf("[POSEGRAPH]: waiting for camera info topic...\n"); + auto msg1 = ros::topic::waitForMessage("/ov_msckf/camera_info", ros::Duration(ros::DURATION_MAX)); + intrinsics_callback(msg1); + printf("[POSEGRAPH]: received camera info message!\n"); + std::cout << m_camera.get()->parametersToString() << std::endl; + + // Get camera to 
imu information + printf("[POSEGRAPH]: waiting for camera to imu extrinsics topic...\n"); + auto msg2 = ros::topic::waitForMessage("/ov_msckf/odomimu", ros::Duration(ros::DURATION_MAX)); + extrinsic_callback(msg2); + printf("[POSEGRAPH]: received camera to imu extrinsics message!\n"); + std::cout << qic.transpose() << std::endl; + std::cout << tic.transpose() << std::endl; + + // Setup the rest of the publishers + ros::Subscriber sub_vio1 = n.subscribe("/vins_estimator/odometry", 2000, vio_callback); + ros::Subscriber sub_vio2 = n.subscribe("/vins_estimator/pose", 2000, vio_callback_pose); + ros::Subscriber sub_image = n.subscribe("/cam0/image_raw", 2000, image_callback); + ros::Subscriber sub_pose = n.subscribe("/vins_estimator/keyframe_pose", 2000, pose_callback); + ros::Subscriber sub_extrinsic = n.subscribe("/vins_estimator/extrinsic", 2000, extrinsic_callback); + ros::Subscriber sub_intrinsics = n.subscribe("/vins_estimator/intrinsics", 2000, intrinsics_callback); + ros::Subscriber sub_point = n.subscribe("/vins_estimator/keyframe_point", 2000, point_callback); + // PI: it says that it's only for visualization + ros::Subscriber sub_margin_point = n.subscribe("/vins_estimator/margin_cloud", 2000, margin_point_callback); + + pub_match_img = n.advertise("match_image", 1000); + pub_camera_pose_visual = n.advertise("camera_pose_visual", 1000); + pub_point_cloud = n.advertise("point_cloud_loop_rect", 1000); + pub_margin_cloud = n.advertise("margin_cloud_loop_rect", 1000); + pub_odometry_rect = n.advertise("odometry_rect", 1000); + pub_pose_rect = n.advertise("pose_rect", 1000); + + std::thread measurement_process; + std::thread keyboard_command_process; + + measurement_process = std::thread(process); + keyboard_command_process = std::thread(command); + + ros::spin(); + + return 0; +} diff --git a/loop_fusion/src/utility/CameraPoseVisualization.cpp b/loop_fusion/src/utility/CameraPoseVisualization.cpp new file mode 100644 index 0000000..36a1f26 --- /dev/null +++ 
b/loop_fusion/src/utility/CameraPoseVisualization.cpp @@ -0,0 +1,327 @@ +/******************************************************* + * Copyright (C) 2019, Aerial Robotics Group, Hong Kong University of Science and Technology + * + * This file is part of VINS. + * + * Licensed under the GNU General Public License v3.0; + * you may not use this file except in compliance with the License. + *******************************************************/ + +#include "CameraPoseVisualization.h" + +const Eigen::Vector3d CameraPoseVisualization::imlt = Eigen::Vector3d(-1.0, -0.5, 1.0); +const Eigen::Vector3d CameraPoseVisualization::imrt = Eigen::Vector3d( 1.0, -0.5, 1.0); +const Eigen::Vector3d CameraPoseVisualization::imlb = Eigen::Vector3d(-1.0, 0.5, 1.0); +const Eigen::Vector3d CameraPoseVisualization::imrb = Eigen::Vector3d( 1.0, 0.5, 1.0); +const Eigen::Vector3d CameraPoseVisualization::lt0 = Eigen::Vector3d(-0.7, -0.5, 1.0); +const Eigen::Vector3d CameraPoseVisualization::lt1 = Eigen::Vector3d(-0.7, -0.2, 1.0); +const Eigen::Vector3d CameraPoseVisualization::lt2 = Eigen::Vector3d(-1.0, -0.2, 1.0); +const Eigen::Vector3d CameraPoseVisualization::oc = Eigen::Vector3d(0.0, 0.0, 0.0); + +void Eigen2Point(const Eigen::Vector3d& v, geometry_msgs::Point& p) { + p.x = v.x(); + p.y = v.y(); + p.z = v.z(); +} + +CameraPoseVisualization::CameraPoseVisualization(float r, float g, float b, float a) + : m_marker_ns("CameraPoseVisualization"), m_scale(0.2), m_line_width(0.01) { + m_image_boundary_color.r = r; + m_image_boundary_color.g = g; + m_image_boundary_color.b = b; + m_image_boundary_color.a = a; + m_optical_center_connector_color.r = r; + m_optical_center_connector_color.g = g; + m_optical_center_connector_color.b = b; + m_optical_center_connector_color.a = a; + LOOP_EDGE_NUM = 20; + tmp_loop_edge_num = 1; +} + +void CameraPoseVisualization::setImageBoundaryColor(float r, float g, float b, float a) { + m_image_boundary_color.r = r; + m_image_boundary_color.g = g; + 
m_image_boundary_color.b = b; + m_image_boundary_color.a = a; +} + +void CameraPoseVisualization::setOpticalCenterConnectorColor(float r, float g, float b, float a) { + m_optical_center_connector_color.r = r; + m_optical_center_connector_color.g = g; + m_optical_center_connector_color.b = b; + m_optical_center_connector_color.a = a; +} + +void CameraPoseVisualization::setScale(double s) { + m_scale = s; +} +void CameraPoseVisualization::setLineWidth(double width) { + m_line_width = width; +} +void CameraPoseVisualization::add_edge(const Eigen::Vector3d& p0, const Eigen::Vector3d& p1){ + visualization_msgs::Marker marker; + + marker.ns = m_marker_ns; + marker.id = m_markers.size() + 1; + marker.type = visualization_msgs::Marker::LINE_LIST; + marker.action = visualization_msgs::Marker::ADD; + marker.scale.x = 0.01; + + marker.color.b = 1.0f; + marker.color.a = 1.0; + + geometry_msgs::Point point0, point1; + + Eigen2Point(p0, point0); + Eigen2Point(p1, point1); + + marker.points.push_back(point0); + marker.points.push_back(point1); + + m_markers.push_back(marker); +} + +void CameraPoseVisualization::add_loopedge(const Eigen::Vector3d& p0, const Eigen::Vector3d& p1){ + //m_markers.clear(); + visualization_msgs::Marker marker; + + marker.ns = m_marker_ns; + marker.id = m_markers.size() + 1; + //tmp_loop_edge_num++; + //if(tmp_loop_edge_num >= LOOP_EDGE_NUM) + // tmp_loop_edge_num = 1; + marker.type = visualization_msgs::Marker::LINE_STRIP; + marker.action = visualization_msgs::Marker::ADD; + marker.lifetime = ros::Duration(); + //marker.scale.x = 0.4; + marker.scale.x = 0.02; + marker.color.r = 1.0f; + //marker.color.g = 1.0f; + //marker.color.b = 1.0f; + marker.color.a = 1.0; + + geometry_msgs::Point point0, point1; + + Eigen2Point(p0, point0); + Eigen2Point(p1, point1); + + marker.points.push_back(point0); + marker.points.push_back(point1); + + m_markers.push_back(marker); +} + + +void CameraPoseVisualization::add_pose(const Eigen::Vector3d& p, const 
Eigen::Quaterniond& q) { + visualization_msgs::Marker marker; + + marker.ns = m_marker_ns; + marker.id = 0; + marker.type = visualization_msgs::Marker::LINE_STRIP; + marker.action = visualization_msgs::Marker::ADD; + marker.scale.x = m_line_width; + + marker.pose.position.x = 0.0; + marker.pose.position.y = 0.0; + marker.pose.position.z = 0.0; + marker.pose.orientation.w = 1.0; + marker.pose.orientation.x = 0.0; + marker.pose.orientation.y = 0.0; + marker.pose.orientation.z = 0.0; + + + geometry_msgs::Point pt_lt, pt_lb, pt_rt, pt_rb, pt_oc, pt_lt0, pt_lt1, pt_lt2; + + Eigen2Point(q * (m_scale *imlt) + p, pt_lt); + Eigen2Point(q * (m_scale *imlb) + p, pt_lb); + Eigen2Point(q * (m_scale *imrt) + p, pt_rt); + Eigen2Point(q * (m_scale *imrb) + p, pt_rb); + Eigen2Point(q * (m_scale *lt0 ) + p, pt_lt0); + Eigen2Point(q * (m_scale *lt1 ) + p, pt_lt1); + Eigen2Point(q * (m_scale *lt2 ) + p, pt_lt2); + Eigen2Point(q * (m_scale *oc ) + p, pt_oc); + + // image boundaries + marker.points.push_back(pt_lt); + marker.points.push_back(pt_lb); + marker.colors.push_back(m_image_boundary_color); + marker.colors.push_back(m_image_boundary_color); + + marker.points.push_back(pt_lb); + marker.points.push_back(pt_rb); + marker.colors.push_back(m_image_boundary_color); + marker.colors.push_back(m_image_boundary_color); + + marker.points.push_back(pt_rb); + marker.points.push_back(pt_rt); + marker.colors.push_back(m_image_boundary_color); + marker.colors.push_back(m_image_boundary_color); + + marker.points.push_back(pt_rt); + marker.points.push_back(pt_lt); + marker.colors.push_back(m_image_boundary_color); + marker.colors.push_back(m_image_boundary_color); + + // top-left indicator + marker.points.push_back(pt_lt0); + marker.points.push_back(pt_lt1); + marker.colors.push_back(m_image_boundary_color); + marker.colors.push_back(m_image_boundary_color); + + marker.points.push_back(pt_lt1); + marker.points.push_back(pt_lt2); + marker.colors.push_back(m_image_boundary_color); + 
marker.colors.push_back(m_image_boundary_color); + + // optical center connector + marker.points.push_back(pt_lt); + marker.points.push_back(pt_oc); + marker.colors.push_back(m_optical_center_connector_color); + marker.colors.push_back(m_optical_center_connector_color); + + + marker.points.push_back(pt_lb); + marker.points.push_back(pt_oc); + marker.colors.push_back(m_optical_center_connector_color); + marker.colors.push_back(m_optical_center_connector_color); + + marker.points.push_back(pt_rt); + marker.points.push_back(pt_oc); + marker.colors.push_back(m_optical_center_connector_color); + marker.colors.push_back(m_optical_center_connector_color); + + marker.points.push_back(pt_rb); + marker.points.push_back(pt_oc); + marker.colors.push_back(m_optical_center_connector_color); + marker.colors.push_back(m_optical_center_connector_color); + + m_markers.push_back(marker); +} + +void CameraPoseVisualization::reset() { + m_markers.clear(); + //image.points.clear(); + //image.colors.clear(); +} + +void CameraPoseVisualization::publish_by( ros::Publisher &pub, const std_msgs::Header &header ) { + visualization_msgs::MarkerArray markerArray_msg; + //int k = (int)m_markers.size(); + /* + for (int i = 0; i < 5 && k > 0; i++) + { + k--; + m_markers[k].header = header; + markerArray_msg.markers.push_back(m_markers[k]); + } + */ + + + for(auto& marker : m_markers) { + marker.header = header; + markerArray_msg.markers.push_back(marker); + } + + pub.publish(markerArray_msg); +} + +void CameraPoseVisualization::publish_image_by( ros::Publisher &pub, const std_msgs::Header &header ) { + image.header = header; + + pub.publish(image); +} +/* +void CameraPoseVisualization::add_image(const Eigen::Vector3d& T, const Eigen::Matrix3d& R, const cv::Mat &src) +{ + //image.points.clear(); + //image.colors.clear(); + + image.ns = "image"; + image.id = 0; + image.action = visualization_msgs::Marker::ADD; + image.type = visualization_msgs::Marker::TRIANGLE_LIST; + image.scale.x = 1; + 
image.scale.y = 1; + image.scale.z = 1; + + geometry_msgs::Point p; + std_msgs::ColorRGBA crgb; + + double center_x = src.rows / 2.0; + double center_y = src.cols / 2.0; + + //double scale = 0.01; + double scale = IMAGE_VISUAL_SCALE; + + for(int r = 0; r < src.cols; ++r) { + for(int c = 0; c < src.rows; ++c) { + float intensity = (float)( src.at(c, r)); + crgb.r = (float)intensity / 255.0; + crgb.g = (float)intensity / 255.0; + crgb.b = (float)intensity / 255.0; + crgb.a = 1.0; + + Eigen::Vector3d p_cam, p_w; + p_cam.z() = 0; + p_cam.x() = (r - center_x) * scale; + p_cam.y() = (c - center_y) * scale; + p_w = R * p_cam + T; + p.x = p_w(0); + p.y = p_w(1); + p.z = p_w(2); + image.points.push_back(p); + image.colors.push_back(crgb); + + p_cam.z() = 0; + p_cam.x() = (r - center_x + 1) * scale; + p_cam.y() = (c - center_y) * scale; + p_w = R * p_cam + T; + p.x = p_w(0); + p.y = p_w(1); + p.z = p_w(2); + image.points.push_back(p); + image.colors.push_back(crgb); + + p_cam.z() = 0; + p_cam.x() = (r - center_x) * scale; + p_cam.y() = (c - center_y + 1) * scale; + p_w = R * p_cam + T; + p.x = p_w(0); + p.y = p_w(1); + p.z = p_w(2); + image.points.push_back(p); + image.colors.push_back(crgb); + + p_cam.z() = 0; + p_cam.x() = (r - center_x + 1) * scale; + p_cam.y() = (c - center_y) * scale; + p_w = R * p_cam + T; + p.x = p_w(0); + p.y = p_w(1); + p.z = p_w(2); + image.points.push_back(p); + image.colors.push_back(crgb); + + p_cam.z() = 0; + p_cam.x() = (r - center_x + 1) * scale; + p_cam.y() = (c - center_y + 1) * scale; + p_w = R * p_cam + T; + p.x = p_w(0); + p.y = p_w(1); + p.z = p_w(2); + image.points.push_back(p); + image.colors.push_back(crgb); + + p_cam.z() = 0; + p_cam.x() = (r - center_x) * scale; + p_cam.y() = (c - center_y + 1) * scale; + p_w = R * p_cam + T; + p.x = p_w(0); + p.y = p_w(1); + p.z = p_w(2); + image.points.push_back(p); + image.colors.push_back(crgb); + } + } +} +*/ diff --git a/loop_fusion/src/utility/CameraPoseVisualization.h 
b/loop_fusion/src/utility/CameraPoseVisualization.h new file mode 100644 index 0000000..6e21d03 --- /dev/null +++ b/loop_fusion/src/utility/CameraPoseVisualization.h @@ -0,0 +1,58 @@ +/******************************************************* + * Copyright (C) 2019, Aerial Robotics Group, Hong Kong University of Science and Technology + * + * This file is part of VINS. + * + * Licensed under the GNU General Public License v3.0; + * you may not use this file except in compliance with the License. + *******************************************************/ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include "../parameters.h" + +class CameraPoseVisualization { +public: + std::string m_marker_ns; + + CameraPoseVisualization(float r, float g, float b, float a); + + void setImageBoundaryColor(float r, float g, float b, float a=1.0); + void setOpticalCenterConnectorColor(float r, float g, float b, float a=1.0); + void setScale(double s); + void setLineWidth(double width); + + void add_pose(const Eigen::Vector3d& p, const Eigen::Quaterniond& q); + void reset(); + + void publish_by(ros::Publisher& pub, const std_msgs::Header& header); + void add_edge(const Eigen::Vector3d& p0, const Eigen::Vector3d& p1); + void add_loopedge(const Eigen::Vector3d& p0, const Eigen::Vector3d& p1); + //void add_image(const Eigen::Vector3d& T, const Eigen::Matrix3d& R, const cv::Mat &src); + void publish_image_by( ros::Publisher &pub, const std_msgs::Header &header); +private: + std::vector m_markers; + std_msgs::ColorRGBA m_image_boundary_color; + std_msgs::ColorRGBA m_optical_center_connector_color; + double m_scale; + double m_line_width; + visualization_msgs::Marker image; + int LOOP_EDGE_NUM; + int tmp_loop_edge_num; + + static const Eigen::Vector3d imlt; + static const Eigen::Vector3d imlb; + static const Eigen::Vector3d imrt; + static const Eigen::Vector3d imrb; + static const Eigen::Vector3d oc ; + static const Eigen::Vector3d lt0 ; + static 
const Eigen::Vector3d lt1 ; + static const Eigen::Vector3d lt2 ; +}; diff --git a/loop_fusion/src/utility/Grider_FAST.h b/loop_fusion/src/utility/Grider_FAST.h new file mode 100644 index 0000000..803c42c --- /dev/null +++ b/loop_fusion/src/utility/Grider_FAST.h @@ -0,0 +1,120 @@ +/** + * MIT License + * Copyright (c) 2018 Patrick Geneva @ University of Delaware (Robot Perception & Navigation Group) + * Copyright (c) 2018 Kevin Eckenhoff @ University of Delaware (Robot Perception & Navigation Group) + * Copyright (c) 2018 Guoquan Huang @ University of Delaware (Robot Perception & Navigation Group) + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + + +#ifndef MSCKF_GRIDER_FAST_H +#define MSCKF_GRIDER_FAST_H + + +#include +#include +#include + + +#include +#include +#include + + +class Grider_FAST { + +public: + + + /** + * Compare keypoints based on their response value + * We want to have the keypoints with the highest values! 
+ * https://stackoverflow.com/a/10910921 + */ + static bool compare_response(cv::KeyPoint first, cv::KeyPoint second) { + return first.response > second.response; + } + + + /** + * This function will perform grid extraction using FAST + * Given a specified grid size, this will try to extract fast features from each grid + * It will then return the best from each grid + */ + static void perform_griding(const cv::Mat &img, std::vector &pts, int num_features, + int grid_x, int grid_y, int threshold, bool nonmaxSuppression) { + + // Calculate the size our extraction boxes should be + int size_x = img.cols / grid_x; + int size_y = img.rows / grid_y; + + // Make sure our sizes are not zero + assert(size_x > 0); + assert(size_y > 0); + + // We want to have equally distributed features + auto num_features_grid = (int) (num_features / (grid_x * grid_y)) + 1; + + // Lets loop through each grid and extract features + for (int x = 0; x < img.cols; x += size_x) { + for (int y = 0; y < img.rows; y += size_y) { + + // Skip if we are out of bounds + if (x + size_x > img.cols || y + size_y > img.rows) + continue; + + // Calculate where we should be extracting from + cv::Rect img_roi = cv::Rect(x, y, size_x, size_y); + + // Extract FAST features for this part of the image + std::vector pts_new; + cv::FAST(img(img_roi), pts_new, threshold, nonmaxSuppression); + + // Now lets get the top number from this + std::sort(pts_new.begin(), pts_new.end(), Grider_FAST::compare_response); + + // Debug print out the response values + //for (auto pt : pts_new) { + // std::cout << pt.response << std::endl; + //} + //std::cout << "======================" << std::endl; + + // Append the "best" ones to our vector + // Note that we need to "correct" the point u,v since we extracted it in a ROI + // So we should append the location of that ROI in the image + for(size_t i=0; i<(size_t)num_features_grid && i +#include +#include + +class TicToc +{ + public: + TicToc() + { + tic(); + } + + void tic() + { + 
start = std::chrono::system_clock::now(); + } + + double toc() + { + end = std::chrono::system_clock::now(); + std::chrono::duration elapsed_seconds = end - start; + return elapsed_seconds.count() * 1000; + } + + private: + std::chrono::time_point start, end; +}; diff --git a/loop_fusion/src/utility/utility.cpp b/loop_fusion/src/utility/utility.cpp new file mode 100644 index 0000000..d9f9b82 --- /dev/null +++ b/loop_fusion/src/utility/utility.cpp @@ -0,0 +1,22 @@ +/******************************************************* + * Copyright (C) 2019, Aerial Robotics Group, Hong Kong University of Science and Technology + * + * This file is part of VINS. + * + * Licensed under the GNU General Public License v3.0; + * you may not use this file except in compliance with the License. + *******************************************************/ + +#include "utility.h" + +Eigen::Matrix3d Utility::g2R(const Eigen::Vector3d &g) +{ + Eigen::Matrix3d R0; + Eigen::Vector3d ng1 = g.normalized(); + Eigen::Vector3d ng2{0, 0, 1.0}; + R0 = Eigen::Quaterniond::FromTwoVectors(ng1, ng2).toRotationMatrix(); + double yaw = Utility::R2ypr(R0).x(); + R0 = Utility::ypr2R(Eigen::Vector3d{-yaw, 0, 0}) * R0; + // R0 = Utility::ypr2R(Eigen::Vector3d{-90, 0, 0}) * R0; + return R0; +} diff --git a/loop_fusion/src/utility/utility.h b/loop_fusion/src/utility/utility.h new file mode 100644 index 0000000..6872696 --- /dev/null +++ b/loop_fusion/src/utility/utility.h @@ -0,0 +1,149 @@ +/******************************************************* + * Copyright (C) 2019, Aerial Robotics Group, Hong Kong University of Science and Technology + * + * This file is part of VINS. + * + * Licensed under the GNU General Public License v3.0; + * you may not use this file except in compliance with the License. 
+ *******************************************************/ + +#pragma once + +#include +#include +#include +#include + +class Utility +{ + public: + template + static Eigen::Quaternion deltaQ(const Eigen::MatrixBase &theta) + { + typedef typename Derived::Scalar Scalar_t; + + Eigen::Quaternion dq; + Eigen::Matrix half_theta = theta; + half_theta /= static_cast(2.0); + dq.w() = static_cast(1.0); + dq.x() = half_theta.x(); + dq.y() = half_theta.y(); + dq.z() = half_theta.z(); + return dq; + } + + template + static Eigen::Matrix skewSymmetric(const Eigen::MatrixBase &q) + { + Eigen::Matrix ans; + ans << typename Derived::Scalar(0), -q(2), q(1), + q(2), typename Derived::Scalar(0), -q(0), + -q(1), q(0), typename Derived::Scalar(0); + return ans; + } + + template + static Eigen::Quaternion positify(const Eigen::QuaternionBase &q) + { + //printf("a: %f %f %f %f", q.w(), q.x(), q.y(), q.z()); + //Eigen::Quaternion p(-q.w(), -q.x(), -q.y(), -q.z()); + //printf("b: %f %f %f %f", p.w(), p.x(), p.y(), p.z()); + //return q.template w() >= (typename Derived::Scalar)(0.0) ? 
q : Eigen::Quaternion(-q.w(), -q.x(), -q.y(), -q.z()); + return q; + } + + template + static Eigen::Matrix Qleft(const Eigen::QuaternionBase &q) + { + Eigen::Quaternion qq = positify(q); + Eigen::Matrix ans; + ans(0, 0) = qq.w(), ans.template block<1, 3>(0, 1) = -qq.vec().transpose(); + ans.template block<3, 1>(1, 0) = qq.vec(), ans.template block<3, 3>(1, 1) = qq.w() * Eigen::Matrix::Identity() + skewSymmetric(qq.vec()); + return ans; + } + + template + static Eigen::Matrix Qright(const Eigen::QuaternionBase &p) + { + Eigen::Quaternion pp = positify(p); + Eigen::Matrix ans; + ans(0, 0) = pp.w(), ans.template block<1, 3>(0, 1) = -pp.vec().transpose(); + ans.template block<3, 1>(1, 0) = pp.vec(), ans.template block<3, 3>(1, 1) = pp.w() * Eigen::Matrix::Identity() - skewSymmetric(pp.vec()); + return ans; + } + + static Eigen::Vector3d R2ypr(const Eigen::Matrix3d &R) + { + Eigen::Vector3d n = R.col(0); + Eigen::Vector3d o = R.col(1); + Eigen::Vector3d a = R.col(2); + + Eigen::Vector3d ypr(3); + double y = atan2(n(1), n(0)); + double p = atan2(-n(2), n(0) * cos(y) + n(1) * sin(y)); + double r = atan2(a(0) * sin(y) - a(1) * cos(y), -o(0) * sin(y) + o(1) * cos(y)); + ypr(0) = y; + ypr(1) = p; + ypr(2) = r; + + return ypr / M_PI * 180.0; + } + + template + static Eigen::Matrix ypr2R(const Eigen::MatrixBase &ypr) + { + typedef typename Derived::Scalar Scalar_t; + + Scalar_t y = ypr(0) / 180.0 * M_PI; + Scalar_t p = ypr(1) / 180.0 * M_PI; + Scalar_t r = ypr(2) / 180.0 * M_PI; + + Eigen::Matrix Rz; + Rz << cos(y), -sin(y), 0, + sin(y), cos(y), 0, + 0, 0, 1; + + Eigen::Matrix Ry; + Ry << cos(p), 0., sin(p), + 0., 1., 0., + -sin(p), 0., cos(p); + + Eigen::Matrix Rx; + Rx << 1., 0., 0., + 0., cos(r), -sin(r), + 0., sin(r), cos(r); + + return Rz * Ry * Rx; + } + + static Eigen::Matrix3d g2R(const Eigen::Vector3d &g); + + template + struct uint_ + { + }; + + template + void unroller(const Lambda &f, const IterT &iter, uint_) + { + unroller(f, iter, uint_()); + f(iter + N); + } + 
+ template + void unroller(const Lambda &f, const IterT &iter, uint_<0>) + { + f(iter); + } + + template + static T normalizeAngle(const T& angle_degrees) { + T two_pi(2.0 * 180); + if (angle_degrees > 0) + return angle_degrees - + two_pi * std::floor((angle_degrees + T(180)) / two_pi); + else + return angle_degrees + + two_pi * std::floor((-angle_degrees + T(180)) / two_pi); + }; +}; diff --git a/support_files/brief_k10L6.bin b/support_files/brief_k10L6.bin new file mode 100644 index 0000000..3cba0b1 Binary files /dev/null and b/support_files/brief_k10L6.bin differ diff --git a/support_files/brief_pattern.yml b/support_files/brief_pattern.yml new file mode 100644 index 0000000..7e67fed --- /dev/null +++ b/support_files/brief_pattern.yml @@ -0,0 +1,1029 @@ +%YAML:1.0 +x1: + - 0 + - 4 + - 11 + - -4 + - 24 + - -3 + - -4 + - -7 + - -5 + - -2 + - 8 + - 1 + - -2 + - -5 + - -5 + - -8 + - 2 + - -8 + - 6 + - 4 + - -1 + - -14 + - 12 + - -12 + - -20 + - -11 + - 7 + - -11 + - 18 + - -10 + - 10 + - -20 + - 7 + - -2 + - 3 + - 7 + - -20 + - -3 + - -4 + - -6 + - -4 + - 14 + - 1 + - 2 + - -9 + - 15 + - 13 + - 14 + - -21 + - -6 + - 3 + - -7 + - 23 + - -22 + - -12 + - 0 + - -22 + - 1 + - -11 + - -11 + - -8 + - -7 + - -16 + - -6 + - 4 + - -16 + - 4 + - 7 + - 0 + - 0 + - -7 + - -4 + - 16 + - -3 + - 6 + - 5 + - 0 + - 8 + - -4 + - -12 + - 0 + - -11 + - 11 + - 3 + - -13 + - 9 + - 11 + - -23 + - 0 + - 6 + - 6 + - 3 + - -7 + - 6 + - 2 + - -4 + - 24 + - -3 + - 5 + - -5 + - 0 + - 0 + - -4 + - -10 + - -1 + - 8 + - 6 + - 3 + - -7 + - 7 + - -4 + - 5 + - 8 + - 10 + - 8 + - -10 + - 2 + - 6 + - 0 + - 10 + - -2 + - 5 + - -2 + - -1 + - -12 + - -10 + - -15 + - 12 + - 1 + - -6 + - 12 + - -7 + - -7 + - -8 + - 7 + - -11 + - 2 + - -22 + - 7 + - 20 + - -8 + - 4 + - 3 + - 11 + - -9 + - 4 + - -4 + - -12 + - 12 + - -22 + - -6 + - 7 + - 5 + - -1 + - 9 + - 10 + - -15 + - -2 + - 1 + - 14 + - 21 + - 3 + - 6 + - -13 + - 5 + - -19 + - 10 + - 6 + - 2 + - -14 + - -16 + - -15 + - 6 + - -8 + - 18 + - 7 + - 16 + - -4 + 
- -2 + - -4 + - 0 + - -4 + - -14 + - 0 + - -4 + - -13 + - -1 + - -14 + - 11 + - -4 + - -15 + - -8 + - -6 + - 3 + - -1 + - -1 + - 4 + - 8 + - 3 + - 0 + - 24 + - -3 + - -11 + - -10 + - -2 + - 2 + - -1 + - 4 + - 2 + - -1 + - 11 + - 0 + - 10 + - -1 + - 6 + - -4 + - -19 + - 1 + - 9 + - -6 + - -1 + - 12 + - 0 + - -9 + - -1 + - 5 + - 0 + - -6 + - -5 + - 0 + - 7 + - -19 + - 11 + - -9 + - 11 + - 0 + - 8 + - -1 + - 5 + - 10 + - -2 + - -6 + - -6 + - 0 + - -3 + - -7 + - 4 + - -4 + - 2 + - -10 + - -7 + - 18 + - 0 + - 12 + - -2 + - 0 +y1: + - -10 + - 12 + - 13 + - 8 + - -2 + - 2 + - 4 + - 4 + - -11 + - -4 + - 7 + - 15 + - 17 + - 0 + - 10 + - -2 + - -7 + - 10 + - 11 + - -3 + - -4 + - 11 + - -5 + - -15 + - 0 + - 3 + - -3 + - -11 + - 3 + - -4 + - 4 + - 22 + - 16 + - 3 + - -2 + - -3 + - 0 + - 5 + - 15 + - -8 + - 2 + - 2 + - -12 + - -10 + - 5 + - 4 + - 10 + - -4 + - 20 + - -7 + - 5 + - 0 + - -18 + - 2 + - -2 + - -23 + - 15 + - 4 + - -1 + - -2 + - 3 + - 0 + - -16 + - 5 + - 0 + - -16 + - -2 + - 5 + - 6 + - 4 + - 13 + - -1 + - -16 + - -13 + - -7 + - -3 + - 4 + - 0 + - -10 + - -10 + - -9 + - 4 + - -8 + - 0 + - -15 + - -2 + - -10 + - 16 + - 1 + - 0 + - 6 + - -11 + - -3 + - -7 + - -2 + - 10 + - -3 + - -2 + - 0 + - 10 + - -7 + - 0 + - 4 + - 22 + - -5 + - 0 + - -1 + - -8 + - -1 + - -4 + - 12 + - -2 + - 18 + - -2 + - -6 + - -4 + - -2 + - -16 + - -8 + - 1 + - -5 + - 2 + - 3 + - 2 + - 7 + - -8 + - 3 + - 8 + - 0 + - -10 + - -4 + - -18 + - 9 + - 12 + - 3 + - 17 + - -8 + - -13 + - 5 + - -1 + - 8 + - -10 + - -3 + - -9 + - -2 + - 7 + - 5 + - -5 + - -8 + - 15 + - -12 + - -5 + - 0 + - 2 + - 15 + - -5 + - -4 + - -1 + - -9 + - 0 + - -5 + - -11 + - -5 + - -5 + - -10 + - 2 + - 16 + - 6 + - 17 + - 0 + - -7 + - 15 + - 5 + - -14 + - -2 + - 6 + - 5 + - 2 + - 12 + - -3 + - 2 + - 4 + - 13 + - 1 + - -1 + - -3 + - -1 + - -11 + - 8 + - -3 + - 19 + - 8 + - 7 + - -3 + - 5 + - 19 + - 4 + - -6 + - 4 + - -14 + - 3 + - 10 + - -15 + - -4 + - 14 + - 1 + - 14 + - -1 + - -9 + - -5 + - -16 + - 10 + - 6 + - 3 + - -13 + - 19 + 
- -7 + - -7 + - -12 + - -7 + - 1 + - 7 + - -7 + - 15 + - -2 + - -15 + - 6 + - 6 + - 2 + - -6 + - -16 + - 0 + - -5 + - -9 + - 11 + - 21 + - 3 + - 8 + - 23 + - -5 + - -20 + - -7 + - 13 + - -7 + - -7 + - 0 + - 6 + - 10 + - -15 + - 1 + - 0 + - 0 + - 0 + - -7 + - 10 + - -3 +x2: + - 0 + - 0 + - 13 + - -6 + - 22 + - -7 + - -3 + - -12 + - 1 + - -4 + - 10 + - 4 + - -6 + - 0 + - -8 + - -5 + - 7 + - -2 + - -1 + - 8 + - -2 + - -14 + - 14 + - -10 + - -14 + - -4 + - 6 + - -14 + - 15 + - -5 + - 9 + - -18 + - 4 + - 0 + - -4 + - 3 + - -19 + - -10 + - -8 + - 0 + - -4 + - 10 + - 1 + - 5 + - -12 + - 19 + - 2 + - 11 + - -18 + - -5 + - -3 + - -5 + - 21 + - -17 + - -14 + - 1 + - -17 + - 5 + - -8 + - -8 + - -8 + - -1 + - -16 + - 1 + - -2 + - -12 + - 0 + - 3 + - 7 + - -1 + - -10 + - -2 + - 14 + - -5 + - 0 + - 4 + - -2 + - 15 + - 2 + - -9 + - -9 + - -11 + - 9 + - 2 + - -15 + - 8 + - 9 + - -24 + - -5 + - 10 + - 7 + - 0 + - -7 + - 6 + - 2 + - 0 + - 24 + - 0 + - 2 + - 0 + - 2 + - -1 + - -5 + - -10 + - -4 + - 19 + - 2 + - -6 + - -8 + - 13 + - -1 + - 5 + - 3 + - 13 + - 8 + - -10 + - 0 + - 1 + - -5 + - 11 + - -6 + - 8 + - -7 + - 1 + - -14 + - -6 + - -10 + - 11 + - 2 + - -5 + - 12 + - -12 + - -3 + - -5 + - 7 + - -5 + - -3 + - -16 + - 9 + - 20 + - -9 + - 8 + - 3 + - 15 + - -12 + - 4 + - -6 + - -15 + - 10 + - -17 + - -13 + - 6 + - 4 + - -3 + - 9 + - 7 + - -18 + - -10 + - -3 + - 10 + - 23 + - -9 + - 10 + - -15 + - 7 + - -14 + - 13 + - 10 + - 0 + - -20 + - -14 + - -15 + - 3 + - -7 + - 21 + - 7 + - 17 + - 1 + - 0 + - -2 + - -8 + - -11 + - -17 + - 2 + - -6 + - -18 + - -5 + - -10 + - 5 + - 2 + - -13 + - -6 + - -4 + - 0 + - 3 + - 2 + - 10 + - 10 + - 4 + - 0 + - 21 + - -13 + - -16 + - -17 + - 0 + - 3 + - -1 + - 6 + - 5 + - 8 + - 14 + - 2 + - 10 + - -6 + - 1 + - 0 + - -23 + - 0 + - 6 + - -4 + - 3 + - 9 + - -5 + - -6 + - 1 + - 12 + - 2 + - -6 + - -4 + - 7 + - -1 + - -17 + - 9 + - -12 + - 13 + - -1 + - 11 + - 5 + - 7 + - 10 + - -2 + - -3 + - -8 + - 1 + - 0 + - -6 + - 5 + - -6 + - 5 + - -12 + - -5 + - 21 + - 2 
+ - 17 + - -7 + - -1 +y2: + - -6 + - 6 + - 10 + - 9 + - 0 + - 4 + - 1 + - 1 + - -12 + - 3 + - 1 + - 9 + - 15 + - 0 + - 14 + - 3 + - -13 + - 11 + - 10 + - -4 + - -2 + - 7 + - -7 + - -17 + - -6 + - 7 + - -4 + - -16 + - -6 + - -2 + - 6 + - 19 + - 8 + - 4 + - -1 + - -2 + - 3 + - 3 + - 21 + - -3 + - 6 + - 5 + - -12 + - -5 + - 0 + - 5 + - 12 + - -5 + - 24 + - -11 + - 7 + - 0 + - -20 + - 0 + - -4 + - -21 + - 15 + - 2 + - -4 + - 0 + - -3 + - 0 + - -19 + - 5 + - 4 + - -19 + - -1 + - 5 + - 4 + - 0 + - 10 + - 3 + - -15 + - -15 + - -7 + - -1 + - 8 + - -6 + - -6 + - -14 + - -15 + - 1 + - -8 + - -1 + - -5 + - 2 + - -10 + - 17 + - 5 + - 0 + - 9 + - -8 + - -5 + - -2 + - -1 + - 9 + - 0 + - -5 + - -2 + - 13 + - -4 + - -1 + - 0 + - 16 + - -10 + - -1 + - 0 + - -6 + - 3 + - -6 + - 10 + - 1 + - 18 + - -2 + - -10 + - -4 + - -1 + - -13 + - -4 + - -1 + - -8 + - 7 + - 4 + - -3 + - 4 + - -14 + - -4 + - 11 + - -1 + - -12 + - -5 + - -14 + - 9 + - 16 + - 1 + - 20 + - -9 + - -10 + - 10 + - 2 + - 4 + - -2 + - -4 + - -6 + - -6 + - 5 + - 5 + - -4 + - 0 + - 18 + - -15 + - -5 + - 0 + - 1 + - 8 + - -4 + - -2 + - 0 + - -11 + - 0 + - -7 + - -5 + - 0 + - 0 + - -3 + - 5 + - 12 + - 3 + - 13 + - 1 + - -9 + - 19 + - 4 + - -9 + - 4 + - 2 + - 3 + - -1 + - 14 + - 0 + - 2 + - 4 + - 14 + - 8 + - 0 + - -4 + - 0 + - -12 + - 9 + - 0 + - 23 + - 7 + - 0 + - -9 + - 2 + - 22 + - 2 + - -14 + - 6 + - -16 + - 1 + - 4 + - -14 + - -10 + - 19 + - 6 + - 16 + - -4 + - -9 + - 0 + - -7 + - 12 + - 5 + - 5 + - -14 + - 21 + - -5 + - -9 + - -17 + - -5 + - 4 + - 14 + - -5 + - 9 + - -4 + - -21 + - 4 + - 1 + - 5 + - 0 + - -12 + - 2 + - 0 + - -1 + - 6 + - 24 + - 0 + - 15 + - 23 + - -10 + - -12 + - -2 + - 12 + - -4 + - -5 + - 0 + - 5 + - 13 + - -21 + - 0 + - -2 + - 1 + - 6 + - -4 + - 9 + - -2 diff --git a/support_files/paper_bib.txt b/support_files/paper_bib.txt new file mode 100644 index 0000000..4018604 --- /dev/null +++ b/support_files/paper_bib.txt @@ -0,0 +1,32 @@ +@misc{qin2019a, + Author = {Tong Qin and Jie Pan and Shaozu Cao and 
Shaojie Shen}, + Title = {A General Optimization-based Framework for Local Odometry Estimation with Multiple Sensors}, + Year = {2019}, + Eprint = {arXiv:1901.03638} +} + +@misc{qin2019b, + Author = {Tong Qin and Shaozu Cao and Jie Pan and Shaojie Shen}, + Title = {A General Optimization-based Framework for Global Pose Estimation with Multiple Sensors}, + Year = {2019}, + Eprint = {arXiv:1901.03642} +} + +@inproceedings{qin2018online, + title={Online Temporal Calibration for Monocular Visual-Inertial Systems}, + author={Qin, Tong and Shen, Shaojie}, + booktitle={2018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, + pages={3662--3669}, + year={2018}, + organization={IEEE} +} + +@article{qin2017vins, + title={VINS-Mono: A Robust and Versatile Monocular Visual-Inertial State Estimator}, + author={Qin, Tong and Li, Peiliang and Shen, Shaojie}, + journal={IEEE Transactions on Robotics}, + year={2018}, + volume={34}, + number={4}, + pages={1004-1020} +}