[Zrouter-src-freebsd] ZRouter.org: push to FreeBSD HEAD tree

zrouter-src-freebsd at zrouter.org
Tue Apr 17 08:58:17 UTC 2012


details:   http://zrouter.org/hg/FreeBSD/head//rev/7bbd6bca528b
changeset: 451:7bbd6bca528b
user:      Aleksandr Rybalko <ray at ddteam.net>
date:      Tue Apr 17 11:33:49 2012 +0300
description:
Add new files from FreeBSD HEAD @ svn r234370.

diffstat:

 head/cddl/contrib/opensolaris/lib/libdtrace/mips/dt_isadep.c                                     |      75 +
 head/contrib/com_err/ChangeLog                                                                   |     235 -
 head/contrib/jemalloc/COPYING                                                                    |      27 +
 head/contrib/jemalloc/ChangeLog                                                                  |     322 +
 head/contrib/jemalloc/FREEBSD-Xlist                                                              |      23 +
 head/contrib/jemalloc/FREEBSD-diffs                                                              |     247 +
 head/contrib/jemalloc/FREEBSD-upgrade                                                            |     122 +
 head/contrib/jemalloc/VERSION                                                                    |       1 +
 head/contrib/jemalloc/doc/jemalloc.3                                                             |    1464 +
 head/contrib/jemalloc/include/jemalloc/internal/arena.h                                          |     685 +
 head/contrib/jemalloc/include/jemalloc/internal/atomic.h                                         |     240 +
 head/contrib/jemalloc/include/jemalloc/internal/base.h                                           |      26 +
 head/contrib/jemalloc/include/jemalloc/internal/bitmap.h                                         |     184 +
 head/contrib/jemalloc/include/jemalloc/internal/chunk.h                                          |      58 +
 head/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h                                      |      24 +
 head/contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h                                     |      22 +
 head/contrib/jemalloc/include/jemalloc/internal/ckh.h                                            |      90 +
 head/contrib/jemalloc/include/jemalloc/internal/ctl.h                                            |     109 +
 head/contrib/jemalloc/include/jemalloc/internal/extent.h                                         |      43 +
 head/contrib/jemalloc/include/jemalloc/internal/hash.h                                           |      70 +
 head/contrib/jemalloc/include/jemalloc/internal/huge.h                                           |      40 +
 head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h                              |     876 +
 head/contrib/jemalloc/include/jemalloc/internal/mb.h                                             |     115 +
 head/contrib/jemalloc/include/jemalloc/internal/mutex.h                                          |      88 +
 head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h                              |     274 +
 head/contrib/jemalloc/include/jemalloc/internal/prng.h                                           |      60 +
 head/contrib/jemalloc/include/jemalloc/internal/prof.h                                           |     535 +
 head/contrib/jemalloc/include/jemalloc/internal/ql.h                                             |      83 +
 head/contrib/jemalloc/include/jemalloc/internal/qr.h                                             |      67 +
 head/contrib/jemalloc/include/jemalloc/internal/quarantine.h                                     |      24 +
 head/contrib/jemalloc/include/jemalloc/internal/rb.h                                             |     973 +
 head/contrib/jemalloc/include/jemalloc/internal/rtree.h                                          |     161 +
 head/contrib/jemalloc/include/jemalloc/internal/size_classes.h                                   |     721 +
 head/contrib/jemalloc/include/jemalloc/internal/stats.h                                          |     173 +
 head/contrib/jemalloc/include/jemalloc/internal/tcache.h                                         |     494 +
 head/contrib/jemalloc/include/jemalloc/internal/tsd.h                                            |     309 +
 head/contrib/jemalloc/include/jemalloc/internal/util.h                                           |     146 +
 head/contrib/jemalloc/include/jemalloc/jemalloc.h                                                |     141 +
 head/contrib/jemalloc/include/jemalloc/jemalloc_FreeBSD.h                                        |      76 +
 head/contrib/jemalloc/include/jemalloc/jemalloc_defs.h                                           |     239 +
 head/contrib/jemalloc/src/arena.c                                                                |    2248 +
 head/contrib/jemalloc/src/atomic.c                                                               |       2 +
 head/contrib/jemalloc/src/base.c                                                                 |     138 +
 head/contrib/jemalloc/src/bitmap.c                                                               |      90 +
 head/contrib/jemalloc/src/chunk.c                                                                |     304 +
 head/contrib/jemalloc/src/chunk_dss.c                                                            |     159 +
 head/contrib/jemalloc/src/chunk_mmap.c                                                           |     207 +
 head/contrib/jemalloc/src/ckh.c                                                                  |     609 +
 head/contrib/jemalloc/src/ctl.c                                                                  |    1385 +
 head/contrib/jemalloc/src/extent.c                                                               |      39 +
 head/contrib/jemalloc/src/hash.c                                                                 |       2 +
 head/contrib/jemalloc/src/huge.c                                                                 |     306 +
 head/contrib/jemalloc/src/jemalloc.c                                                             |    1733 +
 head/contrib/jemalloc/src/mb.c                                                                   |       2 +
 head/contrib/jemalloc/src/mutex.c                                                                |     153 +
 head/contrib/jemalloc/src/prof.c                                                                 |    1243 +
 head/contrib/jemalloc/src/quarantine.c                                                           |     163 +
 head/contrib/jemalloc/src/rtree.c                                                                |      46 +
 head/contrib/jemalloc/src/stats.c                                                                |     550 +
 head/contrib/jemalloc/src/tcache.c                                                               |     435 +
 head/contrib/jemalloc/src/tsd.c                                                                  |      72 +
 head/contrib/jemalloc/src/util.c                                                                 |     635 +
 head/contrib/llvm/include/llvm-c/TargetMachine.h                                                 |     142 +
 head/contrib/llvm/include/llvm-c/Transforms/Vectorize.h                                          |      48 +
 head/contrib/llvm/include/llvm/ADT/Hashing.h                                                     |     770 +
 head/contrib/llvm/include/llvm/ADT/SparseSet.h                                                   |     268 +
 head/contrib/llvm/include/llvm/ADT/VariadicFunction.h                                            |     331 +
 head/contrib/llvm/include/llvm/ADT/edit_distance.h                                               |     102 +
 head/contrib/llvm/include/llvm/CodeGen/DFAPacketizer.h                                           |     167 +
 head/contrib/llvm/include/llvm/CodeGen/LiveRangeEdit.h                                           |     207 +
 head/contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h                                      |     203 +
 head/contrib/llvm/include/llvm/CodeGen/MachineScheduler.h                                        |      91 +
 head/contrib/llvm/include/llvm/CodeGen/ResourcePriorityQueue.h                                   |     142 +
 head/contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h                                       |     344 +
 head/contrib/llvm/include/llvm/ExecutionEngine/IntelJITEventsWrapper.h                           |     102 +
 head/contrib/llvm/include/llvm/ExecutionEngine/OProfileWrapper.h                                 |     124 +
 head/contrib/llvm/include/llvm/IntrinsicsHexagon.td                                              |    3671 +
 head/contrib/llvm/include/llvm/MC/MCWinCOFFObjectWriter.h                                        |      36 +
 head/contrib/llvm/include/llvm/Object/ELF.h                                                      |    2209 +
 head/contrib/llvm/include/llvm/Support/DataStream.h                                              |      38 +
 head/contrib/llvm/include/llvm/Support/JSONParser.h                                              |     448 +
 head/contrib/llvm/include/llvm/Support/LockFileManager.h                                         |      74 +
 head/contrib/llvm/include/llvm/Support/SaveAndRestore.h                                          |      47 +
 head/contrib/llvm/include/llvm/Support/StreamableMemoryObject.h                                  |     181 +
 head/contrib/llvm/include/llvm/Support/YAMLParser.h                                              |     549 +
 head/contrib/llvm/include/llvm/Transforms/Utils/CmpInstAnalysis.h                                |      66 +
 head/contrib/llvm/include/llvm/Transforms/Utils/ModuleUtils.h                                    |      33 +
 head/contrib/llvm/include/llvm/Transforms/Vectorize.h                                            |     106 +
 head/contrib/llvm/lib/Analysis/CodeMetrics.cpp                                                   |     184 +
 head/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp                                     |     287 +
 head/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.h                                       |     290 +
 head/contrib/llvm/lib/CodeGen/DFAPacketizer.cpp                                                  |     223 +
 head/contrib/llvm/lib/CodeGen/JITCodeEmitter.cpp                                                 |      14 +
 head/contrib/llvm/lib/CodeGen/MachineBlockPlacement.cpp                                          |    1001 +
 head/contrib/llvm/lib/CodeGen/MachineCodeEmitter.cpp                                             |      14 +
 head/contrib/llvm/lib/CodeGen/MachineCopyPropagation.cpp                                         |     340 +
 head/contrib/llvm/lib/CodeGen/MachineInstrBundle.cpp                                             |     278 +
 head/contrib/llvm/lib/CodeGen/MachineScheduler.cpp                                               |     614 +
 head/contrib/llvm/lib/CodeGen/RegAllocBase.cpp                                                   |     280 +
 head/contrib/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp                             |     657 +
 head/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp                                   |     276 +
 head/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp                                |     631 +
 head/contrib/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp                                        |      45 +
 head/contrib/llvm/lib/CodeGen/TargetOptionsImpl.cpp                                              |      52 +
 head/contrib/llvm/lib/ExecutionEngine/EventListenerCommon.h                                      |      67 +
 head/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp                   |     183 +
 head/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.cpp                               |      14 +
 head/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp                   |     177 +
 head/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp                            |     263 +
 head/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp                             |     262 +
 head/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h                               |      62 +
 head/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h                             |      70 +
 head/contrib/llvm/lib/MC/MCDisassembler/EDMain.cpp                                               |     280 +
 head/contrib/llvm/lib/Support/DataStream.cpp                                                     |      98 +
 head/contrib/llvm/lib/Support/Hashing.cpp                                                        |      29 +
 head/contrib/llvm/lib/Support/IntrusiveRefCntPtr.cpp                                             |      14 +
 head/contrib/llvm/lib/Support/JSONParser.cpp                                                     |     302 +
 head/contrib/llvm/lib/Support/LockFileManager.cpp                                                |     216 +
 head/contrib/llvm/lib/Support/StreamableMemoryObject.cpp                                         |     140 +
 head/contrib/llvm/lib/Support/YAMLParser.cpp                                                     |    2117 +
 head/contrib/llvm/lib/TableGen/TableGenAction.cpp                                                |      15 +
 head/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.cpp                                      |      14 +
 head/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp                             |     283 +
 head/contrib/llvm/lib/Target/CellSPU/SPUMachineFunction.cpp                                      |      14 +
 head/contrib/llvm/lib/Target/Hexagon/Hexagon.h                                                   |      74 +
 head/contrib/llvm/lib/Target/Hexagon/Hexagon.td                                                  |      72 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp                                       |     313 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.h                                         |     165 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonCFGOptimizer.cpp                                     |     235 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonCallingConv.td                                       |      35 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.cpp                                 |     207 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.h                                   |     189 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp                              |     177 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp                                    |     332 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.h                                      |      50 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp                                    |     644 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp                                     |    1485 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp                                     |    1496 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.h                                       |     162 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonImmediates.td                                        |     508 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td                                      |     308 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV4.td                                    |      67 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp                                        |    2732 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.h                                          |     185 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.td                                         |    3052 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV3.td                                       |     137 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV4.td                                       |    5746 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsics.td                                        |    3462 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsDerived.td                                 |      29 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsV3.td                                      |      50 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsV4.td                                      |     369 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonMCInst.h                                             |      41 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp                                      |      93 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonMachineFunctionInfo.h                                |      75 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonPeephole.cpp                                         |     288 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp                                     |     315 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.h                                       |      90 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td                                      |     167 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp                                  |      82 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td                                          |      54 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td                                        |      59 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonSelectCCInfo.td                                      |     121 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp                                 |      46 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonSelectionDAGInfo.h                                   |      40 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp                                 |     129 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp                                        |      62 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.h                                          |      74 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp                                    |     145 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.h                                      |      83 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.cpp                                 |      94 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.h                                   |      40 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp                                   |    3642 +
 head/contrib/llvm/lib/Target/Hexagon/HexagonVarargsCallingConvention.h                           |     141 +
 head/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp                          |     198 +
 head/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h                            |      75 +
 head/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h                              |      70 +
 head/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp                           |      36 +
 head/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h                             |      30 +
 head/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp                        |      95 +
 head/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.h                          |      39 +
 head/contrib/llvm/lib/Target/Hexagon/TargetInfo/HexagonTargetInfo.cpp                            |      19 +
 head/contrib/llvm/lib/Target/MBlaze/MBlazeMachineFunction.cpp                                    |      14 +
 head/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeELFObjectWriter.cpp                       |      77 +
 head/contrib/llvm/lib/Target/MSP430/MSP430MachineFunctionInfo.cpp                                |      14 +
 head/contrib/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp                                    |      66 +
 head/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp                           |     249 +
 head/contrib/llvm/lib/Target/Mips/MipsAnalyzeImmediate.cpp                                       |     153 +
 head/contrib/llvm/lib/Target/Mips/MipsAnalyzeImmediate.h                                         |      63 +
 head/contrib/llvm/lib/Target/Mips/MipsCondMov.td                                                 |     194 +
 head/contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp                                        |      50 +
 head/contrib/llvm/lib/Target/PTX/PTXMachineFunctionInfo.cpp                                      |      14 +
 head/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp                         |     103 +
 head/contrib/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.cpp                                  |      15 +
 head/contrib/llvm/lib/Target/PowerPC/PPCSchedule440.td                                           |     616 +
 head/contrib/llvm/lib/Target/PowerPC/PPCScheduleA2.td                                            |     652 +
 head/contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.cpp                                  |      14 +
 head/contrib/llvm/lib/Target/TargetJITInfo.cpp                                                   |      14 +
 head/contrib/llvm/lib/Target/TargetMachineC.cpp                                                  |     197 +
 head/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp                             |     224 +
 head/contrib/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp                         |      65 +
 head/contrib/llvm/lib/Target/X86/X86InstrSVM.td                                                  |      62 +
 head/contrib/llvm/lib/Target/X86/X86InstrXOP.td                                                  |     307 +
 head/contrib/llvm/lib/Target/X86/X86MachineFunctionInfo.cpp                                      |      14 +
 head/contrib/llvm/lib/Target/X86/X86Schedule.td                                                  |     273 +
 head/contrib/llvm/lib/Target/X86/X86ScheduleAtom.td                                              |     305 +
 head/contrib/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.cpp                                  |      14 +
 head/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp                            |     937 +
 head/contrib/llvm/lib/Transforms/Instrumentation/FunctionBlackList.cpp                           |      79 +
 head/contrib/llvm/lib/Transforms/Instrumentation/FunctionBlackList.h                             |      37 +
 head/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp                             |     311 +
 head/contrib/llvm/lib/Transforms/Scalar/GlobalMerge.cpp                                          |     226 +
 head/contrib/llvm/lib/Transforms/Utils/CmpInstAnalysis.cpp                                       |      96 +
 head/contrib/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp                                     |     372 +
 head/contrib/llvm/lib/Transforms/Utils/ModuleUtils.cpp                                           |      64 +
 head/contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp                                       |    1907 +
 head/contrib/llvm/lib/Transforms/Vectorize/Vectorize.cpp                                         |      39 +
 head/contrib/llvm/tools/clang/include/clang/AST/BuiltinTypes.def                                 |     224 +
 head/contrib/llvm/tools/clang/include/clang/AST/DeclLookups.h                                    |      84 +
 head/contrib/llvm/tools/clang/include/clang/AST/LambdaMangleContext.h                            |      36 +
 head/contrib/llvm/tools/clang/include/clang/AST/NSAPI.h                                          |     152 +
 head/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/Dominators.h                       |     212 +
 head/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/PostOrderCFGView.h                 |     111 +
 head/contrib/llvm/tools/clang/include/clang/Analysis/CallGraph.h                                 |     257 +
 head/contrib/llvm/tools/clang/include/clang/Basic/AllDiagnostics.h                               |      39 +
 head/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsHexagon.def                            |     689 +
 head/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSerializationKinds.td                |      60 +
 head/contrib/llvm/tools/clang/include/clang/Basic/Lambda.h                                       |      38 +
 head/contrib/llvm/tools/clang/include/clang/Basic/Module.h                                       |     284 +
 head/contrib/llvm/tools/clang/include/clang/Edit/Commit.h                                        |     140 +
 head/contrib/llvm/tools/clang/include/clang/Edit/EditedSource.h                                  |      87 +
 head/contrib/llvm/tools/clang/include/clang/Edit/EditsReceiver.h                                 |      35 +
 head/contrib/llvm/tools/clang/include/clang/Edit/FileOffset.h                                    |      65 +
 head/contrib/llvm/tools/clang/include/clang/Edit/Rewriters.h                                     |      33 +
 head/contrib/llvm/tools/clang/include/clang/Frontend/ChainedIncludesSource.h                     |      75 +
 head/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticRenderer.h                        |     149 +
 head/contrib/llvm/tools/clang/include/clang/Frontend/LayoutOverrideSource.h                      |      61 +
 head/contrib/llvm/tools/clang/include/clang/Frontend/MigratorOptions.h                           |      31 +
 head/contrib/llvm/tools/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h               |      62 +
 head/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnostic.h                            |     120 +
 head/contrib/llvm/tools/clang/include/clang/Index/GlobalCallGraph.h                              |     149 +
 head/contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h                                      |     237 +
 head/contrib/llvm/tools/clang/include/clang/Serialization/SerializationDiagnostic.h              |      28 +
 head/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/CommonBugCategories.h        |      24 +
 head/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h  |     107 +
 head/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h |      43 +
 head/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h     |      40 +
 head/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintTag.h         |      27 +
 head/contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabase.h                        |     164 +
 head/contrib/llvm/tools/clang/include/clang/Tooling/Tooling.h                                    |     213 +
 head/contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp                                          |     226 +
 head/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCAttrs.cpp                                    |     358 +
 head/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCCalls.cpp                                    |      84 +
 head/contrib/llvm/tools/clang/lib/AST/LambdaMangleContext.cpp                                    |      30 +
 head/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp                                                  |     312 +
 head/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp                               |     463 +
 head/contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp                                         |     184 +
 head/contrib/llvm/tools/clang/lib/Analysis/Dominators.cpp                                        |      14 +
 head/contrib/llvm/tools/clang/lib/Analysis/PostOrderCFGView.cpp                                  |      49 +
 head/contrib/llvm/tools/clang/lib/Basic/Module.cpp                                               |     274 +
 head/contrib/llvm/tools/clang/lib/Driver/WindowsToolChain.cpp                                    |     368 +
 head/contrib/llvm/tools/clang/lib/Edit/Commit.cpp                                                |     345 +
 head/contrib/llvm/tools/clang/lib/Edit/EditedSource.cpp                                          |     329 +
 head/contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp                              |     587 +
 head/contrib/llvm/tools/clang/lib/Frontend/ChainedDiagnosticConsumer.cpp                         |      14 +
 head/contrib/llvm/tools/clang/lib/Frontend/ChainedIncludesSource.cpp                             |     240 +
 head/contrib/llvm/tools/clang/lib/Frontend/DependencyGraph.cpp                                   |     140 +
 head/contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp                                |     386 +
 head/contrib/llvm/tools/clang/lib/Frontend/LayoutOverrideSource.cpp                              |     206 +
 head/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp                       |     592 +
 head/contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp                                    |     881 +
 head/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h                                           |     961 +
 head/contrib/llvm/tools/clang/lib/Headers/bmi2intrin.h                                           |      75 +
 head/contrib/llvm/tools/clang/lib/Headers/bmiintrin.h                                            |     115 +
 head/contrib/llvm/tools/clang/lib/Headers/cpuid.h                                                |      33 +
 head/contrib/llvm/tools/clang/lib/Headers/fma4intrin.h                                           |     231 +
 head/contrib/llvm/tools/clang/lib/Headers/lzcntintrin.h                                          |      55 +
 head/contrib/llvm/tools/clang/lib/Headers/module.map                                             |     108 +
 head/contrib/llvm/tools/clang/lib/Headers/popcntintrin.h                                         |      45 +
 head/contrib/llvm/tools/clang/lib/Headers/unwind.h                                               |     124 +
 head/contrib/llvm/tools/clang/lib/Index/GlobalCallGraph.cpp                                      |     152 +
 head/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp                                              |    1437 +
 head/contrib/llvm/tools/clang/lib/Lex/PPCallbacks.cpp                                            |      14 +
 head/contrib/llvm/tools/clang/lib/Rewrite/RewriteModernObjC.cpp                                  |    7275 +
 head/contrib/llvm/tools/clang/lib/Sema/SemaConsumer.cpp                                          |      14 +
 head/contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp                                            |     820 +
 head/contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp                                      |    1351 +
 head/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp              |     157 +
 head/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp               |     191 +
 head/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp               |     233 +
 head/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CommonBugCategories.cpp                |      18 +
 head/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp                |     740 +
 head/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h                      |      22 +
 head/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp                |     211 +
 head/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp           |     174 +
 head/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp              |     159 +
 head/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp                 |      62 +
 head/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp                 |     241 +
 head/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/FunctionSummary.cpp                        |      38 +
 head/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SubEngine.cpp                              |      14 +
 head/contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp                                |     230 +
 head/contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp                                            |     296 +
 head/contrib/llvm/tools/llvm-readobj/llvm-readobj.cpp                                            |     218 +
 head/contrib/llvm/tools/llvm-stress/llvm-stress.cpp                                              |     702 +
 head/contrib/llvm/utils/TableGen/DFAPacketizerEmitter.cpp                                        |     512 +
 head/contrib/llvm/utils/TableGen/DFAPacketizerEmitter.h                                          |      52 +
 head/contrib/llvm/utils/TableGen/SequenceToOffsetTable.h                                         |     139 +
 head/contrib/llvm/utils/TableGen/X86ModRMFilters.cpp                                             |      26 +
 head/crypto/heimdal/admin/destroy.c                                                              |      52 +
 head/crypto/heimdal/appl/login/login-protos.h                                                    |      91 +
 head/crypto/heimdal/base/Makefile.am                                                             |      31 +
 head/crypto/heimdal/base/Makefile.in                                                             |     941 +
 head/crypto/heimdal/base/NTMakefile                                                              |      53 +
 head/crypto/heimdal/base/array.c                                                                 |     234 +
 head/crypto/heimdal/base/baselocl.h                                                              |     129 +
 head/crypto/heimdal/base/bool.c                                                                  |      58 +
 head/crypto/heimdal/base/dict.c                                                                  |     282 +
 head/crypto/heimdal/base/heimbase.c                                                              |     559 +
 head/crypto/heimdal/base/heimbase.h                                                              |     188 +
 head/crypto/heimdal/base/heimbasepriv.h                                                          |      91 +
 head/crypto/heimdal/base/heimqueue.h                                                             |     167 +
 head/crypto/heimdal/base/null.c                                                                  |      52 +
 head/crypto/heimdal/base/number.c                                                                |     127 +
 head/crypto/heimdal/base/string.c                                                                |     115 +
 head/crypto/heimdal/base/test_base.c                                                             |     152 +
 head/crypto/heimdal/base/version-script.map                                                      |      28 +
 head/crypto/heimdal/doc/copyright.texi                                                           |     518 +
 head/crypto/heimdal/doc/doxyout/gssapi/html/doxygen.css                                          |     473 +
 head/crypto/heimdal/doc/doxyout/gssapi/html/doxygen.png                                          |     Bin 
 head/crypto/heimdal/doc/doxyout/gssapi/html/graph_legend.dot                                     |      22 +
 head/crypto/heimdal/doc/doxyout/gssapi/html/graph_legend.html                                    |      88 +
 head/crypto/heimdal/doc/doxyout/gssapi/html/graph_legend.png                                     |     Bin 
 head/crypto/heimdal/doc/doxyout/gssapi/html/group__gssapi.html                                   |     892 +
 head/crypto/heimdal/doc/doxyout/gssapi/html/gssapi_mechs_intro.html                              |      30 +
 head/crypto/heimdal/doc/doxyout/gssapi/html/gssapi_services_intro.html                           |      43 +
 head/crypto/heimdal/doc/doxyout/gssapi/html/index.html                                           |      36 +
 head/crypto/heimdal/doc/doxyout/gssapi/html/internalvsmechname.html                              |      36 +
 head/crypto/heimdal/doc/doxyout/gssapi/html/modules.html                                         |      29 +
 head/crypto/heimdal/doc/doxyout/gssapi/html/pages.html                                           |      34 +
 head/crypto/heimdal/doc/doxyout/gssapi/html/tab_b.gif                                            |     Bin 
 head/crypto/heimdal/doc/doxyout/gssapi/html/tab_l.gif                                            |     Bin 
 head/crypto/heimdal/doc/doxyout/gssapi/html/tab_r.gif                                            |     Bin 
 head/crypto/heimdal/doc/doxyout/gssapi/html/tabs.css                                             |     102 +
 head/crypto/heimdal/doc/doxyout/gssapi/man/man3/__gss_c_attr_stream_sizes_oid_desc.3             |       1 +
 head/crypto/heimdal/doc/doxyout/gssapi/man/man3/gss_add_oid_set_member.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/gssapi/man/man3/gss_canonicalize_name.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/gssapi/man/man3/gss_import_name.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/gssapi/man/man3/gss_init_sec_context.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/gssapi/man/man3/gss_inquire_attrs_for_mech.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/gssapi/man/man3/gss_inquire_saslname_for_mech.3                  |       1 +
 head/crypto/heimdal/doc/doxyout/gssapi/man/man3/gss_oid_equal.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/gssapi/man/man3/gss_release_cred.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/gssapi/man/man3/gss_release_iov_buffer.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/gssapi/man/man3/gss_release_name.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/gssapi/man/man3/gss_unwrap_iov.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/gssapi/man/man3/gss_wrap.3                                       |       1 +
 head/crypto/heimdal/doc/doxyout/gssapi/man/man3/gss_wrap_iov.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/gssapi/man/man3/gss_wrap_iov_length.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/gssapi/man/man3/gssapi.3                                         |     389 +
 head/crypto/heimdal/doc/doxyout/gssapi/man/man3/gssapi_mechs_intro.3                             |      15 +
 head/crypto/heimdal/doc/doxyout/gssapi/man/man3/gssapi_services_intro.3                          |      65 +
 head/crypto/heimdal/doc/doxyout/gssapi/man/man3/internalvsmechname.3                             |      20 +
 head/crypto/heimdal/doc/doxyout/gssapi/manpages                                                  |      19 +
 head/crypto/heimdal/doc/doxyout/hcrypto/html/doxygen.css                                         |     473 +
 head/crypto/heimdal/doc/doxyout/hcrypto/html/doxygen.png                                         |     Bin 
 head/crypto/heimdal/doc/doxyout/hcrypto/html/example__evp__cipher_8c-example.html                |     173 +
 head/crypto/heimdal/doc/doxyout/hcrypto/html/examples.html                                       |      29 +
 head/crypto/heimdal/doc/doxyout/hcrypto/html/graph_legend.dot                                    |      22 +
 head/crypto/heimdal/doc/doxyout/hcrypto/html/graph_legend.html                                   |      88 +
 head/crypto/heimdal/doc/doxyout/hcrypto/html/graph_legend.png                                    |     Bin 
 head/crypto/heimdal/doc/doxyout/hcrypto/html/group__hcrypto__core.html                           |     190 +
 head/crypto/heimdal/doc/doxyout/hcrypto/html/group__hcrypto__des.html                            |     910 +
 head/crypto/heimdal/doc/doxyout/hcrypto/html/group__hcrypto__dh.html                             |     581 +
 head/crypto/heimdal/doc/doxyout/hcrypto/html/group__hcrypto__evp.html                            |    2366 +
 head/crypto/heimdal/doc/doxyout/hcrypto/html/group__hcrypto__misc.html                           |     106 +
 head/crypto/heimdal/doc/doxyout/hcrypto/html/group__hcrypto__rand.html                           |     425 +
 head/crypto/heimdal/doc/doxyout/hcrypto/html/group__hcrypto__rsa.html                            |     278 +
 head/crypto/heimdal/doc/doxyout/hcrypto/html/index.html                                          |      47 +
 head/crypto/heimdal/doc/doxyout/hcrypto/html/modules.html                                        |      35 +
 head/crypto/heimdal/doc/doxyout/hcrypto/html/page_des.html                                       |      45 +
 head/crypto/heimdal/doc/doxyout/hcrypto/html/page_dh.html                                        |      30 +
 head/crypto/heimdal/doc/doxyout/hcrypto/html/page_evp.html                                       |      30 +
 head/crypto/heimdal/doc/doxyout/hcrypto/html/page_rand.html                                      |      28 +
 head/crypto/heimdal/doc/doxyout/hcrypto/html/page_rsa.html                                       |      31 +
 head/crypto/heimdal/doc/doxyout/hcrypto/html/tab_b.gif                                           |     Bin 
 head/crypto/heimdal/doc/doxyout/hcrypto/html/tab_l.gif                                           |     Bin 
 head/crypto/heimdal/doc/doxyout/hcrypto/html/tab_r.gif                                           |     Bin 
 head/crypto/heimdal/doc/doxyout/hcrypto/html/tabs.css                                            |     102 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DES_cbc_cksum.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DES_cbc_encrypt.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DES_cfb64_encrypt.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DES_check_key_parity.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DES_ecb3_encrypt.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DES_ecb_encrypt.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DES_ede3_cbc_encrypt.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DES_encrypt.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DES_init_random_number_generator.3              |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DES_is_weak_key.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DES_key_sched.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DES_new_random_key.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DES_pcbc_encrypt.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DES_random_key.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DES_set_key.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DES_set_key_checked.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DES_set_key_unchecked.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DES_set_odd_parity.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DES_string_to_key.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DH_check_pubkey.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DH_compute_key.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DH_free.3                                       |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DH_generate_key.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DH_generate_parameters_ex.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DH_get_default_method.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DH_get_ex_data.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DH_ltm_method.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DH_new.3                                        |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DH_new_method.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DH_null_method.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DH_set_default_method.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DH_set_ex_data.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DH_set_method.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DH_size.3                                       |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/DH_up_ref.3                                     |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_BytesToKey.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_CIPHER_CTX_block_size.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_CIPHER_CTX_cipher.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_CIPHER_CTX_cleanup.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_CIPHER_CTX_ctrl.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_CIPHER_CTX_flags.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_CIPHER_CTX_get_app_data.3                   |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_CIPHER_CTX_init.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_CIPHER_CTX_iv_length.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_CIPHER_CTX_key_length.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_CIPHER_CTX_mode.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_CIPHER_CTX_rand_key.3                       |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_CIPHER_CTX_set_app_data.3                   |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_CIPHER_CTX_set_key_length.3                 |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_CIPHER_block_size.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_CIPHER_iv_length.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_CIPHER_key_length.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_CipherFinal_ex.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_CipherInit_ex.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_CipherUpdate.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_Digest.3                                    |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_DigestFinal_ex.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_DigestInit_ex.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_DigestUpdate.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_MD_CTX_block_size.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_MD_CTX_cleanup.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_MD_CTX_create.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_MD_CTX_destroy.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_MD_CTX_init.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_MD_CTX_md.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_MD_CTX_size.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_MD_block_size.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_MD_size.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_aes_128_cbc.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_aes_128_cfb8.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_aes_192_cbc.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_aes_192_cfb8.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_aes_256_cbc.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_aes_256_cfb8.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_camellia_128_cbc.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_camellia_192_cbc.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_camellia_256_cbc.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_des_cbc.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_des_ede3_cbc.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_enc_null.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_get_cipherbyname.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_hcrypto_aes_128_cbc.3                       |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_hcrypto_aes_128_cfb8.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_hcrypto_aes_192_cbc.3                       |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_hcrypto_aes_192_cfb8.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_hcrypto_aes_256_cbc.3                       |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_hcrypto_aes_256_cfb8.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_hcrypto_camellia_128_cbc.3                  |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_hcrypto_camellia_192_cbc.3                  |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_hcrypto_camellia_256_cbc.3                  |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_hcrypto_des_cbc.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_hcrypto_des_ede3_cbc.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_hcrypto_md2.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_hcrypto_md4.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_hcrypto_md5.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_hcrypto_rc2_40_cbc.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_hcrypto_rc2_64_cbc.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_hcrypto_rc2_cbc.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_hcrypto_sha1.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_hcrypto_sha256.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_hcrypto_sha384.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_hcrypto_sha512.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_md2.3                                       |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_md4.3                                       |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_md5.3                                       |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_md_null.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_rc2_40_cbc.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_rc2_64_cbc.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_rc2_cbc.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_rc4.3                                       |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_rc4_40.3                                    |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_sha.3                                       |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_sha1.3                                      |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_sha256.3                                    |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_sha384.3                                    |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_sha512.3                                    |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/EVP_wincrypt_des_ede3_cbc.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/OpenSSL_add_all_algorithms.3                    |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/OpenSSL_add_all_algorithms_conf.3               |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/OpenSSL_add_all_algorithms_noconf.3             |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/PKCS5_PBKDF2_HMAC_SHA1.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/RAND_add.3                                      |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/RAND_bytes.3                                    |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/RAND_cleanup.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/RAND_file_name.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/RAND_get_rand_method.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/RAND_load_file.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/RAND_pseudo_bytes.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/RAND_seed.3                                     |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/RAND_set_rand_engine.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/RAND_set_rand_method.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/RAND_status.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/RAND_write_file.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/RSA_free.3                                      |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/RSA_get_app_data.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/RSA_get_method.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/RSA_new.3                                       |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/RSA_new_method.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/RSA_set_app_data.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/RSA_set_method.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/RSA_up_ref.3                                    |       1 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/hcrypto_core.3                                  |      76 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/hcrypto_des.3                                   |     392 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/hcrypto_dh.3                                    |     310 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/hcrypto_evp.3                                   |    1299 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/hcrypto_misc.3                                  |      44 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/hcrypto_rand.3                                  |     200 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/hcrypto_rsa.3                                   |     152 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/page_des.3                                      |      35 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/page_dh.3                                       |      10 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/page_evp.3                                      |       9 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/page_rand.3                                     |       6 +
 head/crypto/heimdal/doc/doxyout/hcrypto/man/man3/page_rsa.3                                      |      12 +
 head/crypto/heimdal/doc/doxyout/hcrypto/manpages                                                 |     153 +
 head/crypto/heimdal/doc/doxyout/hdb/html/annotated.html                                          |      35 +
 head/crypto/heimdal/doc/doxyout/hdb/html/doxygen.css                                             |     473 +
 head/crypto/heimdal/doc/doxyout/hdb/html/doxygen.png                                             |     Bin 
 head/crypto/heimdal/doc/doxyout/hdb/html/functions.html                                          |      85 +
 head/crypto/heimdal/doc/doxyout/hdb/html/functions_vars.html                                     |      85 +
 head/crypto/heimdal/doc/doxyout/hdb/html/graph_legend.dot                                        |      22 +
 head/crypto/heimdal/doc/doxyout/hdb/html/graph_legend.html                                       |      87 +
 head/crypto/heimdal/doc/doxyout/hdb/html/graph_legend.png                                        |     Bin 
 head/crypto/heimdal/doc/doxyout/hdb/html/index.html                                              |      33 +
 head/crypto/heimdal/doc/doxyout/hdb/html/struct_h_d_b.html                                       |     430 +
 head/crypto/heimdal/doc/doxyout/hdb/html/structhdb__entry__ex.html                               |      39 +
 head/crypto/heimdal/doc/doxyout/hdb/html/tab_b.gif                                               |     Bin 
 head/crypto/heimdal/doc/doxyout/hdb/html/tab_l.gif                                               |     Bin 
 head/crypto/heimdal/doc/doxyout/hdb/html/tab_r.gif                                               |     Bin 
 head/crypto/heimdal/doc/doxyout/hdb/html/tabs.css                                                |     102 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/HDB.3                                               |     185 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb__del.3                                          |       1 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb__get.3                                          |       1 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb__put.3                                          |       1 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb_auth_status.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb_check_constrained_delegation.3                  |       1 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb_check_pkinit_ms_upn_match.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb_check_s4u2self.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb_close.3                                         |       1 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb_destroy.3                                       |       1 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb_entry_ex.3                                      |      17 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb_fetch_kvno.3                                    |       1 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb_firstkey.3                                      |       1 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb_free.3                                          |       1 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb_get_realms.3                                    |       1 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb_lock.3                                          |       1 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb_name.3                                          |       1 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb_nextkey.3                                       |       1 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb_open.3                                          |       1 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb_password.3                                      |       1 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb_remove.3                                        |       1 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb_rename.3                                        |       1 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb_store.3                                         |       1 +
 head/crypto/heimdal/doc/doxyout/hdb/man/man3/hdb_unlock.3                                        |       1 +
 head/crypto/heimdal/doc/doxyout/hdb/manpages                                                     |      24 +
 head/crypto/heimdal/doc/doxyout/hx509/html/doxygen.css                                           |     473 +
 head/crypto/heimdal/doc/doxyout/hx509/html/doxygen.png                                           |     Bin 
 head/crypto/heimdal/doc/doxyout/hx509/html/graph_legend.dot                                      |      22 +
 head/crypto/heimdal/doc/doxyout/hx509/html/graph_legend.html                                     |      88 +
 head/crypto/heimdal/doc/doxyout/hx509/html/graph_legend.png                                      |     Bin 
 head/crypto/heimdal/doc/doxyout/hx509/html/group__hx509.html                                     |      89 +
 head/crypto/heimdal/doc/doxyout/hx509/html/group__hx509__ca.html                                 |    1179 +
 head/crypto/heimdal/doc/doxyout/hx509/html/group__hx509__cert.html                               |    1425 +
 head/crypto/heimdal/doc/doxyout/hx509/html/group__hx509__cms.html                                |     504 +
 head/crypto/heimdal/doc/doxyout/hx509/html/group__hx509__crypto.html                             |      92 +
 head/crypto/heimdal/doc/doxyout/hx509/html/group__hx509__env.html                                |     311 +
 head/crypto/heimdal/doc/doxyout/hx509/html/group__hx509__error.html                              |     308 +
 head/crypto/heimdal/doc/doxyout/hx509/html/group__hx509__keyset.html                             |     781 +
 head/crypto/heimdal/doc/doxyout/hx509/html/group__hx509__lock.html                               |      29 +
 head/crypto/heimdal/doc/doxyout/hx509/html/group__hx509__misc.html                               |      88 +
 head/crypto/heimdal/doc/doxyout/hx509/html/group__hx509__name.html                               |     470 +
 head/crypto/heimdal/doc/doxyout/hx509/html/group__hx509__peer.html                               |     237 +
 head/crypto/heimdal/doc/doxyout/hx509/html/group__hx509__print.html                              |     452 +
 head/crypto/heimdal/doc/doxyout/hx509/html/group__hx509__query.html                              |      29 +
 head/crypto/heimdal/doc/doxyout/hx509/html/group__hx509__revoke.html                             |     378 +
 head/crypto/heimdal/doc/doxyout/hx509/html/group__hx509__verify.html                             |     714 +
 head/crypto/heimdal/doc/doxyout/hx509/html/index.html                                            |      35 +
 head/crypto/heimdal/doc/doxyout/hx509/html/modules.html                                          |      44 +
 head/crypto/heimdal/doc/doxyout/hx509/html/page_ca.html                                          |      26 +
 head/crypto/heimdal/doc/doxyout/hx509/html/page_cert.html                                        |      28 +
 head/crypto/heimdal/doc/doxyout/hx509/html/page_cms.html                                         |      30 +
 head/crypto/heimdal/doc/doxyout/hx509/html/page_env.html                                         |      26 +
 head/crypto/heimdal/doc/doxyout/hx509/html/page_error.html                                       |      26 +
 head/crypto/heimdal/doc/doxyout/hx509/html/page_keyset.html                                      |      29 +
 head/crypto/heimdal/doc/doxyout/hx509/html/page_lock.html                                        |      26 +
 head/crypto/heimdal/doc/doxyout/hx509/html/page_name.html                                        |      32 +
 head/crypto/heimdal/doc/doxyout/hx509/html/page_peer.html                                        |      27 +
 head/crypto/heimdal/doc/doxyout/hx509/html/page_print.html                                       |      26 +
 head/crypto/heimdal/doc/doxyout/hx509/html/page_revoke.html                                      |      28 +
 head/crypto/heimdal/doc/doxyout/hx509/html/pages.html                                            |      50 +
 head/crypto/heimdal/doc/doxyout/hx509/html/tab_b.gif                                             |     Bin 
 head/crypto/heimdal/doc/doxyout/hx509/html/tab_l.gif                                             |     Bin 
 head/crypto/heimdal/doc/doxyout/hx509/html/tab_r.gif                                             |     Bin 
 head/crypto/heimdal/doc/doxyout/hx509/html/tabs.css                                              |     102 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509.3                                           |      45 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_bitstring_print.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca.3                                        |     573 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_sign.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_sign_self.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_add_crl_dp_uri.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_add_eku.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_add_san_hostname.3                   |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_add_san_jid.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_add_san_ms_upn.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_add_san_otherName.3                  |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_add_san_pkinit.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_add_san_rfc822name.3                 |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_free.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_init.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_set_ca.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_set_domaincontroller.3               |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_set_notAfter.3                       |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_set_notAfter_lifetime.3              |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_set_notBefore.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_set_proxy.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_set_serialnumber.3                   |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_set_spki.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_set_subject.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_set_template.3                       |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_set_unique.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_subject_expand.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ca_tbs_template_units.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert.3                                      |     700 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert_binary.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert_check_eku.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert_cmp.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert_find_subjectAltName_otherName.3        |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert_free.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert_get_SPKI.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert_get_SPKI_AlgorithmIdentifier.3         |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert_get_attribute.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert_get_base_subject.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert_get_friendly_name.3                    |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert_get_issuer.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert_get_issuer_unique_id.3                 |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert_get_notAfter.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert_get_notBefore.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert_get_serialnumber.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert_get_subject.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert_get_subject_unique_id.3                |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert_init.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert_init_data.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert_keyusage_print.3                       |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert_ref.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cert_set_friendly_name.3                    |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_certs_add.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_certs_append.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_certs_end_seq.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_certs_filter.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_certs_find.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_certs_free.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_certs_info.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_certs_init.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_certs_iter_f.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_certs_merge.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_certs_next_cert.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_certs_start_seq.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_certs_store.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ci_print_names.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_clear_error_string.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cms.3                                       |     206 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cms_create_signed_1.3                       |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cms_envelope_1.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cms_unenvelope.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cms_unwrap_ContentInfo.3                    |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cms_verify_signed.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_cms_wrap_ContentInfo.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_context_free.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_context_init.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_context_set_missing_revoke.3                |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_crl_add_revoked_certs.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_crl_alloc.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_crl_free.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_crl_lifetime.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_crl_sign.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_crypto.3                                    |      40 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_env.3                                       |     143 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_env_add.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_env_add_binding.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_env_find.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_env_find_binding.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_env_free.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_env_lfind.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_err.3                                       |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_error.3                                     |     129 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_free_error_string.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_free_octet_string_list.3                    |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_general_name_unparse.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_get_error_string.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_get_one_cert.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_keyset.3                                    |     373 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_lock.3                                      |       5 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_misc.3                                      |      40 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_name.3                                      |     235 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_name_binary.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_name_cmp.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_name_copy.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_name_expand.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_name_free.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_name_is_null_p.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_name_to_Name.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_name_to_string.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ocsp_request.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_ocsp_verify.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_oid_print.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_oid_sprint.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_parse_name.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_peer.3                                      |     113 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_peer_info_add_cms_alg.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_peer_info_alloc.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_peer_info_free.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_peer_info_set_cert.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_peer_info_set_cms_algs.3                    |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_print.3                                     |     211 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_print_cert.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_print_stdout.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_query.3                                     |       5 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_query_alloc.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_query_free.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_query_match_cmp_func.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_query_match_eku.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_query_match_friendly_name.3                 |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_query_match_issuer_serial.3                 |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_query_match_option.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_query_statistic_file.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_query_unparse_stats.3                       |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_revoke.3                                    |     171 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_revoke_add_crl.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_revoke_add_ocsp.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_revoke_free.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_revoke_init.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_revoke_ocsp_print.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_revoke_verify.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_set_error_string.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_set_error_stringv.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_unparse_der_name.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_validate_cert.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_validate_ctx_add_flags.3                    |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_validate_ctx_free.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_validate_ctx_init.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_validate_ctx_set_print.3                    |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_verify.3                                    |     309 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_verify_attach_anchors.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_verify_attach_revoke.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_verify_ctx_f_allow_default_trustanchors.3   |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_verify_destroy_ctx.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_verify_hostname.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_verify_init_ctx.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_verify_path.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_verify_set_max_depth.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_verify_set_proxy_certificate.3              |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_verify_set_strict_rfc3280_verification.3    |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_verify_set_time.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_verify_signature.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/hx509_xfree.3                                     |       1 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/page_ca.3                                         |       6 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/page_cert.3                                       |      10 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/page_cms.3                                        |      18 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/page_env.3                                        |       6 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/page_error.3                                      |       6 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/page_keyset.3                                     |      25 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/page_lock.3                                       |       6 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/page_name.3                                       |      18 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/page_peer.3                                       |       8 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/page_print.3                                      |       6 +
 head/crypto/heimdal/doc/doxyout/hx509/man/man3/page_revoke.3                                     |      10 +
 head/crypto/heimdal/doc/doxyout/hx509/manpages                                                   |     172 +
 head/crypto/heimdal/doc/doxyout/krb5/html/annotated.html                                         |      35 +
 head/crypto/heimdal/doc/doxyout/krb5/html/doxygen.css                                            |     473 +
 head/crypto/heimdal/doc/doxyout/krb5/html/doxygen.png                                            |     Bin 
 head/crypto/heimdal/doc/doxyout/krb5/html/graph_legend.dot                                       |      22 +
 head/crypto/heimdal/doc/doxyout/krb5/html/graph_legend.html                                      |      89 +
 head/crypto/heimdal/doc/doxyout/krb5/html/graph_legend.png                                       |     Bin 
 head/crypto/heimdal/doc/doxyout/krb5/html/group__krb5.html                                       |    2237 +
 head/crypto/heimdal/doc/doxyout/krb5/html/group__krb5__address.html                              |    1003 +
 head/crypto/heimdal/doc/doxyout/krb5/html/group__krb5__auth.html                                 |     320 +
 head/crypto/heimdal/doc/doxyout/krb5/html/group__krb5__ccache.html                               |    2264 +
 head/crypto/heimdal/doc/doxyout/krb5/html/group__krb5__credential.html                           |     858 +
 head/crypto/heimdal/doc/doxyout/krb5/html/group__krb5__crypto.html                               |    1262 +
 head/crypto/heimdal/doc/doxyout/krb5/html/group__krb5__deprecated.html                           |    1289 +
 head/crypto/heimdal/doc/doxyout/krb5/html/group__krb5__digest.html                               |      87 +
 head/crypto/heimdal/doc/doxyout/krb5/html/group__krb5__error.html                                |     239 +
 head/crypto/heimdal/doc/doxyout/krb5/html/group__krb5__keytab.html                               |    1055 +
 head/crypto/heimdal/doc/doxyout/krb5/html/group__krb5__pac.html                                  |     155 +
 head/crypto/heimdal/doc/doxyout/krb5/html/group__krb5__principal.html                            |    1180 +
 head/crypto/heimdal/doc/doxyout/krb5/html/group__krb5__storage.html                              |    2092 +
 head/crypto/heimdal/doc/doxyout/krb5/html/group__krb5__support.html                              |    1320 +
 head/crypto/heimdal/doc/doxyout/krb5/html/group__krb5__ticket.html                               |      73 +
 head/crypto/heimdal/doc/doxyout/krb5/html/group__krb5__v4compat.html                             |     134 +
 head/crypto/heimdal/doc/doxyout/krb5/html/index.html                                             |      37 +
 head/crypto/heimdal/doc/doxyout/krb5/html/krb5_ccache_intro.html                                 |      74 +
 head/crypto/heimdal/doc/doxyout/krb5/html/krb5_fileformats.html                                  |     154 +
 head/crypto/heimdal/doc/doxyout/krb5/html/krb5_init_creds_intro.html                             |      29 +
 head/crypto/heimdal/doc/doxyout/krb5/html/krb5_introduction.html                                 |     204 +
 head/crypto/heimdal/doc/doxyout/krb5/html/krb5_keytab_intro.html                                 |      82 +
 head/crypto/heimdal/doc/doxyout/krb5/html/krb5_principal_intro.html                              |      32 +
 head/crypto/heimdal/doc/doxyout/krb5/html/modules.html                                           |      45 +
 head/crypto/heimdal/doc/doxyout/krb5/html/pages.html                                             |      41 +
 head/crypto/heimdal/doc/doxyout/krb5/html/structkrb5__crypto__iov.html                           |      40 +
 head/crypto/heimdal/doc/doxyout/krb5/html/tab_b.gif                                              |     Bin 
 head/crypto/heimdal/doc/doxyout/krb5/html/tab_l.gif                                              |     Bin 
 head/crypto/heimdal/doc/doxyout/krb5/html/tab_r.gif                                              |     Bin 
 head/crypto/heimdal/doc/doxyout/krb5/html/tabs.css                                               |     102 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5.3                                             |    1092 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb524_convert_creds_kdc.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb524_convert_creds_kdc_ccache.3                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_acc_ops.3                                     |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_acl_match_file.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_acl_match_string.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_add_et_list.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_add_extra_addresses.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_add_ignore_addresses.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_addr2sockaddr.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_address.3                                     |     461 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_address_compare.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_address_order.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_address_prefixlen_boundary.3                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_address_search.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_allow_weak_crypto.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_anyaddr.3                                     |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_append_addresses.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_auth.3                                        |     138 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_auth_getremoteseqnumber.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_build_principal.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_c_enctype_compare.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_cache_end_seq_get.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_cache_get_first.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_cache_match.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_cache_next.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_clear_mcred.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_close.3                                    |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_copy_cache.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_copy_creds.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_copy_match_f.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_default.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_default_name.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_destroy.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_end_seq_get.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_gen_new.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_get_config.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_get_flags.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_get_friendly_name.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_get_full_name.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_get_kdc_offset.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_get_lifetime.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_get_name.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_get_ops.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_get_prefix_ops.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_get_principal.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_get_type.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_get_version.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_initialize.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_last_change_time.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_move.3                                     |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_new_unique.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_next_cred.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_register.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_remove_cred.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_resolve.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_retrieve_cred.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_set_config.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_set_default_name.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_set_flags.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_set_friendly_name.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_set_kdc_offset.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_start_seq_get.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_store_cred.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_support_switch.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cc_switch.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ccache.3                                      |     888 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ccache_intro.3                                |      69 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cccol_cursor_free.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cccol_cursor_new.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cccol_cursor_next.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cccol_last_change_time.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_change_password.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_cksumtype_to_enctype.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_clear_error_message.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_clear_error_string.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_compare_creds.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_config_file_free.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_config_free_strings.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_config_get_bool.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_config_get_bool_default.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_config_get_list.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_config_get_string.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_config_get_string_default.3                   |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_config_get_strings.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_config_get_time.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_config_get_time_default.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_config_parse_file_multi.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_config_parse_string_multi.3                   |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_config_vget_bool.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_config_vget_bool_default.3                    |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_config_vget_list.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_config_vget_string.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_config_vget_string_default.3                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_config_vget_strings.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_config_vget_time.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_config_vget_time_default.3                    |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_copy_address.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_copy_addresses.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_copy_context.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_copy_creds.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_copy_creds_contents.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_copy_data.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_copy_host_realm.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_copy_keyblock.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_copy_keyblock_contents.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_copy_principal.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_copy_ticket.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_create_checksum_iov.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_credential.3                                  |     279 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_creds_get_ticket_flags.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_crypto.3                                      |     550 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_crypto_destroy.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_crypto_fx_cf2.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_crypto_getblocksize.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_crypto_getconfoundersize.3                    |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_crypto_getenctype.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_crypto_getpadsize.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_crypto_init.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_crypto_iov.3                                  |      17 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_data_alloc.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_data_cmp.3                                    |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_data_copy.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_data_ct_cmp.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_data_free.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_data_realloc.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_data_zero.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_decrypt_iov_ivec.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_deprecated.3                                  |     269 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_digest.3                                      |      38 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_digest_probe.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_eai_to_heim_errno.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_encrypt_iov_ivec.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_enctype_disable.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_enctype_enable.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_enctype_valid.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_enctypes_compatible_keys.3                    |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_error.3                                       |     105 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_expand_hostname.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_expand_hostname_realms.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_fcc_ops.3                                     |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_fileformats.3                                 |     233 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_free_address.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_free_addresses.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_free_config_files.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_free_context.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_free_cred_contents.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_free_creds.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_free_creds_contents.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_free_data.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_free_data_contents.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_free_error_string.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_free_host_realm.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_free_keyblock.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_free_keyblock_contents.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_free_principal.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_free_ticket.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_free_unparsed_name.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_fwd_tgt_creds.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_generate_subkey.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_generate_subkey_extended.3                    |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_cred_from_kdc.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_cred_from_kdc_opt.3                       |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_default_config_files.3                    |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_default_in_tkt_etypes.3                   |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_dns_canonicalize_hostname.3               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_extra_addresses.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_fcache_version.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_forwarded_creds.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_ignore_addresses.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_in_tkt_with_keytab.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_in_tkt_with_password.3                    |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_in_tkt_with_skey.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_init_creds_keyblock.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_init_creds_keytab.3                       |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_init_creds_opt_alloc.3                    |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_init_creds_opt_free.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_init_creds_opt_get_error.3                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_init_creds_opt_init.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_init_creds_password.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_kdc_sec_offset.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_max_time_skew.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_use_admin_kdc.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_get_validated_creds.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_h_addr2addr.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_h_addr2sockaddr.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_h_errno_to_heim_errno.3                       |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_init_context.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_init_creds_free.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_init_creds_get.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_init_creds_get_error.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_init_creds_init.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_init_creds_intro.3                            |       8 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_init_creds_set_keytab.3                       |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_init_creds_set_password.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_init_creds_set_service.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_init_creds_step.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_init_ets.3                                    |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_introduction.3                                |     259 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_is_config_principal.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_is_thread_safe.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kerberos_enctypes.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_keyblock_get_enctype.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_keyblock_init.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_keyblock_zero.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_keytab.3                                      |     486 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_keytab_intro.3                                |      74 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_keytab_key_proc.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_keytype_to_enctypes.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_keytype_to_enctypes_default.3                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_keytype_to_string.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_krbhst_get_addrinfo.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kt_add_entry.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kt_close.3                                    |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kt_compare.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kt_copy_entry_contents.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kt_default.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kt_default_modify_name.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kt_default_name.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kt_destroy.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kt_end_seq_get.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kt_free_entry.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kt_get_entry.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kt_get_full_name.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kt_get_name.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kt_get_type.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kt_have_content.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kt_next_entry.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kt_read_service_key.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kt_register.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kt_remove_entry.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kt_resolve.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kt_start_seq_get.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_kuserok.3                                     |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_make_addrport.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_make_principal.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_max_sockaddr_size.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_mcc_ops.3                                     |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_pac.3                                         |      66 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_pac_get_buffer.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_pac_verify.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_parse_address.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_parse_name.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_parse_name_flags.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_parse_nametype.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_password_key_proc.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_plugin_register.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_prepend_config_files_default.3                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_princ_realm.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_princ_set_realm.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_principal.3                                   |     519 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_principal_compare.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_principal_compare_any_realm.3                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_principal_get_num_comp.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_principal_get_realm.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_principal_get_type.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_principal_intro.3                             |      17 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_principal_is_krbtgt.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_principal_match.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_principal_set_realm.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_principal_set_type.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_print_address.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_random_to_key.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_rd_req_ctx.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_rd_req_in_ctx_alloc.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_rd_req_in_set_keytab.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_rd_req_in_set_pac_check.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_rd_req_out_ctx_free.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_rd_req_out_get_server.3                       |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_realm_compare.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ret_address.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ret_addrs.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ret_authdata.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ret_creds.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ret_creds_tag.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ret_data.3                                    |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ret_int16.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ret_int32.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ret_int8.3                                    |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ret_keyblock.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ret_principal.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ret_string.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ret_stringz.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ret_times.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ret_uint16.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ret_uint32.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ret_uint8.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_set_config_files.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_set_default_in_tkt_etypes.3                   |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_set_dns_canonicalize_hostname.3               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_set_error_message.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_set_error_string.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_set_extra_addresses.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_set_fcache_version.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_set_home_dir_access.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_set_ignore_addresses.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_set_kdc_sec_offset.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_set_max_time_skew.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_set_password.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_set_real_time.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_set_use_admin_kdc.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_sname_to_principal.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_sockaddr2address.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_sockaddr2port.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_sockaddr_uninteresting.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_storage.3                                     |    1055 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_storage_clear_flags.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_storage_emem.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_storage_free.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_storage_from_data.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_storage_from_fd.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_storage_from_mem.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_storage_from_readonly_mem.3                   |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_storage_get_byteorder.3                       |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_storage_get_eof_code.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_storage_is_flags.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_storage_read.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_storage_seek.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_storage_set_byteorder.3                       |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_storage_set_eof_code.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_storage_set_flags.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_storage_set_max_alloc.3                       |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_storage_to_data.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_storage_truncate.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_storage_write.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_store_address.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_store_addrs.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_store_authdata.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_store_creds.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_store_creds_tag.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_store_data.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_store_int16.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_store_int32.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_store_int8.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_store_keyblock.3                              |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_store_principal.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_store_string.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_store_stringz.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_store_times.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_store_uint16.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_store_uint32.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_store_uint8.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_string_to_keytype.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_support.3                                     |     650 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ticket.3                                      |      34 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ticket_get_authorization_data_type.3          |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ticket_get_client.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ticket_get_endtime.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ticket_get_flags.3                            |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_ticket_get_server.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_unparse_name.3                                |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_unparse_name_fixed.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_unparse_name_fixed_flags.3                    |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_unparse_name_fixed_short.3                    |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_unparse_name_flags.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_unparse_name_short.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_v4compat.3                                    |      60 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_verify_checksum_iov.3                         |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_vset_error_string.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/man/man3/krb5_vwarn.3                                       |       1 +
 head/crypto/heimdal/doc/doxyout/krb5/manpages                                                    |     360 +
 head/crypto/heimdal/doc/doxyout/ntlm/html/annotated.html                                         |      39 +
 head/crypto/heimdal/doc/doxyout/ntlm/html/doxygen.css                                            |     473 +
 head/crypto/heimdal/doc/doxyout/ntlm/html/doxygen.png                                            |     Bin 
 head/crypto/heimdal/doc/doxyout/ntlm/html/examples.html                                          |      30 +
 head/crypto/heimdal/doc/doxyout/ntlm/html/functions.html                                         |      78 +
 head/crypto/heimdal/doc/doxyout/ntlm/html/functions_vars.html                                    |      78 +
 head/crypto/heimdal/doc/doxyout/ntlm/html/graph_legend.dot                                       |      22 +
 head/crypto/heimdal/doc/doxyout/ntlm/html/graph_legend.html                                      |      89 +
 head/crypto/heimdal/doc/doxyout/ntlm/html/graph_legend.png                                       |     Bin 
 head/crypto/heimdal/doc/doxyout/ntlm/html/group__ntlm__core.html                                 |     936 +
 head/crypto/heimdal/doc/doxyout/ntlm/html/index.html                                             |      37 +
 head/crypto/heimdal/doc/doxyout/ntlm/html/modules.html                                           |      30 +
 head/crypto/heimdal/doc/doxyout/ntlm/html/structntlm__buf.html                                   |      82 +
 head/crypto/heimdal/doc/doxyout/ntlm/html/structntlm__type1.html                                 |     118 +
 head/crypto/heimdal/doc/doxyout/ntlm/html/structntlm__type2.html                                 |     159 +
 head/crypto/heimdal/doc/doxyout/ntlm/html/structntlm__type2__coll__graph.map                     |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/html/structntlm__type2__coll__graph.md5                     |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/html/structntlm__type2__coll__graph.png                     |     Bin 
 head/crypto/heimdal/doc/doxyout/ntlm/html/structntlm__type3.html                                 |     194 +
 head/crypto/heimdal/doc/doxyout/ntlm/html/structntlm__type3__coll__graph.map                     |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/html/structntlm__type3__coll__graph.md5                     |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/html/structntlm__type3__coll__graph.png                     |     Bin 
 head/crypto/heimdal/doc/doxyout/ntlm/html/tab_b.gif                                              |     Bin 
 head/crypto/heimdal/doc/doxyout/ntlm/html/tab_l.gif                                              |     Bin 
 head/crypto/heimdal/doc/doxyout/ntlm/html/tab_r.gif                                              |     Bin 
 head/crypto/heimdal/doc/doxyout/ntlm/html/tabs.css                                               |     102 +
 head/crypto/heimdal/doc/doxyout/ntlm/html/test__ntlm_8c-example.html                             |     408 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/challenge.3                                        |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/context.3                                          |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/data.3                                             |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/domain.3                                           |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/flags.3                                            |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/heim_ntlm_build_ntlm1_master.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/heim_ntlm_build_ntlm2_master.3                     |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/heim_ntlm_calculate_lm2.3                          |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/heim_ntlm_calculate_ntlm1.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/heim_ntlm_calculate_ntlm2.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/heim_ntlm_decode_targetinfo.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/heim_ntlm_encode_targetinfo.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/heim_ntlm_encode_type1.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/heim_ntlm_encode_type2.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/heim_ntlm_encode_type3.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/heim_ntlm_free_buf.3                               |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/heim_ntlm_free_targetinfo.3                        |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/heim_ntlm_free_type1.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/heim_ntlm_free_type2.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/heim_ntlm_free_type3.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/heim_ntlm_keyex_unwrap.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/heim_ntlm_nt_key.3                                 |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/heim_ntlm_ntlmv2_key.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/heim_ntlm_verify_ntlm2.3                           |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/hostname.3                                         |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/length.3                                           |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/lm.3                                               |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/ntlm.3                                             |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/ntlm_buf.3                                         |      48 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/ntlm_core.3                                        |     421 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/ntlm_type1.3                                       |      68 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/ntlm_type2.3                                       |      80 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/ntlm_type3.3                                       |      96 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/os.3                                               |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/sessionkey.3                                       |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/targetinfo.3                                       |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/targetname.3                                       |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/username.3                                         |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/man/man3/ws.3                                               |       1 +
 head/crypto/heimdal/doc/doxyout/ntlm/manpages                                                    |      39 +
 head/crypto/heimdal/doc/doxyout/wind/html/doxygen.css                                            |     473 +
 head/crypto/heimdal/doc/doxyout/wind/html/doxygen.png                                            |     Bin 
 head/crypto/heimdal/doc/doxyout/wind/html/graph_legend.dot                                       |      22 +
 head/crypto/heimdal/doc/doxyout/wind/html/graph_legend.html                                      |      87 +
 head/crypto/heimdal/doc/doxyout/wind/html/graph_legend.png                                       |     Bin 
 head/crypto/heimdal/doc/doxyout/wind/html/group__wind.html                                       |     680 +
 head/crypto/heimdal/doc/doxyout/wind/html/index.html                                             |      30 +
 head/crypto/heimdal/doc/doxyout/wind/html/modules.html                                           |      28 +
 head/crypto/heimdal/doc/doxyout/wind/html/tab_b.gif                                              |     Bin 
 head/crypto/heimdal/doc/doxyout/wind/html/tab_l.gif                                              |     Bin 
 head/crypto/heimdal/doc/doxyout/wind/html/tab_r.gif                                              |     Bin 
 head/crypto/heimdal/doc/doxyout/wind/html/tabs.css                                               |     102 +
 head/crypto/heimdal/doc/doxyout/wind/man/man3/wind.3                                             |     326 +
 head/crypto/heimdal/doc/doxyout/wind/man/man3/wind_profile.3                                     |       1 +
 head/crypto/heimdal/doc/doxyout/wind/man/man3/wind_punycode_label_toascii.3                      |       1 +
 head/crypto/heimdal/doc/doxyout/wind/man/man3/wind_stringprep.3                                  |       1 +
 head/crypto/heimdal/doc/doxyout/wind/man/man3/wind_ucs2read.3                                    |       1 +
 head/crypto/heimdal/doc/doxyout/wind/man/man3/wind_ucs2utf8.3                                    |       1 +
 head/crypto/heimdal/doc/doxyout/wind/man/man3/wind_ucs2utf8_length.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/wind/man/man3/wind_ucs2write.3                                   |       1 +
 head/crypto/heimdal/doc/doxyout/wind/man/man3/wind_ucs4utf8.3                                    |       1 +
 head/crypto/heimdal/doc/doxyout/wind/man/man3/wind_ucs4utf8_length.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/wind/man/man3/wind_utf8ucs2.3                                    |       1 +
 head/crypto/heimdal/doc/doxyout/wind/man/man3/wind_utf8ucs2_length.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/wind/man/man3/wind_utf8ucs4.3                                    |       1 +
 head/crypto/heimdal/doc/doxyout/wind/man/man3/wind_utf8ucs4_length.3                             |       1 +
 head/crypto/heimdal/doc/doxyout/wind/manpages                                                    |      14 +
 head/crypto/heimdal/doc/gssapi.din                                                               |      16 +
 head/crypto/heimdal/doc/header.html                                                              |      10 +
 head/crypto/heimdal/doc/wind.din                                                                 |      15 +
 head/crypto/heimdal/include/crypto-headers.h                                                     |      57 +
 head/crypto/heimdal/include/heim_threads.h                                                       |     175 +
 head/crypto/heimdal/include/krb5-types.cross                                                     |      61 +
 head/crypto/heimdal/kadmin/rpc.c                                                                 |    1110 +
 head/crypto/heimdal/kcm/kcm-protos.h                                                             |     306 +
 head/crypto/heimdal/kcm/sessions.c                                                               |      83 +
 head/crypto/heimdal/kdc/announce.c                                                               |     544 +
 head/crypto/heimdal/kdc/digest-service.c                                                         |     282 +
 head/crypto/heimdal/kuser/kcc-commands.in                                                        |     239 +
 head/crypto/heimdal/kuser/kcc.c                                                                  |     165 +
 head/crypto/heimdal/kuser/kdigest.8                                                              |     256 +
 head/crypto/heimdal/kuser/kimpersonate.8                                                         |     107 +
 head/crypto/heimdal/kuser/kswitch.1                                                              |      85 +
 head/crypto/heimdal/kuser/kswitch.c                                                              |     172 +
 head/crypto/heimdal/lib/asn1/asn1-template.h                                                     |     141 +
 head/crypto/heimdal/lib/asn1/asn1parse.c                                                         |    2861 +
 head/crypto/heimdal/lib/asn1/asn1parse.h                                                         |     249 +
 head/crypto/heimdal/lib/asn1/asn1parse.y                                                         |    1037 +
 head/crypto/heimdal/lib/asn1/cms.asn1                                                            |     157 +
 head/crypto/heimdal/lib/asn1/cms.opt                                                             |       2 +
 head/crypto/heimdal/lib/asn1/der-private.h                                                       |      82 +
 head/crypto/heimdal/lib/asn1/gen_template.c                                                      |     918 +
 head/crypto/heimdal/lib/asn1/krb5.asn1                                                           |     825 +
 head/crypto/heimdal/lib/asn1/krb5.opt                                                            |       6 +
 head/crypto/heimdal/lib/asn1/template.c                                                          |    1119 +
 head/crypto/heimdal/lib/asn1/version-script.map                                                  |       6 +
 head/crypto/heimdal/lib/gssapi/gssapi/gssapi_ntlm.h                                              |      41 +
 head/crypto/heimdal/lib/gssapi/gssapi/gssapi_oid.h                                               |     245 +
 head/crypto/heimdal/lib/gssapi/gsstool.c                                                         |     255 +
 head/crypto/heimdal/lib/gssapi/krb5/aeap.c                                                       |     102 +
 head/crypto/heimdal/lib/gssapi/krb5/authorize_localname.c                                        |      66 +
 head/crypto/heimdal/lib/gssapi/krb5/creds.c                                                      |     255 +
 head/crypto/heimdal/lib/gssapi/krb5/pname_to_uid.c                                               |      85 +
 head/crypto/heimdal/lib/gssapi/krb5/store_cred.c                                                 |     116 +
 head/crypto/heimdal/lib/gssapi/mech/compat.h                                                     |      94 +
 head/crypto/heimdal/lib/gssapi/mech/context.c                                                    |     163 +
 head/crypto/heimdal/lib/gssapi/mech/context.h                                                    |      41 +
 head/crypto/heimdal/lib/gssapi/mech/cred.h                                                       |      57 +
 head/crypto/heimdal/lib/gssapi/mech/doxygen.c                                                    |     132 +
 head/crypto/heimdal/lib/gssapi/mech/gss_accept_sec_context.c                                     |     308 +
 head/crypto/heimdal/lib/gssapi/mech/gss_acquire_cred.c                                           |     168 +
 head/crypto/heimdal/lib/gssapi/mech/gss_acquire_cred_ext.c                                       |     193 +
 head/crypto/heimdal/lib/gssapi/mech/gss_acquire_cred_with_password.c                             |     118 +
 head/crypto/heimdal/lib/gssapi/mech/gss_add_cred.c                                               |     186 +
 head/crypto/heimdal/lib/gssapi/mech/gss_add_cred_with_password.c                                 |     150 +
 head/crypto/heimdal/lib/gssapi/mech/gss_add_oid_set_member.c                                     |      84 +
 head/crypto/heimdal/lib/gssapi/mech/gss_aeap.c                                                   |     216 +
 head/crypto/heimdal/lib/gssapi/mech/gss_authorize_localname.c                                    |     187 +
 head/crypto/heimdal/lib/gssapi/mech/gss_buffer_set.c                                             |     124 +
 head/crypto/heimdal/lib/gssapi/mech/gss_canonicalize_name.c                                      |     111 +
 head/crypto/heimdal/lib/gssapi/mech/gss_compare_name.c                                           |      76 +
 head/crypto/heimdal/lib/gssapi/mech/gss_context_time.c                                           |      40 +
 head/crypto/heimdal/lib/gssapi/mech/gss_create_empty_oid_set.c                                   |      51 +
 head/crypto/heimdal/lib/gssapi/mech/gss_cred.c                                                   |     224 +
 head/crypto/heimdal/lib/gssapi/mech/gss_decapsulate_token.c                                      |      72 +
 head/crypto/heimdal/lib/gssapi/mech/gss_delete_name_attribute.c                                  |      65 +
 head/crypto/heimdal/lib/gssapi/mech/gss_delete_sec_context.c                                     |      57 +
 head/crypto/heimdal/lib/gssapi/mech/gss_display_name.c                                           |      82 +
 head/crypto/heimdal/lib/gssapi/mech/gss_display_name_ext.c                                       |      68 +
 head/crypto/heimdal/lib/gssapi/mech/gss_display_status.c                                         |     211 +
 head/crypto/heimdal/lib/gssapi/mech/gss_duplicate_name.c                                         |      95 +
 head/crypto/heimdal/lib/gssapi/mech/gss_duplicate_oid.c                                          |      68 +
 head/crypto/heimdal/lib/gssapi/mech/gss_encapsulate_token.c                                      |      66 +
 head/crypto/heimdal/lib/gssapi/mech/gss_export_name.c                                            |      54 +
 head/crypto/heimdal/lib/gssapi/mech/gss_export_name_composite.c                                  |      66 +
 head/crypto/heimdal/lib/gssapi/mech/gss_export_sec_context.c                                     |      77 +
 head/crypto/heimdal/lib/gssapi/mech/gss_get_mic.c                                                |      51 +
 head/crypto/heimdal/lib/gssapi/mech/gss_get_name_attribute.c                                     |      81 +
 head/crypto/heimdal/lib/gssapi/mech/gss_import_name.c                                            |     291 +
 head/crypto/heimdal/lib/gssapi/mech/gss_import_sec_context.c                                     |      82 +
 head/crypto/heimdal/lib/gssapi/mech/gss_indicate_mechs.c                                         |      64 +
 head/crypto/heimdal/lib/gssapi/mech/gss_init_sec_context.c                                       |     212 +
 head/crypto/heimdal/lib/gssapi/mech/gss_inquire_context.c                                        |     105 +
 head/crypto/heimdal/lib/gssapi/mech/gss_inquire_cred.c                                           |     195 +
 head/crypto/heimdal/lib/gssapi/mech/gss_inquire_cred_by_mech.c                                   |      92 +
 head/crypto/heimdal/lib/gssapi/mech/gss_inquire_cred_by_oid.c                                    |      86 +
 head/crypto/heimdal/lib/gssapi/mech/gss_inquire_mechs_for_name.c                                 |      76 +
 head/crypto/heimdal/lib/gssapi/mech/gss_inquire_name.c                                           |      79 +
 head/crypto/heimdal/lib/gssapi/mech/gss_inquire_names_for_mech.c                                 |      73 +
 head/crypto/heimdal/lib/gssapi/mech/gss_inquire_sec_context_by_oid.c                             |      70 +
 head/crypto/heimdal/lib/gssapi/mech/gss_krb5.c                                                   |     941 +
 head/crypto/heimdal/lib/gssapi/mech/gss_mech_switch.c                                            |     438 +
 head/crypto/heimdal/lib/gssapi/mech/gss_mo.c                                                     |     635 +
 head/crypto/heimdal/lib/gssapi/mech/gss_names.c                                                  |     110 +
 head/crypto/heimdal/lib/gssapi/mech/gss_oid.c                                                    |     266 +
 head/crypto/heimdal/lib/gssapi/mech/gss_oid_equal.c                                              |      58 +
 head/crypto/heimdal/lib/gssapi/mech/gss_oid_to_str.c                                             |      98 +
 head/crypto/heimdal/lib/gssapi/mech/gss_pname_to_uid.c                                           |     167 +
 head/crypto/heimdal/lib/gssapi/mech/gss_process_context_token.c                                  |      41 +
 head/crypto/heimdal/lib/gssapi/mech/gss_pseudo_random.c                                          |      70 +
 head/crypto/heimdal/lib/gssapi/mech/gss_release_buffer.c                                         |      42 +
 head/crypto/heimdal/lib/gssapi/mech/gss_release_cred.c                                           |      73 +
 head/crypto/heimdal/lib/gssapi/mech/gss_release_name.c                                           |      74 +
 head/crypto/heimdal/lib/gssapi/mech/gss_release_oid.c                                            |      58 +
 head/crypto/heimdal/lib/gssapi/mech/gss_release_oid_set.c                                        |      44 +
 head/crypto/heimdal/lib/gssapi/mech/gss_seal.c                                                   |      45 +
 head/crypto/heimdal/lib/gssapi/mech/gss_set_cred_option.c                                        |     118 +
 head/crypto/heimdal/lib/gssapi/mech/gss_set_name_attribute.c                                     |      69 +
 head/crypto/heimdal/lib/gssapi/mech/gss_set_sec_context_option.c                                 |      70 +
 head/crypto/heimdal/lib/gssapi/mech/gss_sign.c                                                   |      41 +
 head/crypto/heimdal/lib/gssapi/mech/gss_store_cred.c                                             |      94 +
 head/crypto/heimdal/lib/gssapi/mech/gss_test_oid_set_member.c                                    |      46 +
 head/crypto/heimdal/lib/gssapi/mech/gss_unseal.c                                                 |      43 +
 head/crypto/heimdal/lib/gssapi/mech/gss_unwrap.c                                                 |      45 +
 head/crypto/heimdal/lib/gssapi/mech/gss_utils.c                                                  |      78 +
 head/crypto/heimdal/lib/gssapi/mech/gss_verify.c                                                 |      42 +
 head/crypto/heimdal/lib/gssapi/mech/gss_verify_mic.c                                             |      52 +
 head/crypto/heimdal/lib/gssapi/mech/gss_wrap.c                                                   |      71 +
 head/crypto/heimdal/lib/gssapi/mech/gss_wrap_size_limit.c                                        |      52 +
 head/crypto/heimdal/lib/gssapi/mech/gssapi.asn1                                                  |      12 +
 head/crypto/heimdal/lib/gssapi/mech/mech.5                                                       |      94 +
 head/crypto/heimdal/lib/gssapi/mech/mech.cat5                                                    |      61 +
 head/crypto/heimdal/lib/gssapi/mech/mech_locl.h                                                  |      81 +
 head/crypto/heimdal/lib/gssapi/mech/mech_switch.h                                                |      43 +
 head/crypto/heimdal/lib/gssapi/mech/mechqueue.h                                                  |      97 +
 head/crypto/heimdal/lib/gssapi/mech/name.h                                                       |      48 +
 head/crypto/heimdal/lib/gssapi/mech/utils.h                                                      |      33 +
 head/crypto/heimdal/lib/gssapi/ntlm/creds.c                                                      |     160 +
 head/crypto/heimdal/lib/gssapi/ntlm/inquire_sec_context_by_oid.c                                 |      90 +
 head/crypto/heimdal/lib/gssapi/ntlm/iter_cred.c                                                  |      99 +
 head/crypto/heimdal/lib/gssapi/ntlm/kdc.c                                                        |     438 +
 head/crypto/heimdal/lib/gssapi/spnego/spnego.opt                                                 |       1 +
 head/crypto/heimdal/lib/hdb/hdb-keytab.c                                                         |     231 +
 head/crypto/heimdal/lib/hdb/hdb-mitdb.c                                                          |     818 +
 head/crypto/heimdal/lib/hdb/hdb-sqlite.c                                                         |     879 +
 head/crypto/heimdal/lib/hdb/version-script.map                                                   |     107 +
 head/crypto/heimdal/lib/heimdal/NTMakefile                                                       |      93 +
 head/crypto/heimdal/lib/heimdal/dllmain.c                                                        |      40 +
 head/crypto/heimdal/lib/heimdal/heimdal-version.rc                                               |      36 +
 head/crypto/heimdal/lib/hx509/char_map.h                                                         |      45 +
 head/crypto/heimdal/lib/hx509/ocsp.opt                                                           |       2 +
 head/crypto/heimdal/lib/hx509/pkcs10.opt                                                         |       1 +
 head/crypto/heimdal/lib/hx509/quote.py                                                           |     101 +
 head/crypto/heimdal/lib/hx509/sel-gram.y                                                         |     114 +
 head/crypto/heimdal/lib/hx509/sel-lex.l                                                          |     140 +
 head/crypto/heimdal/lib/hx509/sel.c                                                              |     233 +
 head/crypto/heimdal/lib/hx509/sel.h                                                              |      82 +
 head/crypto/heimdal/lib/ipc/Makefile.am                                                          |      67 +
 head/crypto/heimdal/lib/ipc/Makefile.in                                                          |    1073 +
 head/crypto/heimdal/lib/ipc/client.c                                                             |     574 +
 head/crypto/heimdal/lib/ipc/common.c                                                             |     204 +
 head/crypto/heimdal/lib/ipc/heim-ipc.h                                                           |     130 +
 head/crypto/heimdal/lib/ipc/heim_ipc.defs                                                        |      66 +
 head/crypto/heimdal/lib/ipc/heim_ipc_async.defs                                                  |      56 +
 head/crypto/heimdal/lib/ipc/heim_ipc_reply.defs                                                  |      51 +
 head/crypto/heimdal/lib/ipc/heim_ipc_types.h                                                     |      44 +
 head/crypto/heimdal/lib/ipc/hi_locl.h                                                            |      83 +
 head/crypto/heimdal/lib/ipc/server.c                                                             |    1187 +
 head/crypto/heimdal/lib/ipc/tc.c                                                                 |     127 +
 head/crypto/heimdal/lib/ipc/ts-http.c                                                            |     136 +
 head/crypto/heimdal/lib/ipc/ts.c                                                                 |     105 +
 head/crypto/heimdal/lib/krb5/ccache_plugin.h                                                     |      39 +
 head/crypto/heimdal/lib/krb5/crypto-aes.c                                                        |     170 +
 head/crypto/heimdal/lib/krb5/crypto-algs.c                                                       |      87 +
 head/crypto/heimdal/lib/krb5/crypto-arcfour.c                                                    |     325 +
 head/crypto/heimdal/lib/krb5/crypto-des-common.c                                                 |     152 +
 head/crypto/heimdal/lib/krb5/crypto-des.c                                                        |     377 +
 head/crypto/heimdal/lib/krb5/crypto-des3.c                                                       |     226 +
 head/crypto/heimdal/lib/krb5/crypto-evp.c                                                        |     182 +
 head/crypto/heimdal/lib/krb5/crypto-null.c                                                       |      97 +
 head/crypto/heimdal/lib/krb5/crypto-pk.c                                                         |     301 +
 head/crypto/heimdal/lib/krb5/crypto-rand.c                                                       |     109 +
 head/crypto/heimdal/lib/krb5/crypto-stubs.c                                                      |     102 +
 head/crypto/heimdal/lib/krb5/crypto.h                                                            |     179 +
 head/crypto/heimdal/lib/krb5/deprecated.c                                                        |     609 +
 head/crypto/heimdal/lib/krb5/expand_path.c                                                       |     500 +
 head/crypto/heimdal/lib/krb5/pcache.c                                                            |      66 +
 head/crypto/heimdal/lib/krb5/salt-aes.c                                                          |     103 +
 head/crypto/heimdal/lib/krb5/salt-arcfour.c                                                      |     112 +
 head/crypto/heimdal/lib/krb5/salt-des.c                                                          |     224 +
 head/crypto/heimdal/lib/krb5/salt-des3.c                                                         |     150 +
 head/crypto/heimdal/lib/krb5/salt.c                                                              |     305 +
 head/crypto/heimdal/lib/krb5/scache.c                                                            |    1451 +
 head/crypto/heimdal/lib/krb5/send_to_kdc_plugin.h                                                |      58 +
 head/crypto/heimdal/lib/krb5/store-int.c                                                         |      58 +
 head/crypto/heimdal/lib/ntlm/ntlm_err.et                                                         |      24 +
 head/crypto/heimdal/lib/roken/cloexec.c                                                          |      66 +
 head/crypto/heimdal/lib/roken/ct.c                                                               |      64 +
 head/crypto/heimdal/lib/roken/doxygen.c                                                          |      44 +
 head/crypto/heimdal/lib/roken/qsort.c                                                            |     203 +
 head/crypto/heimdal/lib/roken/rand.c                                                             |      48 +
 head/crypto/heimdal/lib/roken/rkpty.c                                                            |     381 +
 head/crypto/heimdal/lib/roken/search.hin                                                         |      44 +
 head/crypto/heimdal/lib/roken/strerror_r.c                                                       |      84 +
 head/crypto/heimdal/lib/roken/tsearch.c                                                          |     180 +
 head/crypto/heimdal/lib/roken/version-script.map                                                 |     203 +
 head/crypto/heimdal/lib/roken/xfree.c                                                            |      42 +
 head/crypto/heimdal/lib/sqlite/Makefile.am                                                       |      13 +
 head/crypto/heimdal/lib/sqlite/Makefile.in                                                       |     881 +
 head/crypto/heimdal/lib/sqlite/NTMakefile                                                        |      47 +
 head/crypto/heimdal/lib/sqlite/sqlite3.c                                                         |  131072 ++++++++++
 head/crypto/heimdal/lib/sqlite/sqlite3.h                                                         |    6757 +
 head/crypto/heimdal/lib/sqlite/sqlite3ext.h                                                      |     426 +
 head/crypto/heimdal/lib/wind/ChangeLog                                                           |     136 +
 head/crypto/heimdal/lib/wind/CompositionExclusions-3.2.0.txt                                     |     176 +
 head/crypto/heimdal/lib/wind/DerivedNormalizationProps.txt                                       |    2574 +
 head/crypto/heimdal/lib/wind/Makefile.am                                                         |     149 +
 head/crypto/heimdal/lib/wind/Makefile.in                                                         |    1305 +
 head/crypto/heimdal/lib/wind/NTMakefile                                                          |     159 +
 head/crypto/heimdal/lib/wind/NormalizationCorrections.txt                                        |      43 +
 head/crypto/heimdal/lib/wind/NormalizationTest.txt                                               |   17166 +
 head/crypto/heimdal/lib/wind/UnicodeData.py                                                      |      57 +
 head/crypto/heimdal/lib/wind/UnicodeData.txt                                                     |   15100 +
 head/crypto/heimdal/lib/wind/bidi.c                                                              |      92 +
 head/crypto/heimdal/lib/wind/bidi_table.c                                                        |     411 +
 head/crypto/heimdal/lib/wind/bidi_table.h                                                        |      20 +
 head/crypto/heimdal/lib/wind/combining.c                                                         |      62 +
 head/crypto/heimdal/lib/wind/combining_table.c                                                   |     363 +
 head/crypto/heimdal/lib/wind/combining_table.h                                                   |      17 +
 head/crypto/heimdal/lib/wind/doxygen.c                                                           |      47 +
 head/crypto/heimdal/lib/wind/errorlist.c                                                         |      77 +
 head/crypto/heimdal/lib/wind/errorlist_table.c                                                   |      89 +
 head/crypto/heimdal/lib/wind/errorlist_table.h                                                   |      19 +
 head/crypto/heimdal/lib/wind/gen-bidi.py                                                         |     102 +
 head/crypto/heimdal/lib/wind/gen-combining.py                                                    |     105 +
 head/crypto/heimdal/lib/wind/gen-errorlist.py                                                    |     121 +
 head/crypto/heimdal/lib/wind/gen-map.py                                                          |     158 +
 head/crypto/heimdal/lib/wind/gen-normalize.py                                                    |     211 +
 head/crypto/heimdal/lib/wind/gen-punycode-examples.py                                            |     131 +
 head/crypto/heimdal/lib/wind/generate.py                                                         |      81 +
 head/crypto/heimdal/lib/wind/idn-lookup.c                                                        |     162 +
 head/crypto/heimdal/lib/wind/ldap.c                                                              |      91 +
 head/crypto/heimdal/lib/wind/libwind-exports.def                                                 |      26 +
 head/crypto/heimdal/lib/wind/map.c                                                               |      85 +
 head/crypto/heimdal/lib/wind/map_table.c                                                         |    2613 +
 head/crypto/heimdal/lib/wind/map_table.h                                                         |      22 +
 head/crypto/heimdal/lib/wind/normalize.c                                                         |     325 +
 head/crypto/heimdal/lib/wind/normalize_table.c                                                   |   22977 +
 head/crypto/heimdal/lib/wind/normalize_table.h                                                   |      33 +
 head/crypto/heimdal/lib/wind/punycode.c                                                          |     168 +
 head/crypto/heimdal/lib/wind/punycode_examples.c                                                 |      34 +
 head/crypto/heimdal/lib/wind/punycode_examples.h                                                 |      21 +
 head/crypto/heimdal/lib/wind/rfc3454.py                                                          |      60 +
 head/crypto/heimdal/lib/wind/rfc3454.txt                                                         |    5099 +
 head/crypto/heimdal/lib/wind/rfc3490.txt                                                         |    1235 +
 head/crypto/heimdal/lib/wind/rfc3491.txt                                                         |     395 +
 head/crypto/heimdal/lib/wind/rfc3492.txt                                                         |    1963 +
 head/crypto/heimdal/lib/wind/rfc4013.txt                                                         |     339 +
 head/crypto/heimdal/lib/wind/rfc4518.py                                                          |     150 +
 head/crypto/heimdal/lib/wind/rfc4518.txt                                                         |     787 +
 head/crypto/heimdal/lib/wind/stringprep.c                                                        |     145 +
 head/crypto/heimdal/lib/wind/stringprep.py                                                       |      92 +
 head/crypto/heimdal/lib/wind/test-bidi.c                                                         |      81 +
 head/crypto/heimdal/lib/wind/test-ldap.c                                                         |     128 +
 head/crypto/heimdal/lib/wind/test-map.c                                                          |      98 +
 head/crypto/heimdal/lib/wind/test-normalize.c                                                    |     176 +
 head/crypto/heimdal/lib/wind/test-prohibited.c                                                   |     138 +
 head/crypto/heimdal/lib/wind/test-punycode.c                                                     |      83 +
 head/crypto/heimdal/lib/wind/test-rw.c                                                           |     186 +
 head/crypto/heimdal/lib/wind/test-utf8.c                                                         |     174 +
 head/crypto/heimdal/lib/wind/utf8.c                                                              |     516 +
 head/crypto/heimdal/lib/wind/util.py                                                             |      48 +
 head/crypto/heimdal/lib/wind/version-script.map                                                  |      29 +
 head/crypto/heimdal/lib/wind/wind.h                                                              |      86 +
 head/crypto/heimdal/lib/wind/wind_err.et                                                         |      23 +
 head/crypto/heimdal/lib/wind/windlocl.h                                                          |      67 +
 head/etc/rc.d/kfd                                                                                |      19 +
 head/gnu/lib/libsupc++/Version.map                                                               |     137 +
 head/kerberos5/lib/libasn1/version.map                                                           |    1592 +
 head/kerberos5/lib/libgssapi_krb5/gss_oid.c                                                      |     227 +
 head/kerberos5/lib/libgssapi_spnego/freebsd_compat.c                                             |      84 +
 head/kerberos5/lib/libheimbase/Makefile                                                          |      26 +
 head/kerberos5/lib/libheimipcc/Makefile                                                          |      20 +
 head/kerberos5/lib/libheimipcs/Makefile                                                          |      21 +
 head/kerberos5/lib/libheimsqlite/Makefile                                                        |      11 +
 head/kerberos5/lib/libkafs5/version.map                                                          |      22 +
 head/kerberos5/lib/libkdc/Makefile                                                               |      40 +
 head/kerberos5/lib/libwind/Makefile                                                              |      35 +
 head/kerberos5/libexec/digest-service/Makefile                                                   |      18 +
 head/kerberos5/libexec/kdigest/Makefile                                                          |      21 +
 head/kerberos5/libexec/kfd/Makefile                                                              |      15 +
 head/kerberos5/libexec/kimpersonate/Makefile                                                     |      16 +
 head/kerberos5/usr.bin/hxtool/Makefile                                                           |      18 +
 head/kerberos5/usr.bin/kcc/Makefile                                                              |      27 +
 head/kerberos5/usr.bin/kf/Makefile                                                               |      15 +
 head/kerberos5/usr.bin/kgetcred/Makefile                                                         |      11 +
 head/kerberos5/usr.bin/string2key/Makefile                                                       |      17 +
 head/kerberos5/usr.sbin/iprop-log/Makefile                                                       |      21 +
 head/lib/clang/include/MipsGenMCCodeEmitter.inc                                                  |       2 +
 head/lib/clang/include/clang/Basic/DiagnosticSerializationKinds.inc                              |       2 +
 head/lib/clang/include/clang/Config/config.h                                                     |      20 +
 head/lib/clang/include/clang/Sema/AttrParsedAttrKinds.inc                                        |       2 +
 head/lib/clang/include/clang/Sema/AttrParsedAttrList.inc                                         |       2 +
 head/lib/clang/include/clang/Sema/AttrTemplateInstantiate.inc                                    |       2 +
 head/lib/clang/libclangedit/Makefile                                                             |      16 +
 head/lib/clang/libllvmmipsasmparser/Makefile                                                     |      13 +
 head/lib/clang/libllvmvectorize/Makefile                                                         |      11 +
 head/lib/libc/arm/gen/__aeabi_read_tp.S                                                          |      40 +
 head/lib/libc/stdlib/jemalloc/Makefile.inc                                                       |      46 +
 head/lib/libc/stdlib/jemalloc/Symbol.map                                                         |      35 +
 head/lib/libc/stdlib/reallocf.3                                                                  |      82 +
 head/lib/libpmc/pmc.mips24k.3                                                                    |     413 +
 head/lib/libpmc/pmc.octeon.3                                                                     |     253 +
 head/lib/libpmc/pmc.soft.3                                                                       |     104 +
 head/share/examples/csh/dot.cshrc                                                                |      66 +
 head/sys/arm/conf/GUMSTIX-QEMU                                                                   |      92 +
 head/sys/cddl/contrib/opensolaris/uts/mips/dtrace/fasttrap_isa.c                                 |      30 +
 head/sys/cddl/contrib/opensolaris/uts/mips/sys/fasttrap_isa.h                                    |      48 +
 head/sys/cddl/dev/dtrace/mips/dtrace_asm.S                                                       |     303 +
 head/sys/cddl/dev/dtrace/mips/dtrace_isa.c                                                       |     726 +
 head/sys/cddl/dev/dtrace/mips/dtrace_subr.c                                                      |     203 +
 head/sys/cddl/dev/dtrace/mips/regset.h                                                           |      62 +
 head/sys/contrib/dev/acpica/compiler/preprocess.h                                                |     290 +
 head/sys/contrib/dev/acpica/compiler/prexpress.c                                                 |     305 +
 head/sys/contrib/dev/acpica/compiler/prmacros.c                                                  |     574 +
 head/sys/contrib/dev/acpica/compiler/prparser.l                                                  |     153 +
 head/sys/contrib/dev/acpica/compiler/prparser.y                                                  |     284 +
 head/sys/contrib/dev/acpica/compiler/prscan.c                                                    |     749 +
 head/sys/contrib/dev/acpica/compiler/prutils.c                                                   |     550 +
 head/sys/dev/hwpmc/hwpmc_octeon.c                                                                |     195 +
 head/sys/dev/hwpmc/hwpmc_soft.c                                                                  |     485 +
 head/sys/dev/hwpmc/hwpmc_soft.h                                                                  |      48 +
 head/sys/dev/iicbus/ds1374.c                                                                     |     143 +
 head/sys/dev/iicbus/iicoc.c                                                                      |     390 +
 head/sys/dev/iicbus/iicoc.h                                                                      |      78 +
 head/sys/dev/iicbus/pcf8563.c                                                                    |     202 +
 head/sys/dev/iicbus/pcf8563reg.h                                                                 |      58 +
 head/sys/dev/mfi/mfi_syspd.c                                                                     |     276 +
 head/sys/dev/mfi/mfi_tbolt.c                                                                     |    1339 +
 head/sys/dev/mpt/mpilib/mpi_log_fc.h                                                             |     117 +
 head/sys/dev/mpt/mpilib/mpi_log_sas.h                                                            |     352 +
 head/sys/dev/netmap/netmap_mem1.c                                                                |     521 +
 head/sys/dev/netmap/netmap_mem2.c                                                                |     720 +
 head/sys/dev/uart/uart_cpu_x86.c                                                                 |     107 +
 head/sys/mips/cavium/octeon_pmc.c                                                                |     130 +
 head/sys/mips/conf/AP94                                                                          |      35 +
 head/sys/mips/conf/AP94.hints                                                                    |      28 +
 head/sys/mips/conf/AP96                                                                          |      42 +
 head/sys/mips/conf/AP96.hints                                                                    |      75 +
 head/sys/mips/conf/XLP.hints                                                                     |       5 +
 head/sys/mips/nlm/board_cpld.c                                                                   |     113 +
 head/sys/mips/nlm/board_eeprom.c                                                                 |     172 +
 head/sys/mips/nlm/dev/cfi_pci_xlp.c                                                              |      77 +
 head/sys/mips/nlm/dev/net/mdio.c                                                                 |     301 +
 head/sys/mips/nlm/dev/net/nae.c                                                                  |    1536 +
 head/sys/mips/nlm/dev/net/sgmii.c                                                                |     216 +
 head/sys/mips/nlm/dev/net/ucore/crt0_basic.S                                                     |      66 +
 head/sys/mips/nlm/dev/net/ucore/ld.ucore.S                                                       |     162 +
 head/sys/mips/nlm/dev/net/ucore/ucore.h                                                          |     352 +
 head/sys/mips/nlm/dev/net/ucore/ucore_app.c                                                      |      58 +
 head/sys/mips/nlm/dev/net/xaui.c                                                                 |     251 +
 head/sys/mips/nlm/dev/net/xlpge.c                                                                |    1574 +
 head/sys/mips/nlm/dev/net/xlpge.h                                                                |     135 +
 head/sys/mips/nlm/dev/sec/nlmrsa.c                                                               |     556 +
 head/sys/mips/nlm/dev/sec/nlmrsalib.h                                                            |      68 +
 head/sys/mips/nlm/dev/sec/nlmsec.c                                                               |     850 +
 head/sys/mips/nlm/dev/sec/nlmseclib.c                                                            |     307 +
 head/sys/mips/nlm/dev/sec/nlmseclib.h                                                            |     157 +
 head/sys/mips/nlm/dev/sec/rsa_ucode.h                                                            |     956 +
 head/sys/mips/nlm/dev/uart_pci_xlp.c                                                             |      83 +
 head/sys/mips/nlm/hal/gbu.h                                                                      |     100 +
 head/sys/mips/nlm/hal/interlaken.h                                                               |      70 +
 head/sys/mips/nlm/hal/mdio.h                                                                     |     104 +
 head/sys/mips/nlm/hal/nae.h                                                                      |     646 +
 head/sys/mips/nlm/hal/nlmsaelib.h                                                                |     607 +
 head/sys/mips/nlm/hal/poe.h                                                                      |     352 +
 head/sys/mips/nlm/hal/sgmii.h                                                                    |     217 +
 head/sys/mips/nlm/hal/ucore_loader.h                                                             |     141 +
 head/sys/mips/nlm/hal/xaui.h                                                                     |     193 +
 head/sys/x86/include/legacyvar.h                                                                 |      63 +
 head/sys/x86/x86/legacy.c                                                                        |     372 +
 head/usr.sbin/pkg/Makefile                                                                       |      10 +
 head/usr.sbin/pkg/elf_tables.h                                                                   |      79 +
 head/usr.sbin/pkg/pkg.c                                                                          |     384 +
 1651 files changed, 441656 insertions(+), 235 deletions(-)

diffs (448425 lines):

diff -r d27f0705b100 -r 7bbd6bca528b head/cddl/contrib/opensolaris/lib/libdtrace/mips/dt_isadep.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/cddl/contrib/opensolaris/lib/libdtrace/mips/dt_isadep.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,75 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <stdlib.h>
+#include <assert.h>
+#include <errno.h>
+#include <string.h>
+#include <libgen.h>
+
+#include <dt_impl.h>
+#include <dt_pid.h>
+
+/*ARGSUSED*/
+int
+dt_pid_create_entry_probe(struct ps_prochandle *P, dtrace_hdl_t *dtp,
+    fasttrap_probe_spec_t *ftp, const GElf_Sym *symp)
+{
+
+	dt_dprintf("%s: unimplemented\n", __func__);
+	return (DT_PROC_ERR);
+}
+
+int
+dt_pid_create_return_probe(struct ps_prochandle *P, dtrace_hdl_t *dtp,
+    fasttrap_probe_spec_t *ftp, const GElf_Sym *symp, uint64_t *stret)
+{
+
+	dt_dprintf("%s: unimplemented\n", __func__);
+	return (DT_PROC_ERR);
+}
+
+/*ARGSUSED*/
+int
+dt_pid_create_offset_probe(struct ps_prochandle *P, dtrace_hdl_t *dtp,
+    fasttrap_probe_spec_t *ftp, const GElf_Sym *symp, ulong_t off)
+{
+
+	dt_dprintf("%s: unimplemented\n", __func__);
+	return (DT_PROC_ERR);
+}
+
+/*ARGSUSED*/
+int
+dt_pid_create_glob_offset_probes(struct ps_prochandle *P, dtrace_hdl_t *dtp,
+    fasttrap_probe_spec_t *ftp, const GElf_Sym *symp, const char *pattern)
+{
+
+	dt_dprintf("%s: unimplemented\n", __func__);
+	return (DT_PROC_ERR);
+}
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/com_err/ChangeLog
--- a/head/contrib/com_err/ChangeLog	Thu Apr 05 16:58:30 2012 +0300
+++ b/head/contrib/com_err/ChangeLog	Tue Apr 17 11:33:49 2012 +0300
@@ -1,235 +0,0 @@
-2007-07-17  Love Hörnquist Åstrand  <lha at it.su.se>
-
-	* Makefile.am: split source files in dist and nodist.
-
-2007-07-16  Love Hörnquist Åstrand  <lha at it.su.se>
-
-	* Makefile.am: Only do roken rename for the library.
-
-2007-07-15  Love Hörnquist Åstrand  <lha at it.su.se>
-
-	* Makefile.am: use version script.
-	
-	* version-script.map: use version script.
-
-2007-07-10  Love Hörnquist Åstrand  <lha at it.su.se>
-
-	* Makefile.am: New library version.
-	
-2006-10-19  Love Hörnquist Åstrand  <lha at it.su.se>
-
-	* Makefile.am (compile_et_SOURCES): add lex.h
-	
-2005-12-12  Love Hörnquist Åstrand  <lha at it.su.se>
-
-	* com_err.3: Document the _r functions.
-	
-2005-07-07  Love Hörnquist Åstrand  <lha at it.su.se>
-
-	* com_err.h: Include <stdarg.h> for va_list to help AIX 5.2.
-	
-2005-06-16  Love Hörnquist Åstrand  <lha at it.su.se>
-
-	* parse.y: rename base to base_id since flex defines a function
-	with the argument base
-
-	* compile_et.h: rename base to base_id since flex defines a
-	function with the argument base
-
-	* compile_et.c: rename base to base_id since flex defines a
-	function with the argument base
-
-	* parse.y (name2number): rename base to num to avoid shadowing
-	
-	* compile_et.c: rename optind to optidx
-	
-2005-05-16  Love Hörnquist Åstrand  <lha at it.su.se>
-
-	* parse.y: check allocation errors
-
-	* lex.l: check allocation errors correctly
-	
-	* compile_et.h: include <err.h>
-	
-	* (main): compile_et.c: use strlcpy
-	
-2005-04-29  Dave Love  <fx at gnu.org>
-
-	* Makefile.am (LDADD): Add libcom_err.la
-
-2005-04-24  Love Hörnquist Åstrand  <lha at it.su.se>
-
-	* include strlcpy and *printf and use them
-
-2005-02-03  Love Hörnquist Åstrand  <lha at it.su.se>
-
-	* com_right.h: de-__P
-
-	* com_err.h: de-__P
-
-2002-08-20  Johan Danielsson  <joda at pdc.kth.se>
-
-	* compile_et.c: don't add comma after last enum member
-
-2002-08-12  Johan Danielsson  <joda at pdc.kth.se>
-
-	* compile_et.c: just declare er_list directly instead of including
-	com_right in generated header files
-
-2002-03-11  Assar Westerlund  <assar at sics.se>
-
-	* Makefile.am (libcom_err_la_LDFLAGS): set version to 2:1:1
-
-2002-03-10  Assar Westerlund  <assar at sics.se>
-
-	* com_err.c (error_message): do not call strerror with a negative error
-
-2001-05-17  Assar Westerlund  <assar at sics.se>
-
-	* Makefile.am: bump version to 2:0:1
-
-2001-05-11  Assar Westerlund  <assar at sics.se>
-
-	* com_err.h (add_to_error_table): add prototype
-	* com_err.c (add_to_error_table): new function, from Derrick J
-	Brashear <shadow at dementia.org>
-
-2001-05-06  Assar Westerlund  <assar at sics.se>
-
-	* com_err.h: add printf formats for gcc
-
-2001-02-28  Johan Danielsson  <joda at pdc.kth.se>
-
-	* error.c (initialize_error_table_r): put table at end of the list
-
-2001-02-15  Assar Westerlund  <assar at sics.se>
-
-	* com_err.c (default_proc): add printf attributes
-
-2000-08-16  Assar Westerlund  <assar at sics.se>
-
-	* Makefile.am: bump version to 1:1:0
-
-2000-07-31  Assar Westerlund  <assar at sics.se>
-
-	* com_right.h (initialize_error_table_r): fix prototype
-
-2000-04-05  Assar Westerlund  <assar at sics.se>
-
-	* com_err.c (_et_lit): explicitly initialize it to NULL to make
-	dyld on Darwin/MacOS X happy
-
-2000-01-16  Assar Westerlund  <assar at sics.se>
-
-	* com_err.h: remove __P definition (now in com_right.h).  this
-	file always includes com_right.h so that's where it should reside.
-	* com_right.h: moved __P here and added it to the function
-	prototypes
-	* com_err.h (error_table_name): add __P
-
-1999-07-03  Assar Westerlund  <assar at sics.se>
-
-	* parse.y (statement): use asprintf
-
-1999-06-13  Assar Westerlund  <assar at sics.se>
-
-	* Makefile.in: make it solaris make vpath-safe
-
-Thu Apr  1 11:13:53 1999  Johan Danielsson  <joda at hella.pdc.kth.se>
-
-	* compile_et.c: use getargs
-
-Sat Mar 20 00:16:30 1999  Assar Westerlund  <assar at sics.se>
-
-	* compile_et.c: static-ize
-
-Thu Mar 18 11:22:13 1999  Johan Danielsson  <joda at hella.pdc.kth.se>
-
-	* Makefile.am: include Makefile.am.common
-
-Tue Mar 16 22:30:05 1999  Assar Westerlund  <assar at sics.se>
-
-	* parse.y: use YYACCEPT instead of return
-
-Sat Mar 13 22:22:56 1999  Assar Westerlund  <assar at sics.se>
-
-	* compile_et.c (generate_h): cast when calling is* to get rid of a
- 	warning
-
-Thu Mar 11 15:00:51 1999  Johan Danielsson  <joda at hella.pdc.kth.se>
-
-	* parse.y: prototype for error_message
-
-Sun Nov 22 10:39:02 1998  Assar Westerlund  <assar at sics.se>
-
-	* compile_et.h: include ctype and roken
-
-	* compile_et.c: include err.h
-	(generate_h): remove unused variable
-
-	* Makefile.in (WFLAGS): set
-
-Fri Nov 20 06:58:59 1998  Assar Westerlund  <assar at sics.se>
-
-	* lex.l: undef ECHO to work around AIX lex bug
-
-Sun Sep 27 02:23:59 1998  Johan Danielsson  <joda at hella.pdc.kth.se>
-
-	* com_err.c (error_message): try to pass code to strerror, to see
- 	if it might be an errno code (this if broken, but some MIT code
- 	seems to expect this behaviour)
-
-Sat Sep 26 17:42:39 1998  Johan Danielsson  <joda at hella.pdc.kth.se>
-
-	* compile_et.c: <foo_err.h> -> "foo_err.h"
-
-Tue Jun 30 17:17:36 1998  Assar Westerlund  <assar at sics.se>
-
-	* Makefile.in: add str{cpy,cat}_truncate
-
-Mon May 25 05:24:39 1998  Assar Westerlund  <assar at sics.se>
-
-	* Makefile.in (clean): try to remove shared library debris
-
-Sun Apr 19 09:50:17 1998  Assar Westerlund  <assar at sics.se>
-
-	* Makefile.in: add symlink magic for linux
-
-Sun Apr  5 09:22:11 1998  Assar Westerlund  <assar at sics.se>
-
-	* parse.y: define alloca to malloc in case we're using bison but
- 	don't have alloca
-
-Tue Mar 24 05:13:01 1998  Assar Westerlund  <assar at sics.se>
-
-	* Makefile.in: link with snprintf (From Derrick J Brashear
- 	<shadow at dementia.org>)
-
-Fri Feb 27 05:01:42 1998  Assar Westerlund  <assar at sics.se>
-
-	* parse.y: initialize ec->next
-
-Thu Feb 26 02:22:25 1998  Assar Westerlund  <assar at sics.se>
-
-	* Makefile.am: @[email protected]
-
-Sat Feb 21 15:18:54 1998  assar westerlund  <assar at sics.se>
-
-	* Makefile.in: set YACC and LEX
-
-Tue Feb 17 22:20:27 1998  Bjoern Groenvall  <bg at sics.se>
-
-	* com_right.h: Change typedefs so that one may mix MIT compile_et
- 	generated code with krb4 dito.
-
-Tue Feb 17 16:30:55 1998  Johan Danielsson  <joda at emma.pdc.kth.se>
-
-	* compile_et.c (generate): Always return a value.
-
-	* parse.y: Files don't have to end with `end'.
-
-Mon Feb 16 16:09:20 1998  Johan Danielsson  <joda at emma.pdc.kth.se>
-
-	* lex.l (getstring): Replace getc() with input().
-
-	* Makefile.am: Fixes for new compile_et.
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/COPYING
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/COPYING	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,27 @@
+Unless otherwise specified, files in the jemalloc source distribution are
+subject to the following license:
+--------------------------------------------------------------------------------
+Copyright (C) 2002-2012 Jason Evans <jasone at canonware.com>.
+All rights reserved.
+Copyright (C) 2007-2012 Mozilla Foundation.  All rights reserved.
+Copyright (C) 2009-2012 Facebook, Inc.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+1. Redistributions of source code must retain the above copyright notice(s),
+   this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice(s),
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO
+EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+--------------------------------------------------------------------------------
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/ChangeLog
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/ChangeLog	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,322 @@
+Following are change highlights associated with official releases.  Important
+bug fixes are all mentioned, but internal enhancements are omitted here for
+brevity (even though they are more fun to write about).  Much more detail can be
+found in the git revision history:
+
+    http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
+    git://canonware.com/jemalloc.git
+
+* 3.0.0 (XXX not yet released)
+
+  Although this version adds some major new features, the primary focus is on
+  internal code cleanup that facilitates maintainability and portability, most
+  of which is not reflected in the ChangeLog.  This is the first release to
+  incorporate substantial contributions from numerous other developers, and the
+  result is a more broadly useful allocator (see the git revision history for
+  contribution details).  Note that the license has been unified, thanks to
+  Facebook granting a license under the same terms as the other copyright
+  holders (see COPYING).
+
+  New features:
+  - Implement Valgrind support, redzones, and quarantine.
+  - Add support for additional operating systems:
+    + FreeBSD
+    + Mac OS X Lion
+  - Add support for additional architectures:
+    + MIPS
+    + SH4
+    + Tilera
+  - Add support for cross compiling.
+  - Add nallocm(), which rounds a request size up to the nearest size class
+    without actually allocating.
+  - Implement aligned_alloc() (blame C11).
+  - Add the --disable-munmap option, and make it the default on Linux.
+  - Add the --with-mangling option.
+  - Add the --disable-experimental option.
+  - Add the "thread.tcache.enabled" mallctl.
+
+  Incompatible changes:
+  - Enable stats by default.
+  - Enable fill by default.
+  - Disable lazy locking by default.
+  - Rename the "tcache.flush" mallctl to "thread.tcache.flush".
+  - Rename the "arenas.pagesize" mallctl to "arenas.page".
+
+  Removed features:
+  - Remove the swap feature, including the "config.swap", "swap.avail",
+    "swap.prezeroed", "swap.nfds", and "swap.fds" mallctls.
+  - Remove highruns statistics, including the
+    "stats.arenas.<i>.bins.<j>.highruns" and
+    "stats.arenas.<i>.lruns.<j>.highruns" mallctls.
+  - As part of small size class refactoring, remove the "opt.lg_[qc]space_max",
+    "arenas.cacheline", "arenas.subpage", "arenas.[tqcs]space_{min,max}", and
+    "arenas.[tqcs]bins" mallctls.
+  - Remove the "arenas.chunksize" mallctl.
+  - Remove the "opt.lg_prof_tcmax" option.
+  - Remove the "opt.lg_prof_bt_max" option.
+  - Remove the "opt.lg_tcache_gc_sweep" option.
+  - Remove the --disable-tiny option, including the "config.tiny" mallctl.
+  - Remove the --enable-dynamic-page-shift configure option.
+  - Remove the --enable-sysv configure option.
+
+  Bug fixes:
+  - Fix fork-related bugs that could cause deadlock in children between fork
+    and exec.
+  - Fix a statistics-related bug in the "thread.arena" mallctl that could cause
+    invalid statistics and crashes.
+  - Work around TLS deallocation via free() on Linux.  This bug could cause
+    write-after-free memory corruption.
+  - Fix malloc_stats_print() to honor 'b' and 'l' in the opts parameter.
+  - Fix realloc(p, 0) to act like free(p).
+  - Do not enforce minimum alignment in memalign().
+  - Check for NULL pointer in malloc_usable_size().
+  - Fix bin->runcur management to fix a layout policy bug.  This bug did not
+    affect correctness.
+  - Fix a bug in choose_arena_hard() that potentially caused more arenas to be
+    initialized than necessary.
+  - Add missing "opt.lg_tcache_max" mallctl implementation.
+  - Use glibc allocator hooks to make mixed allocator usage less likely.
+  - Fix build issues for --disable-tcache.
+
+* 2.2.5 (November 14, 2011)
+
+  Bug fixes:
+  - Fix huge_ralloc() race when using mremap(2).  This is a serious bug that
+    could cause memory corruption and/or crashes.
+  - Fix huge_ralloc() to maintain chunk statistics.
+  - Fix malloc_stats_print(..., "a") output.
+
+* 2.2.4 (November 5, 2011)
+
+  Bug fixes:
+  - Initialize arenas_tsd before using it.  This bug existed for 2.2.[0-3], as
+    well as for --disable-tls builds in earlier releases.
+  - Do not assume a 4 KiB page size in test/rallocm.c.
+
+* 2.2.3 (August 31, 2011)
+
+  This version fixes numerous bugs related to heap profiling.
+
+  Bug fixes:
+  - Fix a prof-related race condition.  This bug could cause memory corruption,
+    but only occurred in non-default configurations (prof_accum:false).
+  - Fix off-by-one backtracing issues (make sure that prof_alloc_prep() is
+    excluded from backtraces).
+  - Fix a prof-related bug in realloc() (only triggered by OOM errors).
+  - Fix prof-related bugs in allocm() and rallocm().
+  - Fix prof_tdata_cleanup() for --disable-tls builds.
+  - Fix a relative include path, to fix objdir builds.
+
+* 2.2.2 (July 30, 2011)
+
+  Bug fixes:
+  - Fix a build error for --disable-tcache.
+  - Fix assertions in arena_purge() (for real this time).
+  - Add the --with-private-namespace option.  This is a workaround for symbol
+    conflicts that can inadvertently arise when using static libraries.
+
+* 2.2.1 (March 30, 2011)
+
+  Bug fixes:
+  - Implement atomic operations for x86/x64.  This fixes compilation failures
+    for versions of gcc that are still in wide use.
+  - Fix an assertion in arena_purge().
+
+* 2.2.0 (March 22, 2011)
+
+  This version incorporates several improvements to algorithms and data
+  structures that tend to reduce fragmentation and increase speed.
+
+  New features:
+  - Add the "stats.cactive" mallctl.
+  - Update pprof (from google-perftools 1.7).
+  - Improve backtracing-related configuration logic, and add the
+    --disable-prof-libgcc option.
+
+  Bug fixes:
+  - Change default symbol visibility from "internal", to "hidden", which
+    decreases the overhead of library-internal function calls.
+  - Fix symbol visibility so that it is also set on OS X.
+  - Fix a build dependency regression caused by the introduction of the .pic.o
+    suffix for PIC object files.
+  - Add missing checks for mutex initialization failures.
+  - Don't use libgcc-based backtracing except on x64, where it is known to work.
+  - Fix deadlocks on OS X that were due to memory allocation in
+    pthread_mutex_lock().
+  - Heap profiling-specific fixes:
+    + Fix memory corruption due to integer overflow in small region index
+      computation, when using a small enough sample interval that profiling
+      context pointers are stored in small run headers.
+    + Fix a bootstrap ordering bug that only occurred with TLS disabled.
+    + Fix a rallocm() rsize bug.
+    + Fix error detection bugs for aligned memory allocation.
+
+* 2.1.3 (March 14, 2011)
+
+  Bug fixes:
+  - Fix a cpp logic regression (due to the "thread.{de,}allocatedp" mallctl fix
+    for OS X in 2.1.2).
+  - Fix a "thread.arena" mallctl bug.
+  - Fix a thread cache stats merging bug.
+
+* 2.1.2 (March 2, 2011)
+
+  Bug fixes:
+  - Fix "thread.{de,}allocatedp" mallctl for OS X.
+  - Add missing jemalloc.a to build system.
+
+* 2.1.1 (January 31, 2011)
+
+  Bug fixes:
+  - Fix aligned huge reallocation (affected allocm()).
+  - Fix the ALLOCM_LG_ALIGN macro definition.
+  - Fix a heap dumping deadlock.
+  - Fix a "thread.arena" mallctl bug.
+
+* 2.1.0 (December 3, 2010)
+
+  This version incorporates some optimizations that can't quite be considered
+  bug fixes.
+
+  New features:
+  - Use Linux's mremap(2) for huge object reallocation when possible.
+  - Avoid locking in mallctl*() when possible.
+  - Add the "thread.[de]allocatedp" mallctl's.
+  - Convert the manual page source from roff to DocBook, and generate both roff
+    and HTML manuals.
+
+  Bug fixes:
+  - Fix a crash due to incorrect bootstrap ordering.  This only impacted
+    --enable-debug --enable-dss configurations.
+  - Fix a minor statistics bug for mallctl("swap.avail", ...).
+
+* 2.0.1 (October 29, 2010)
+
+  Bug fixes:
+  - Fix a race condition in heap profiling that could cause undefined behavior
+    if "opt.prof_accum" were disabled.
+  - Add missing mutex unlocks for some OOM error paths in the heap profiling
+    code.
+  - Fix a compilation error for non-C99 builds.
+
+* 2.0.0 (October 24, 2010)
+
+  This version focuses on the experimental *allocm() API, and on improved
+  run-time configuration/introspection.  Nonetheless, numerous performance
+  improvements are also included.
+
+  New features:
+  - Implement the experimental {,r,s,d}allocm() API, which provides a superset
+    of the functionality available via malloc(), calloc(), posix_memalign(),
+    realloc(), malloc_usable_size(), and free().  These functions can be used to
+    allocate/reallocate aligned zeroed memory, ask for optional extra memory
+    during reallocation, prevent object movement during reallocation, etc.
+  - Replace JEMALLOC_OPTIONS/JEMALLOC_PROF_PREFIX with MALLOC_CONF, which is
+    more human-readable, and more flexible.  For example:
+      JEMALLOC_OPTIONS=AJP
+    is now:
+      MALLOC_CONF=abort:true,fill:true,stats_print:true
+  - Port to Apple OS X.  Sponsored by Mozilla.
+  - Make it possible for the application to control thread-->arena mappings via
+    the "thread.arena" mallctl.
+  - Add compile-time support for all TLS-related functionality via pthreads TSD.
+    This is mainly of interest for OS X, which does not support TLS, but has a
+    TSD implementation with similar performance.
+  - Override memalign() and valloc() if they are provided by the system.
+  - Add the "arenas.purge" mallctl, which can be used to synchronously purge all
+    dirty unused pages.
+  - Make cumulative heap profiling data optional, so that it is possible to
+    limit the amount of memory consumed by heap profiling data structures.
+  - Add per thread allocation counters that can be accessed via the
+    "thread.allocated" and "thread.deallocated" mallctls.
+
+  Incompatible changes:
+  - Remove JEMALLOC_OPTIONS and malloc_options (see MALLOC_CONF above).
+  - Increase default backtrace depth from 4 to 128 for heap profiling.
+  - Disable interval-based profile dumps by default.
+
+  Bug fixes:
+  - Remove bad assertions in fork handler functions.  These assertions could
+    cause aborts for some combinations of configure settings.
+  - Fix strerror_r() usage to deal with non-standard semantics in GNU libc.
+  - Fix leak context reporting.  This bug tended to cause the number of contexts
+    to be underreported (though the reported number of objects and bytes were
+    correct).
+  - Fix a realloc() bug for large in-place growing reallocation.  This bug could
+    cause memory corruption, but it was hard to trigger.
+  - Fix an allocation bug for small allocations that could be triggered if
+    multiple threads raced to create a new run of backing pages.
+  - Enhance the heap profiler to trigger samples based on usable size, rather
+    than request size.
+  - Fix a heap profiling bug due to sometimes losing track of requested object
+    size for sampled objects.
+
+* 1.0.3 (August 12, 2010)
+
+  Bug fixes:
+  - Fix the libunwind-based implementation of stack backtracing (used for heap
+    profiling).  This bug could cause zero-length backtraces to be reported.
+  - Add a missing mutex unlock in library initialization code.  If multiple
+    threads raced to initialize malloc, some of them could end up permanently
+    blocked.
+
+* 1.0.2 (May 11, 2010)
+
+  Bug fixes:
+  - Fix junk filling of large objects, which could cause memory corruption.
+  - Add MAP_NORESERVE support for chunk mapping, because otherwise virtual
+    memory limits could cause swap file configuration to fail.  Contributed by
+    Jordan DeLong.
+
+* 1.0.1 (April 14, 2010)
+
+  Bug fixes:
+  - Fix compilation when --enable-fill is specified.
+  - Fix threads-related profiling bugs that affected accuracy and caused memory
+    to be leaked during thread exit.
+  - Fix dirty page purging race conditions that could cause crashes.
+  - Fix crash in tcache flushing code during thread destruction.
+
+* 1.0.0 (April 11, 2010)
+
+  This release focuses on speed and run-time introspection.  Numerous
+  algorithmic improvements make this release substantially faster than its
+  predecessors.
+
+  New features:
+  - Implement autoconf-based configuration system.
+  - Add mallctl*(), for the purposes of introspection and run-time
+    configuration.
+  - Make it possible for the application to manually flush a thread's cache, via
+    the "tcache.flush" mallctl.
+  - Base maximum dirty page count on proportion of active memory.
+  - Compute various additional run-time statistics, including per size class
+    statistics for large objects.
+  - Expose malloc_stats_print(), which can be called repeatedly by the
+    application.
+  - Simplify the malloc_message() signature to only take one string argument,
+    and incorporate an opaque data pointer argument for use by the application
+    in combination with malloc_stats_print().
+  - Add support for allocation backed by one or more swap files, and allow the
+    application to disable over-commit if swap files are in use.
+  - Implement allocation profiling and leak checking.
+
+  Removed features:
+  - Remove the dynamic arena rebalancing code, since thread-specific caching
+    reduces its utility.
+
+  Bug fixes:
+  - Modify chunk allocation to work when address space layout randomization
+    (ASLR) is in use.
+  - Fix thread cleanup bugs related to TLS destruction.
+  - Handle 0-size allocation requests in posix_memalign().
+  - Fix a chunk leak.  The leaked chunks were never touched, so this impacted
+    virtual memory usage, but not physical memory usage.
+
+* linux_2008082[78]a (August 27/28, 2008)
+
+  These snapshot releases are the simple result of incorporating Linux-specific
+  support into the FreeBSD malloc sources.
+
+--------------------------------------------------------------------------------
+vim:filetype=text:textwidth=80
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/FREEBSD-Xlist
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/FREEBSD-Xlist	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,23 @@
+$FreeBSD$
+.git
+.gitignore
+FREEBSD-*
+INSTALL
+Makefile*
+README
+autogen.sh
+autom4te.cache/
+bin/
+config.*
+configure*
+doc/*.in
+doc/*.xml
+doc/*.xsl
+doc/*.html
+include/jemalloc/internal/jemalloc_internal.h.in
+include/jemalloc/internal/size_classes.sh
+include/jemalloc/jemalloc.h.in
+include/jemalloc/jemalloc_defs.h.in
+install-sh
+src/zone.c
+test/
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/FREEBSD-diffs
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/FREEBSD-diffs	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,247 @@
+diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
+index 98d0ba4..23d2152 100644
+--- a/doc/jemalloc.xml.in
++++ b/doc/jemalloc.xml.in
+@@ -51,12 +51,23 @@
+     <para>This manual describes jemalloc @jemalloc_version@.  More information
+     can be found at the <ulink
+     url="http://www.canonware.com/jemalloc/">jemalloc website</ulink>.</para>
++
++    <para>The following configuration options are enabled in libc's built-in
++    jemalloc: <option>--enable-dss</option>,
++    <option>--enable-experimental</option>, <option>--enable-fill</option>,
++    <option>--enable-lazy-lock</option>, <option>--enable-munmap</option>,
++    <option>--enable-stats</option>, <option>--enable-tcache</option>,
++    <option>--enable-tls</option>, <option>--enable-utrace</option>, and
++    <option>--enable-xmalloc</option>.  Additionally,
++    <option>--enable-debug</option> is enabled in development versions of
++    FreeBSD (controlled by the <constant>MALLOC_PRODUCTION</constant> make
++    variable).</para>
+   </refsect1>
+   <refsynopsisdiv>
+     <title>SYNOPSIS</title>
+     <funcsynopsis>
+       <funcsynopsisinfo>#include <<filename class="headerfile">stdlib.h</filename>>
+-#include <<filename class="headerfile">jemalloc/jemalloc.h</filename>></funcsynopsisinfo>
++#include <<filename class="headerfile">malloc_np.h</filename>></funcsynopsisinfo>
+       <refsect2>
+         <title>Standard API</title>
+         <funcprototype>
+@@ -2080,4 +2091,16 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
+     <para>The <function>posix_memalign<parameter/></function> function conforms
+     to IEEE Std 1003.1-2001 (“POSIX.1”).</para>
+   </refsect1>
++  <refsect1 id="history">
++    <title>HISTORY</title>
++    <para>The <function>malloc_usable_size<parameter/></function> and
++    <function>posix_memalign<parameter/></function> functions first appeared in
++    FreeBSD 7.0.</para>
++
++    <para>The <function>aligned_alloc<parameter/></function>,
++    <function>malloc_stats_print<parameter/></function>,
++    <function>mallctl*<parameter/></function>, and
++    <function>*allocm<parameter/></function> functions first appeared in
++    FreeBSD 10.0.</para>
++  </refsect1>
+ </refentry>
+diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
+index aa21aa5..e0f5fed 100644
+--- a/include/jemalloc/internal/jemalloc_internal.h.in
++++ b/include/jemalloc/internal/jemalloc_internal.h.in
+@@ -1,3 +1,6 @@
++#include "libc_private.h"
++#include "namespace.h"
++
+ #include <sys/mman.h>
+ #include <sys/param.h>
+ #include <sys/syscall.h>
+@@ -33,6 +36,9 @@
+ #include <pthread.h>
+ #include <math.h>
+ 
++#include "un-namespace.h"
++#include "libc_private.h"
++
+ #define	JEMALLOC_NO_DEMANGLE
+ #include "../jemalloc at install_suffix@.h"
+ 
+diff --git a/include/jemalloc/internal/mutex.h b/include/jemalloc/internal/mutex.h
+index c46feee..d7133f4 100644
+--- a/include/jemalloc/internal/mutex.h
++++ b/include/jemalloc/internal/mutex.h
+@@ -39,8 +39,6 @@ struct malloc_mutex_s {
+ 
+ #ifdef JEMALLOC_LAZY_LOCK
+ extern bool isthreaded;
+-#else
+-#  define isthreaded true
+ #endif
+ 
+ bool	malloc_mutex_init(malloc_mutex_t *mutex);
+diff --git a/include/jemalloc/jemalloc.h.in b/include/jemalloc/jemalloc.h.in
+index f0581db..f26d8bc 100644
+--- a/include/jemalloc/jemalloc.h.in
++++ b/include/jemalloc/jemalloc.h.in
+@@ -15,6 +15,7 @@ extern "C" {
+ #define	JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
+ 
+ #include "jemalloc_defs at install_suffix@.h"
++#include "jemalloc_FreeBSD.h"
+ 
+ #ifdef JEMALLOC_EXPERIMENTAL
+ #define	ALLOCM_LG_ALIGN(la)	(la)
+diff --git a/include/jemalloc/jemalloc_FreeBSD.h b/include/jemalloc/jemalloc_FreeBSD.h
+new file mode 100644
+index 0000000..2c5797f
+--- /dev/null
++++ b/include/jemalloc/jemalloc_FreeBSD.h
+@@ -0,0 +1,76 @@
++/*
++ * Override settings that were generated in jemalloc_defs.h as necessary.
++ */
++
++#undef JEMALLOC_OVERRIDE_VALLOC
++
++#ifndef MALLOC_PRODUCTION
++#define	JEMALLOC_DEBUG
++#endif
++
++/*
++ * The following are architecture-dependent, so conditionally define them for
++ * each supported architecture.
++ */
++#undef CPU_SPINWAIT
++#undef JEMALLOC_TLS_MODEL
++#undef STATIC_PAGE_SHIFT
++#undef LG_SIZEOF_PTR
++#undef LG_SIZEOF_INT
++#undef LG_SIZEOF_LONG
++#undef LG_SIZEOF_INTMAX_T
++
++#ifdef __i386__
++#  define LG_SIZEOF_PTR		2
++#  define CPU_SPINWAIT		__asm__ volatile("pause")
++#  define JEMALLOC_TLS_MODEL	__attribute__((tls_model("initial-exec")))
++#endif
++#ifdef __ia64__
++#  define LG_SIZEOF_PTR		3
++#endif
++#ifdef __sparc64__
++#  define LG_SIZEOF_PTR		3
++#  define JEMALLOC_TLS_MODEL	__attribute__((tls_model("initial-exec")))
++#endif
++#ifdef __amd64__
++#  define LG_SIZEOF_PTR		3
++#  define CPU_SPINWAIT		__asm__ volatile("pause")
++#  define JEMALLOC_TLS_MODEL	__attribute__((tls_model("initial-exec")))
++#endif
++#ifdef __arm__
++#  define LG_SIZEOF_PTR		2
++#endif
++#ifdef __mips__
++#  define LG_SIZEOF_PTR		2
++#endif
++#ifdef __powerpc64__
++#  define LG_SIZEOF_PTR		3
++#elif defined(__powerpc__)
++#  define LG_SIZEOF_PTR		2
++#endif
++
++#ifndef JEMALLOC_TLS_MODEL
++#  define JEMALLOC_TLS_MODEL	/* Default. */
++#endif
++#ifdef __clang__
++#  undef JEMALLOC_TLS_MODEL
++#  define JEMALLOC_TLS_MODEL	/* clang does not support tls_model yet. */
++#endif
++
++#define	STATIC_PAGE_SHIFT	PAGE_SHIFT
++#define	LG_SIZEOF_INT		2
++#define	LG_SIZEOF_LONG		LG_SIZEOF_PTR
++#define	LG_SIZEOF_INTMAX_T	3
++
++/* Disable lazy-lock machinery, mangle isthreaded, and adjust its type. */
++#undef JEMALLOC_LAZY_LOCK
++extern int __isthreaded;
++#define	isthreaded		((bool)__isthreaded)
++
++/* Mangle. */
++#define	open			_open
++#define	read			_read
++#define	write			_write
++#define	close			_close
++#define	pthread_mutex_lock	_pthread_mutex_lock
++#define	pthread_mutex_unlock	_pthread_mutex_unlock
+diff --git a/src/jemalloc.c b/src/jemalloc.c
+index 0decd8a..73fad29 100644
+--- a/src/jemalloc.c
++++ b/src/jemalloc.c
+@@ -8,6 +8,9 @@ malloc_tsd_data(, arenas, arena_t *, NULL)
+ malloc_tsd_data(, thread_allocated, thread_allocated_t,
+     THREAD_ALLOCATED_INITIALIZER)
+ 
++const char	*__malloc_options_1_0;
++__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
++
+ /* Runtime configuration options. */
+ const char	*je_malloc_conf JEMALLOC_ATTR(visibility("default"));
+ #ifdef JEMALLOC_DEBUG
+@@ -401,7 +404,8 @@ malloc_conf_init(void)
+ #endif
+ 			    ;
+ 
+-			if ((opts = getenv(envname)) != NULL) {
++			if (issetugid() == 0 && (opts = getenv(envname)) !=
++			    NULL) {
+ 				/*
+ 				 * Do nothing; opts is already initialized to
+ 				 * the value of the MALLOC_CONF environment
+diff --git a/src/mutex.c b/src/mutex.c
+index 4b8ce57..7be5fc9 100644
+--- a/src/mutex.c
++++ b/src/mutex.c
+@@ -63,6 +63,17 @@ pthread_create(pthread_t *__restrict thread,
+ #ifdef JEMALLOC_MUTEX_INIT_CB
+ int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+     void *(calloc_cb)(size_t, size_t));
++
++__weak_reference(_pthread_mutex_init_calloc_cb_stub,
++    _pthread_mutex_init_calloc_cb);
++
++int
++_pthread_mutex_init_calloc_cb_stub(pthread_mutex_t *mutex,
++    void *(calloc_cb)(size_t, size_t))
++{
++
++	return (0);
++}
+ #endif
+ 
+ bool
+diff --git a/src/util.c b/src/util.c
+index 2aab61f..8b05042 100644
+--- a/src/util.c
++++ b/src/util.c
+@@ -60,6 +60,22 @@ wrtmessage(void *cbopaque, const char *s)
+ void	(*je_malloc_message)(void *, const char *s)
+     JEMALLOC_ATTR(visibility("default")) = wrtmessage;
+ 
++JEMALLOC_CATTR(visibility("hidden"), static)
++void
++wrtmessage_1_0(const char *s1, const char *s2, const char *s3,
++    const char *s4)
++{
++
++	wrtmessage(NULL, s1);
++	wrtmessage(NULL, s2);
++	wrtmessage(NULL, s3);
++	wrtmessage(NULL, s4);
++}
++
++void	(*__malloc_message_1_0)(const char *s1, const char *s2, const char *s3,
++    const char *s4) = wrtmessage_1_0;
++__sym_compat(_malloc_message, __malloc_message_1_0, FBSD_1.0);
++
+ /*
+  * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
+  * provide a wrapper.
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/FREEBSD-upgrade
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/FREEBSD-upgrade	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,122 @@
+#!/bin/sh
+# $FreeBSD$
+#
+# Usage: cd /usr/src/contrib/jemalloc
+#        ./FREEBSD-upgrade <command> [args]
+#
+# At least the following ports are required when importing jemalloc:
+# - devel/autoconf
+# - devel/git
+# - devel/gmake
+# - textproc/docbook-xsl
+#
+# The normal workflow for importing a new release is:
+#
+#   cd /usr/src/contrib/jemalloc
+#
+# Merge local changes that were made since the previous import:
+#
+#   ./FREEBSD-upgrade merge-changes
+#   ./FREEBSD-upgrade rediff
+#
+# Extract latest jemalloc release.
+#
+#   ./FREEBSD-upgrade extract
+#
+# Fix patch conflicts as necessary, then regenerate diffs to update line
+# offsets:
+#
+#   ./FREEBSD-upgrade rediff
+#   ./FREEBSD-upgrade extract
+#
+# Do multiple buildworld/installworld rounds.  If problems arise and patches
+# are needed, edit the code in ${work} as necessary, then:
+#
+#   ./FREEBSD-upgrade rediff
+#   ./FREEBSD-upgrade extract
+#
+# The rediff/extract order is important because rediff saves the local
+# changes, then extract blows away the work tree and re-creates it with the
+# diffs applied.
+#
+# Finally, to clean up:
+#
+#  ./FREEBSD-upgrade clean
+
+set -e
+
+if [ ! -x "FREEBSD-upgrade" ] ; then
+  echo "Run from within src/contrib/jemalloc/" >&2
+  exit 1
+fi
+
+src=`pwd`
+workname="jemalloc.git"
+work="${src}/../${workname}" # merge-changes expects ${workname} in "..".
+changes="${src}/FREEBSD-changes"
+
+do_extract() {
+  local rev=$1
+  # Clone.
+  rm -rf ${work}
+  git clone git://canonware.com/jemalloc.git ${work}
+  (
+    cd ${work}
+    if [ "x${rev}" != "x" ] ; then
+      # Use optional rev argument to check out a revision other than HEAD on
+      # master.
+      git checkout ${rev}
+    fi
+    # Apply diffs before generating files.
+    patch -p1 < "${src}/FREEBSD-diffs"
+    find . -name '*.orig' -delete
+    # Generate various files.
+    ./autogen.sh --enable-cc-silence --enable-dss --enable-xmalloc \
+      --enable-utrace --with-xslroot=/usr/local/share/xsl/docbook
+    gmake dist
+  )
+}
+
+do_diff() {
+  (cd ${work}; git add -A; git diff --cached) > FREEBSD-diffs
+}
+
+command=$1
+shift
+case "${command}" in
+  merge-changes) # Merge local changes that were made since the previous import.
+    rev=`cat VERSION |tr 'g' ' ' |awk '{print $2}'`
+    # Extract code corresponding to most recent import.
+    do_extract ${rev}
+    # Compute local differences to the upstream+patches and apply them.
+    (
+      cd ..
+      diff -ru -X ${src}/FREEBSD-Xlist ${workname} jemalloc > ${changes} || true
+    )
+    (
+      cd ${work}
+      patch -p1 < ${changes}
+      find . -name '*.orig' -delete
+    )
+    # Update diff.
+    do_diff
+    ;;
+  extract) # Extract upstream sources, apply patches, copy to contrib/jemalloc.
+    rev=$1
+    do_extract ${rev}
+    # Delete existing files so that cruft doesn't silently remain.
+    rm -rf ChangeLog COPYING VERSION doc include src
+    # Copy files over.
+    tar cf - -C ${work} -X FREEBSD-Xlist . |tar xvf -
+    ;;
+  rediff) # Regenerate diffs based on working tree.
+    do_diff
+    ;;
+  clean) # Remove working tree and temporary files.
+    rm -rf ${work} ${changes}
+    ;;
+  *)
+    echo "Unsupported command: \"${command}\"" >&2
+    exit 1
+    ;;
+esac
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/VERSION
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/VERSION	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,1 @@
+1.0.0-258-g9ef7f5dc34ff02f50d401e41c8d9a4a928e7c2aa
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/doc/jemalloc.3
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/doc/jemalloc.3	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,1464 @@
+'\" t
+.\"     Title: JEMALLOC
+.\"    Author: Jason Evans
+.\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
+.\"      Date: 04/16/2012
+.\"    Manual: User Manual
+.\"    Source: jemalloc 1.0.0-258-g9ef7f5dc34ff02f50d401e41c8d9a4a928e7c2aa
+.\"  Language: English
+.\"
+.TH "JEMALLOC" "3" "04/16/2012" "jemalloc 1.0.0-258-g9ef7f5dc34" "User Manual"
+.\" -----------------------------------------------------------------
+.\" * Define some portability stuff
+.\" -----------------------------------------------------------------
+.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.\" http://bugs.debian.org/507673
+.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
+.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.ie \n(.g .ds Aq \(aq
+.el       .ds Aq '
+.\" -----------------------------------------------------------------
+.\" * set default formatting
+.\" -----------------------------------------------------------------
+.\" disable hyphenation
+.nh
+.\" disable justification (adjust text to left margin only)
+.ad l
+.\" -----------------------------------------------------------------
+.\" * MAIN CONTENT STARTS HERE *
+.\" -----------------------------------------------------------------
+.SH "NAME"
+jemalloc \- general purpose memory allocation functions
+.SH "LIBRARY"
+.PP
+This manual describes jemalloc 1\&.0\&.0\-258\-g9ef7f5dc34ff02f50d401e41c8d9a4a928e7c2aa\&. More information can be found at the
+\m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
+.PP
+The following configuration options are enabled in libc\*(Aqs built\-in jemalloc:
+\fB\-\-enable\-dss\fR,
+\fB\-\-enable\-experimental\fR,
+\fB\-\-enable\-fill\fR,
+\fB\-\-enable\-lazy\-lock\fR,
+\fB\-\-enable\-munmap\fR,
+\fB\-\-enable\-stats\fR,
+\fB\-\-enable\-tcache\fR,
+\fB\-\-enable\-tls\fR,
+\fB\-\-enable\-utrace\fR, and
+\fB\-\-enable\-xmalloc\fR\&. Additionally,
+\fB\-\-enable\-debug\fR
+is enabled in development versions of FreeBSD (controlled by the
+\fBMALLOC_PRODUCTION\fR
+make variable)\&.
+.SH "SYNOPSIS"
+.sp
+.ft B
+.nf
+#include <stdlib\&.h>
+#include <malloc_np\&.h>
+.fi
+.ft
+.SS "Standard API"
+.HP \w'void\ *malloc('u
+.BI "void *malloc(size_t\ " "size" ");"
+.HP \w'void\ *calloc('u
+.BI "void *calloc(size_t\ " "number" ", size_t\ " "size" ");"
+.HP \w'int\ posix_memalign('u
+.BI "int posix_memalign(void\ **" "ptr" ", size_t\ " "alignment" ", size_t\ " "size" ");"
+.HP \w'void\ *aligned_alloc('u
+.BI "void *aligned_alloc(size_t\ " "alignment" ", size_t\ " "size" ");"
+.HP \w'void\ *realloc('u
+.BI "void *realloc(void\ *" "ptr" ", size_t\ " "size" ");"
+.HP \w'void\ free('u
+.BI "void free(void\ *" "ptr" ");"
+.SS "Non\-standard API"
+.HP \w'size_t\ malloc_usable_size('u
+.BI "size_t malloc_usable_size(const\ void\ *" "ptr" ");"
+.HP \w'void\ malloc_stats_print('u
+.BI "void malloc_stats_print(void\ " "(*write_cb)" "\ (void\ *,\ const\ char\ *), void\ *" "cbopaque" ", const\ char\ *" "opts" ");"
+.HP \w'int\ mallctl('u
+.BI "int mallctl(const\ char\ *" "name" ", void\ *" "oldp" ", size_t\ *" "oldlenp" ", void\ *" "newp" ", size_t\ " "newlen" ");"
+.HP \w'int\ mallctlnametomib('u
+.BI "int mallctlnametomib(const\ char\ *" "name" ", size_t\ *" "mibp" ", size_t\ *" "miblenp" ");"
+.HP \w'int\ mallctlbymib('u
+.BI "int mallctlbymib(const\ size_t\ *" "mib" ", size_t\ " "miblen" ", void\ *" "oldp" ", size_t\ *" "oldlenp" ", void\ *" "newp" ", size_t\ " "newlen" ");"
+.HP \w'void\ (*malloc_message)('u
+.BI "void (*malloc_message)(void\ *" "cbopaque" ", const\ char\ *" "s" ");"
+.PP
+const char *\fImalloc_conf\fR;
+.SS "Experimental API"
+.HP \w'int\ allocm('u
+.BI "int allocm(void\ **" "ptr" ", size_t\ *" "rsize" ", size_t\ " "size" ", int\ " "flags" ");"
+.HP \w'int\ rallocm('u
+.BI "int rallocm(void\ **" "ptr" ", size_t\ *" "rsize" ", size_t\ " "size" ", size_t\ " "extra" ", int\ " "flags" ");"
+.HP \w'int\ sallocm('u
+.BI "int sallocm(const\ void\ *" "ptr" ", size_t\ *" "rsize" ", int\ " "flags" ");"
+.HP \w'int\ dallocm('u
+.BI "int dallocm(void\ *" "ptr" ", int\ " "flags" ");"
+.HP \w'int\ nallocm('u
+.BI "int nallocm(size_t\ *" "rsize" ", size_t\ " "size" ", int\ " "flags" ");"
+.SH "DESCRIPTION"
+.SS "Standard API"
+.PP
+The
+\fBmalloc\fR\fB\fR
+function allocates
+\fIsize\fR
+bytes of uninitialized memory\&. The allocated space is suitably aligned (after possible pointer coercion) for storage of any type of object\&.
+.PP
+The
+\fBcalloc\fR\fB\fR
+function allocates space for
+\fInumber\fR
+objects, each
+\fIsize\fR
+bytes in length\&. The result is identical to calling
+\fBmalloc\fR\fB\fR
+with an argument of
+\fInumber\fR
+*
+\fIsize\fR, with the exception that the allocated memory is explicitly initialized to zero bytes\&.
+.PP
+The
+\fBposix_memalign\fR\fB\fR
+function allocates
+\fIsize\fR
+bytes of memory such that the allocation\*(Aqs base address is an even multiple of
+\fIalignment\fR, and returns the allocation in the value pointed to by
+\fIptr\fR\&. The requested
+\fIalignment\fR
+must be a power of 2 at least as large as
+sizeof(\fBvoid *\fR)\&.
+.PP
+The
+\fBaligned_alloc\fR\fB\fR
+function allocates
+\fIsize\fR
+bytes of memory such that the allocation\*(Aqs base address is an even multiple of
+\fIalignment\fR\&. The requested
+\fIalignment\fR
+must be a power of 2\&. Behavior is undefined if
+\fIsize\fR
+is not an integral multiple of
+\fIalignment\fR\&.
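+.PP
+For example, a 64\-byte aligned allocation might be obtained as follows (a
+minimal sketch; the alignment and size values are illustrative and error
+handling is reduced to
+\fBabort\fR\fB\fR):
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+void *p = aligned_alloc(64, 1024);
+
+if (p == NULL)
+	abort();
+/* The base address of p is an even multiple of 64; 1024 is an integral
+   multiple of 64, as required\&. */
+free(p);
+.fi
+.if n \{\
+.RE
+.\}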
+.PP
+The
+\fBrealloc\fR\fB\fR
+function changes the size of the previously allocated memory referenced by
+\fIptr\fR
+to
+\fIsize\fR
+bytes\&. The contents of the memory are unchanged up to the lesser of the new and old sizes\&. If the new size is larger, the contents of the newly allocated portion of the memory are undefined\&. Upon success, the memory referenced by
+\fIptr\fR
+is freed and a pointer to the newly allocated memory is returned\&. Note that
+\fBrealloc\fR\fB\fR
+may move the memory allocation, resulting in a different return value than
+\fIptr\fR\&. If
+\fIptr\fR
+is
+\fBNULL\fR, the
+\fBrealloc\fR\fB\fR
+function behaves identically to
+\fBmalloc\fR\fB\fR
+for the specified size\&.
+.PP
+The
+\fBfree\fR\fB\fR
+function causes the allocated memory referenced by
+\fIptr\fR
+to be made available for future allocations\&. If
+\fIptr\fR
+is
+\fBNULL\fR, no action occurs\&.
+.SS "Non\-standard API"
+.PP
+The
+\fBmalloc_usable_size\fR\fB\fR
+function returns the usable size of the allocation pointed to by
+\fIptr\fR\&. The return value may be larger than the size that was requested during allocation\&. The
+\fBmalloc_usable_size\fR\fB\fR
+function is not a mechanism for in\-place
+\fBrealloc\fR\fB\fR; rather it is provided solely as a tool for introspection purposes\&. Any discrepancy between the requested allocation size and the size reported by
+\fBmalloc_usable_size\fR\fB\fR
+should not be depended on, since such behavior is entirely implementation\-dependent\&.
+.PP
+The
+\fBmalloc_stats_print\fR\fB\fR
+function writes human\-readable summary statistics via the
+\fIwrite_cb\fR
+callback function pointer and
+\fIcbopaque\fR
+data passed to
+\fIwrite_cb\fR, or
+\fBmalloc_message\fR\fB\fR
+if
+\fIwrite_cb\fR
+is
+\fBNULL\fR\&. This function can be called repeatedly\&. General information that never changes during execution can be omitted by specifying "g" as a character within the
+\fIopts\fR
+string\&. Note that
+\fBmalloc_stats_print\fR\fB\fR
+uses the
+\fBmallctl*\fR\fB\fR
+functions internally, so inconsistent statistics can be reported if multiple threads use these functions simultaneously\&. If
+\fB\-\-enable\-stats\fR
+is specified during configuration, \(lqm\(rq and \(lqa\(rq can be specified to omit merged arena and per arena statistics, respectively; \(lqb\(rq and \(lql\(rq can be specified to omit per size class statistics for bins and large objects, respectively\&. Unrecognized characters are silently ignored\&. Note that thread caching may prevent some statistics from being completely up to date, since extra locking would be required to merge counters that track thread cache operations\&.
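+.PP
+For example, a one\-shot report can be requested as follows; passing
+\fBNULL\fR
+for
+\fIwrite_cb\fR
+and
+\fIcbopaque\fR
+routes the output through
+\fBmalloc_message\fR\fB\fR:
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+/* Full report\&. */
+malloc_stats_print(NULL, NULL, NULL);
+
+/* Same, but omit general information that never changes\&. */
+malloc_stats_print(NULL, NULL, "g");
+.fi
+.if n \{\
+.RE
+.\}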
+.PP
+The
+\fBmallctl\fR\fB\fR
+function provides a general interface for introspecting the memory allocator, as well as setting modifiable parameters and triggering actions\&. The period\-separated
+\fIname\fR
+argument specifies a location in a tree\-structured namespace; see the
+MALLCTL NAMESPACE
+section for documentation on the tree contents\&. To read a value, pass a pointer via
+\fIoldp\fR
+to adequate space to contain the value, and a pointer to its length via
+\fIoldlenp\fR; otherwise pass
+\fBNULL\fR
+and
+\fBNULL\fR\&. Similarly, to write a value, pass a pointer to the value via
+\fInewp\fR, and its length via
+\fInewlen\fR; otherwise pass
+\fBNULL\fR
+and
+\fB0\fR\&.
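+.PP
+For example, a read and a write might look like the following (both names are documented in the
+MALLCTL NAMESPACE
+section):
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+unsigned narenas, arena_ind;
+size_t sz = sizeof(unsigned);
+
+/* Read the maximum number of arenas\&. */
+mallctl("arenas\&.narenas", &narenas, &sz, NULL, 0);
+
+/* Bind the calling thread to arena 0\&. */
+arena_ind = 0;
+mallctl("thread\&.arena", NULL, NULL, &arena_ind, sizeof(arena_ind));
+.fi
+.if n \{\
+.RE
+.\}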
+.PP
+The
+\fBmallctlnametomib\fR\fB\fR
+function provides a way to avoid repeated name lookups for applications that repeatedly query the same portion of the namespace, by translating a name to a \(lqManagement Information Base\(rq (MIB) that can be passed repeatedly to
+\fBmallctlbymib\fR\fB\fR\&. Upon successful return from
+\fBmallctlnametomib\fR\fB\fR,
+\fImibp\fR
+contains an array of
+\fI*miblenp\fR
+integers, where
+\fI*miblenp\fR
+is the lesser of the number of components in
+\fIname\fR
+and the input value of
+\fI*miblenp\fR\&. Thus it is possible to pass a
+\fI*miblenp\fR
+that is smaller than the number of period\-separated name components, which results in a partial MIB that can be used as the basis for constructing a complete MIB\&. For name components that are integers (e\&.g\&. the 2 in
+"arenas\&.bin\&.2\&.size"), the corresponding MIB component will always be that integer\&. Therefore, it is legitimate to construct code like the following:
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+unsigned nbins, i;
+
+int mib[4];
+size_t len, miblen;
+
+len = sizeof(nbins);
+mallctl("arenas\&.nbins", &nbins, &len, NULL, 0);
+
+miblen = 4;
+mallnametomib("arenas\&.bin\&.0\&.size", mib, &miblen);
+for (i = 0; i < nbins; i++) {
+	size_t bin_size;
+
+	mib[2] = i;
+	len = sizeof(bin_size);
+	mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
+	/* Do something with bin_size\&.\&.\&. */
+}
+.fi
+.if n \{\
+.RE
+.\}
+.SS "Experimental API"
+.PP
+The experimental API is subject to change or removal without regard for backward compatibility\&. If
+\fB\-\-disable\-experimental\fR
+is specified during configuration, the experimental API is omitted\&.
+.PP
+The
+\fBallocm\fR\fB\fR,
+\fBrallocm\fR\fB\fR,
+\fBsallocm\fR\fB\fR,
+\fBdallocm\fR\fB\fR, and
+\fBnallocm\fR\fB\fR
+functions all have a
+\fIflags\fR
+argument that can be used to specify options\&. The functions only check the options that are contextually relevant\&. Use bitwise or (|) operations to specify one or more of the following:
+.PP
+\fBALLOCM_LG_ALIGN(\fR\fB\fIla\fR\fR\fB) \fR
+.RS 4
+Align the memory allocation to start at an address that is a multiple of
+(1 << \fIla\fR)\&. This macro does not validate that
+\fIla\fR
+is within the valid range\&.
+.RE
+.PP
+\fBALLOCM_ALIGN(\fR\fB\fIa\fR\fR\fB) \fR
+.RS 4
+Align the memory allocation to start at an address that is a multiple of
+\fIa\fR, where
+\fIa\fR
+is a power of two\&. This macro does not validate that
+\fIa\fR
+is a power of 2\&.
+.RE
+.PP
+\fBALLOCM_ZERO\fR
+.RS 4
+Initialize newly allocated memory to contain zero bytes\&. In the growing reallocation case, the real size prior to reallocation defines the boundary between untouched bytes and those that are initialized to contain zero bytes\&. If this option is absent, newly allocated memory is uninitialized\&.
+.RE
+.PP
+\fBALLOCM_NO_MOVE\fR
+.RS 4
+For reallocation, fail rather than moving the object\&. This constraint can apply to both growth and shrinkage\&.
+.RE
+.PP
+The
+\fBallocm\fR\fB\fR
+function allocates at least
+\fIsize\fR
+bytes of memory, sets
+\fI*ptr\fR
+to the base address of the allocation, and sets
+\fI*rsize\fR
+to the real size of the allocation if
+\fIrsize\fR
+is not
+\fBNULL\fR\&. Behavior is undefined if
+\fIsize\fR
+is
+\fB0\fR\&.
+.PP
+The
+\fBrallocm\fR\fB\fR
+function resizes the allocation at
+\fI*ptr\fR
+to be at least
+\fIsize\fR
+bytes, sets
+\fI*ptr\fR
+to the base address of the allocation if it moved, and sets
+\fI*rsize\fR
+to the real size of the allocation if
+\fIrsize\fR
+is not
+\fBNULL\fR\&. If
+\fIextra\fR
+is non\-zero, an attempt is made to resize the allocation to be at least
+(\fIsize\fR + \fIextra\fR)
+bytes, though inability to allocate the extra byte(s) will not by itself result in failure\&. Behavior is undefined if
+\fIsize\fR
+is
+\fB0\fR, or if
+(\fIsize\fR + \fIextra\fR > \fBSIZE_T_MAX\fR)\&.
+.PP
+The
+\fBsallocm\fR\fB\fR
+function sets
+\fI*rsize\fR
+to the real size of the allocation\&.
+.PP
+The
+\fBdallocm\fR\fB\fR
+function causes the memory referenced by
+\fIptr\fR
+to be made available for future allocations\&.
+.PP
+The
+\fBnallocm\fR\fB\fR
+function allocates no memory, but it performs the same size computation as the
+\fBallocm\fR\fB\fR
+function, and if
+\fIrsize\fR
+is not
+\fBNULL\fR
+it sets
+\fI*rsize\fR
+to the real size of the allocation that would result from the equivalent
+\fBallocm\fR\fB\fR
+function call\&. Behavior is undefined if
+\fIsize\fR
+is
+\fB0\fR\&.
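+.PP
+As a brief sketch of how these functions combine, the following allocates a zeroed, 16\-byte\-aligned object, attempts to grow it in place, and falls back to a moving reallocation (some return values are ignored for brevity):
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+void *p;
+size_t rsize;
+int flags = ALLOCM_LG_ALIGN(4) | ALLOCM_ZERO;
+
+/* At least 100 zeroed bytes, aligned to (1 << 4) == 16 bytes\&. */
+if (allocm(&p, &rsize, 100, flags) != ALLOCM_SUCCESS)
+	abort();
+
+/* Try to grow in place; ALLOCM_NO_MOVE fails rather than moving\&. */
+if (rallocm(&p, &rsize, 200, 0, flags | ALLOCM_NO_MOVE) ==
+    ALLOCM_ERR_NOT_MOVED)
+	rallocm(&p, &rsize, 200, 0, flags);
+
+dallocm(p, 0);
+.fi
+.if n \{\
+.RE
+.\}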
+.SH "TUNING"
+.PP
+Once, when the first call is made to one of the memory allocation routines, the allocator initializes its internals based in part on various options that can be specified at compile\- or run\-time\&.
+.PP
+The string pointed to by the global variable
+\fImalloc_conf\fR, the \(lqname\(rq of the file referenced by the symbolic link named
+/etc/malloc\&.conf, and the value of the environment variable
+\fBMALLOC_CONF\fR, will be interpreted, in that order, from left to right as options\&.
+.PP
+An options string is a comma\-separated list of option:value pairs\&. There is one key corresponding to each
+"opt\&.*"
+mallctl (see the
+MALLCTL NAMESPACE
+section for options documentation)\&. For example,
+abort:true,narenas:1
+sets the
+"opt\&.abort"
+and
+"opt\&.narenas"
+options\&. Some options have boolean values (true/false), others have integer values (base 8, 10, or 16, depending on prefix), and yet others have raw string values\&.
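+.PP
+For example, the options above can also be fixed at compile time by defining the
+\fImalloc_conf\fR
+global in the application source:
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+malloc_conf = "abort:true,narenas:1";
+.fi
+.if n \{\
+.RE
+.\}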
+.SH "IMPLEMENTATION NOTES"
+.PP
+Traditionally, allocators have used
+\fBsbrk\fR(2)
+to obtain memory, which is suboptimal for several reasons, including race conditions, increased fragmentation, and artificial limitations on maximum usable memory\&. If
+\fB\-\-enable\-dss\fR
+is specified during configuration, this allocator uses both
+\fBsbrk\fR(2)
+and
+\fBmmap\fR(2), in that order of preference; otherwise only
+\fBmmap\fR(2)
+is used\&.
+.PP
+This allocator uses multiple arenas in order to reduce lock contention for threaded programs on multi\-processor systems\&. This works well with regard to threading scalability, but incurs some costs\&. There is a small fixed per\-arena overhead, and additionally, arenas manage memory completely independently of each other, which means a small fixed increase in overall memory fragmentation\&. These overheads are not generally an issue, given the number of arenas normally used\&. Note that using substantially more arenas than the default is not likely to improve performance, mainly due to reduced cache performance\&. However, it may make sense to reduce the number of arenas if an application does not make much use of the allocation functions\&.
+.PP
+In addition to multiple arenas, unless
+\fB\-\-disable\-tcache\fR
+is specified during configuration, this allocator supports thread\-specific caching for small and large objects, in order to make it possible to completely avoid synchronization for most allocation requests\&. Such caching allows very fast allocation in the common case, but it increases memory usage and fragmentation, since a bounded number of objects can remain allocated in each thread cache\&.
+.PP
+Memory is conceptually broken into equal\-sized chunks, where the chunk size is a power of two that is greater than the page size\&. Chunks are always aligned to multiples of the chunk size\&. This alignment makes it possible to find metadata for user objects very quickly\&.
+.PP
+User objects are broken into three categories according to size: small, large, and huge\&. Small objects are smaller than one page\&. Large objects are smaller than the chunk size\&. Huge objects are a multiple of the chunk size\&. Small and large objects are managed by arenas; huge objects are managed separately in a single data structure that is shared by all threads\&. Huge objects are used by applications infrequently enough that this single data structure is not a scalability issue\&.
+.PP
+Each chunk that is managed by an arena tracks its contents as runs of contiguous pages (unused, backing a set of small objects, or backing one large object)\&. The combination of chunk alignment and chunk page maps makes it possible to determine all metadata regarding small and large allocations in constant time\&.
+.PP
+Small objects are managed in groups by page runs\&. Each run maintains a frontier and free list to track which regions are in use\&. Allocation requests that are no more than half the quantum (8 or 16, depending on architecture) are rounded up to the nearest power of two that is at least
+sizeof(\fBdouble\fR)\&. All other small object size classes are multiples of the quantum, spaced such that internal fragmentation is limited to approximately 25% for all but the smallest size classes\&. Allocation requests that are larger than the maximum small size class, but small enough to fit in an arena\-managed chunk (see the
+"opt\&.lg_chunk"
+option), are rounded up to the nearest run size\&. Allocation requests that are too large to fit in an arena\-managed chunk are rounded up to the nearest multiple of the chunk size\&.
+.PP
+Allocations are packed tightly together, which can be an issue for multi\-threaded applications\&. If you need to assure that allocations do not suffer from cacheline sharing, round your allocation requests up to the nearest multiple of the cacheline size, or specify cacheline alignment when allocating\&.
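+.PP
+For example, assuming a 64\-byte cacheline, either of the following avoids cacheline sharing between objects:
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+size_t len = 100, cacheline = 64;	/* assumed cacheline size */
+void *p, *q;
+
+/* Either round the request up to a multiple of the cacheline size\&.\&.\&. */
+p = malloc((len + cacheline - 1) & ~(cacheline - 1));
+
+/* \&.\&.\&.or request cacheline alignment explicitly\&. */
+posix_memalign(&q, cacheline, len);
+
+free(p);
+free(q);
+.fi
+.if n \{\
+.RE
+.\}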
+.PP
+Assuming 4 MiB chunks, 4 KiB pages, and a 16\-byte quantum on a 64\-bit system, the size classes in each category are as shown in
+Table 1\&.
+.sp
+.it 1 an-trap
+.nr an-no-space-flag 1
+.nr an-break-flag 1
+.br
+.B Table\ \&1.\ \&Size classes
+.TS
+allbox tab(:);
+lB rB lB.
+T{
+Category
+T}:T{
+Spacing
+T}:T{
+Size
+T}
+.T&
+l r l
+^ r l
+^ r l
+^ r l
+^ r l
+^ r l
+^ r l
+l r l
+l r l.
+T{
+Small
+T}:T{
+lg
+T}:T{
+[8]
+T}
+:T{
+16
+T}:T{
+[16, 32, 48, \&.\&.\&., 128]
+T}
+:T{
+32
+T}:T{
+[160, 192, 224, 256]
+T}
+:T{
+64
+T}:T{
+[320, 384, 448, 512]
+T}
+:T{
+128
+T}:T{
+[640, 768, 896, 1024]
+T}
+:T{
+256
+T}:T{
+[1280, 1536, 1792, 2048]
+T}
+:T{
+512
+T}:T{
+[2560, 3072, 3584]
+T}
+T{
+Large
+T}:T{
+4 KiB
+T}:T{
+[4 KiB, 8 KiB, 12 KiB, \&.\&.\&., 4072 KiB]
+T}
+T{
+Huge
+T}:T{
+4 MiB
+T}:T{
+[4 MiB, 8 MiB, 12 MiB, \&.\&.\&.]
+T}
+.TE
+.sp 1
+.SH "MALLCTL NAMESPACE"
+.PP
+The following names are defined in the namespace accessible via the
+\fBmallctl*\fR\fB\fR
+functions\&. Value types are specified in parentheses, their readable/writable statuses are encoded as
+rw,
+r\-,
+\-w, or
+\-\-, and required build configuration flags follow, if any\&. A name element encoded as
+<i>
+or
+<j>
+indicates an integer component, where the integer varies from 0 to some upper value that must be determined via introspection\&. In the case of
+"stats\&.arenas\&.<i>\&.*",
+<i>
+equal to
+"arenas\&.narenas"
+can be used to access the summation of statistics from all arenas\&. Take special note of the
+"epoch"
+mallctl, which controls refreshing of cached dynamic statistics\&.
+.PP
+"version" (\fBconst char *\fR) r\-
+.RS 4
+Return the jemalloc version string\&.
+.RE
+.PP
+"epoch" (\fBuint64_t\fR) rw
+.RS 4
+If a value is passed in, refresh the data from which the
+\fBmallctl*\fR\fB\fR
+functions report values, and increment the epoch\&. Return the current epoch\&. This is useful for detecting whether another thread caused a refresh\&.
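+For example, a consistent statistics snapshot can be taken as follows (the
+"stats\&.allocated"
+mallctl shown requires
+\fB\-\-enable\-stats\fR):
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+uint64_t epoch = 1;
+size_t sz = sizeof(epoch);
+size_t allocated;
+
+/* Refresh cached statistics\&. */
+mallctl("epoch", &epoch, &sz, &epoch, sz);
+
+/* Read an up-to-date value\&. */
+sz = sizeof(allocated);
+mallctl("stats\&.allocated", &allocated, &sz, NULL, 0);
+.fi
+.if n \{\
+.RE
+.\}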
+.RE
+.PP
+"config\&.debug" (\fBbool\fR) r\-
+.RS 4
+\fB\-\-enable\-debug\fR
+was specified during build configuration\&.
+.RE
+.PP
+"config\&.dss" (\fBbool\fR) r\-
+.RS 4
+\fB\-\-enable\-dss\fR
+was specified during build configuration\&.
+.RE
+.PP
+"config\&.fill" (\fBbool\fR) r\-
+.RS 4
+\fB\-\-enable\-fill\fR
+was specified during build configuration\&.
+.RE
+.PP
+"config\&.lazy_lock" (\fBbool\fR) r\-
+.RS 4
+\fB\-\-enable\-lazy\-lock\fR
+was specified during build configuration\&.
+.RE
+.PP
+"config\&.munmap" (\fBbool\fR) r\-
+.RS 4
+\fB\-\-enable\-munmap\fR
+was specified during build configuration\&.
+.RE
+.PP
+"config\&.prof" (\fBbool\fR) r\-
+.RS 4
+\fB\-\-enable\-prof\fR
+was specified during build configuration\&.
+.RE
+.PP
+"config\&.prof_libgcc" (\fBbool\fR) r\-
+.RS 4
+\fB\-\-disable\-prof\-libgcc\fR
+was not specified during build configuration\&.
+.RE
+.PP
+"config\&.prof_libunwind" (\fBbool\fR) r\-
+.RS 4
+\fB\-\-enable\-prof\-libunwind\fR
+was specified during build configuration\&.
+.RE
+.PP
+"config\&.stats" (\fBbool\fR) r\-
+.RS 4
+\fB\-\-enable\-stats\fR
+was specified during build configuration\&.
+.RE
+.PP
+"config\&.tcache" (\fBbool\fR) r\-
+.RS 4
+\fB\-\-disable\-tcache\fR
+was not specified during build configuration\&.
+.RE
+.PP
+"config\&.tls" (\fBbool\fR) r\-
+.RS 4
+\fB\-\-disable\-tls\fR
+was not specified during build configuration\&.
+.RE
+.PP
+"config\&.utrace" (\fBbool\fR) r\-
+.RS 4
+\fB\-\-enable\-utrace\fR
+was specified during build configuration\&.
+.RE
+.PP
+"config\&.valgrind" (\fBbool\fR) r\-
+.RS 4
+\fB\-\-enable\-valgrind\fR
+was specified during build configuration\&.
+.RE
+.PP
+"config\&.xmalloc" (\fBbool\fR) r\-
+.RS 4
+\fB\-\-enable\-xmalloc\fR
+was specified during build configuration\&.
+.RE
+.PP
+"opt\&.abort" (\fBbool\fR) r\-
+.RS 4
+Abort\-on\-warning enabled/disabled\&. If true, most warnings are fatal\&. The process will call
+\fBabort\fR(3)
+in these cases\&. This option is disabled by default unless
+\fB\-\-enable\-debug\fR
+is specified during configuration, in which case it is enabled by default\&.
+.RE
+.PP
+"opt\&.lg_chunk" (\fBsize_t\fR) r\-
+.RS 4
+Virtual memory chunk size (log base 2)\&. The default chunk size is 4 MiB (2^22)\&.
+.RE
+.PP
+"opt\&.narenas" (\fBsize_t\fR) r\-
+.RS 4
+Maximum number of arenas to use\&. The default maximum number of arenas is four times the number of CPUs, or one if there is a single CPU\&.
+.RE
+.PP
+"opt\&.lg_dirty_mult" (\fBssize_t\fR) r\-
+.RS 4
+Per\-arena minimum ratio (log base 2) of active to dirty pages\&. Some dirty unused pages may be allowed to accumulate, within the limit set by the ratio (or one chunk worth of dirty pages, whichever is greater), before informing the kernel about some of those pages via
+\fBmadvise\fR(2)
+or a similar system call\&. This provides the kernel with sufficient information to recycle dirty pages if physical memory becomes scarce and the pages remain unused\&. The default minimum ratio is 32:1 (2^5:1); an option value of \-1 will disable dirty page purging\&.
+.RE
+.PP
+"opt\&.stats_print" (\fBbool\fR) r\-
+.RS 4
+Enable/disable statistics printing at exit\&. If enabled, the
+\fBmalloc_stats_print\fR\fB\fR
+function is called at program exit via an
+\fBatexit\fR(3)
+function\&. If
+\fB\-\-enable\-stats\fR
+is specified during configuration, this has the potential to cause deadlock for a multi\-threaded process that exits while one or more threads are executing in the memory allocation functions\&. Therefore, this option should only be used with care; it is primarily intended as a performance tuning aid during application development\&. This option is disabled by default\&.
+.RE
+.PP
+"opt\&.junk" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR]
+.RS 4
+Junk filling enabled/disabled\&. If enabled, each byte of uninitialized allocated memory will be initialized to
+0xa5\&. All deallocated memory will be initialized to
+0x5a\&. This is intended for debugging and will impact performance negatively\&. This option is disabled by default unless
+\fB\-\-enable\-debug\fR
+is specified during configuration, in which case it is enabled by default\&.
+.RE
+.PP
+"opt\&.quarantine" (\fBsize_t\fR) r\- [\fB\-\-enable\-fill\fR]
+.RS 4
+Per thread quarantine size in bytes\&. If non\-zero, each thread maintains a FIFO object quarantine that stores up to the specified number of bytes of memory\&. The quarantined memory is not freed until it is released from quarantine, though it is immediately junk\-filled if the
+"opt\&.junk"
+option is enabled\&. This feature is of particular use in combination with
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which can detect attempts to access quarantined objects\&. This is intended for debugging and will impact performance negatively\&. The default quarantine size is 0\&.
+.RE
+.PP
+"opt\&.redzone" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR]
+.RS 4
+Redzones enabled/disabled\&. If enabled, small allocations have redzones before and after them\&. Furthermore, if the
+"opt\&.junk"
+option is enabled, the redzones are checked for corruption during deallocation\&. However, the primary intended purpose of this feature is to be used in combination with
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which needs redzones in order to do effective buffer overflow/underflow detection\&. This option is intended for debugging and will impact performance negatively\&. This option is disabled by default\&.
+.RE
+.PP
+"opt\&.zero" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR]
+.RS 4
+Zero filling enabled/disabled\&. If enabled, each byte of uninitialized allocated memory will be initialized to 0\&. Note that this initialization only happens once for each byte, so
+\fBrealloc\fR\fB\fR
+and
+\fBrallocm\fR\fB\fR
+calls do not zero memory that was previously allocated\&. This is intended for debugging and will impact performance negatively\&. This option is disabled by default\&.
+.RE
+.PP
+"opt\&.utrace" (\fBbool\fR) r\- [\fB\-\-enable\-utrace\fR]
+.RS 4
+Allocation tracing based on
+\fButrace\fR(2)
+enabled/disabled\&. This option is disabled by default\&.
+.RE
+.PP
+"opt\&.valgrind" (\fBbool\fR) r\- [\fB\-\-enable\-valgrind\fR]
+.RS 4
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2
+support enabled/disabled\&. If enabled, several other options are automatically modified during options processing to work well with Valgrind:
+"opt\&.junk"
+and
+"opt\&.zero"
+are set to false,
+"opt\&.quarantine"
+is set to 16 MiB, and
+"opt\&.redzone"
+is set to true\&. This option is disabled by default\&.
+.RE
+.PP
+"opt\&.xmalloc" (\fBbool\fR) r\- [\fB\-\-enable\-xmalloc\fR]
+.RS 4
+Abort\-on\-out\-of\-memory enabled/disabled\&. If enabled, rather than returning failure for any allocation function, display a diagnostic message on
+\fBSTDERR_FILENO\fR
+and cause the program to drop core (using
+\fBabort\fR(3))\&. If an application is designed to depend on this behavior, set the option at compile time by including the following in the source code:
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+malloc_conf = "xmalloc:true";
+.fi
+.if n \{\
+.RE
+.\}
+.sp
+This option is disabled by default\&.
+.RE
+.PP
+"opt\&.tcache" (\fBbool\fR) r\- [\fB\-\-enable\-tcache\fR]
+.RS 4
+Thread\-specific caching enabled/disabled\&. When there are multiple threads, each thread uses a thread\-specific cache for objects up to a certain size\&. Thread\-specific caching allows many allocations to be satisfied without performing any thread synchronization, at the cost of increased memory use\&. See the
+"opt\&.lg_tcache_max"
+option for related tuning information\&. This option is enabled by default\&.
+.RE
+.PP
+"opt\&.lg_tcache_max" (\fBsize_t\fR) r\- [\fB\-\-enable\-tcache\fR]
+.RS 4
+Maximum size class (log base 2) to cache in the thread\-specific cache\&. At a minimum, all small size classes are cached, and at a maximum all large size classes are cached\&. The default maximum is 32 KiB (2^15)\&.
+.RE
+.PP
+"opt\&.prof" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
+.RS 4
+Memory profiling enabled/disabled\&. If enabled, profile memory allocation activity, and use an
+\fBatexit\fR(3)
+function to dump final memory usage to a file named according to the pattern
+<prefix>\&.<pid>\&.<seq>\&.f\&.heap, where
+<prefix>
+is controlled by the
+"opt\&.prof_prefix"
+option\&. See the
+"opt\&.prof_active"
+option for on\-the\-fly activation/deactivation\&. See the
+"opt\&.lg_prof_sample"
+option for probabilistic sampling control\&. See the
+"opt\&.prof_accum"
+option for control of cumulative sample reporting\&. See the
+"opt\&.lg_prof_interval"
+option for information on interval\-triggered profile dumping, and the
+"opt\&.prof_gdump"
+option for information on high\-water\-triggered profile dumping\&. Profile output is compatible with the included
+\fBpprof\fR
+Perl script, which originates from the
+\m[blue]\fBgoogle\-perftools package\fR\m[]\&\s-2\u[3]\d\s+2\&.
+.RE
+.PP
+"opt\&.prof_prefix" (\fBconst char *\fR) r\- [\fB\-\-enable\-prof\fR]
+.RS 4
+Filename prefix for profile dumps\&. If the prefix is set to the empty string, no automatic dumps will occur; this is primarily useful for disabling the automatic final heap dump (which also disables leak reporting, if enabled)\&. The default prefix is
+jeprof\&.
+.RE
+.PP
+"opt\&.prof_active" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
+.RS 4
+Profiling activated/deactivated\&. This is a secondary control mechanism that makes it possible to start the application with profiling enabled (see the
+"opt\&.prof"
+option) but inactive, then toggle profiling at any time during program execution with the
+"prof\&.active"
+mallctl\&. This option is enabled by default\&.
+.RE
+.PP
+"opt\&.lg_prof_sample" (\fBssize_t\fR) r\- [\fB\-\-enable\-prof\fR]
+.RS 4
+Average interval (log base 2) between allocation samples, as measured in bytes of allocation activity\&. Increasing the sampling interval decreases profile fidelity, but also decreases the computational overhead\&. The default sample interval is 1 (2^0) (i\&.e\&. all allocations are sampled)\&.
+.RE
+.PP
+"opt\&.prof_accum" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
+.RS 4
+Reporting of cumulative object/byte counts in profile dumps enabled/disabled\&. If this option is enabled, every unique backtrace must be stored for the duration of execution\&. Depending on the application, this can impose a large memory overhead, and the cumulative counts are not always of interest\&. This option is enabled by default\&.
+.RE
+.PP
+"opt\&.lg_prof_interval" (\fBssize_t\fR) r\- [\fB\-\-enable\-prof\fR]
+.RS 4
+Average interval (log base 2) between memory profile dumps, as measured in bytes of allocation activity\&. The actual interval between dumps may be sporadic because decentralized allocation counters are used to avoid synchronization bottlenecks\&. Profiles are dumped to files named according to the pattern
+<prefix>\&.<pid>\&.<seq>\&.i<iseq>\&.heap, where
+<prefix>
+is controlled by the
+"opt\&.prof_prefix"
+option\&. By default, interval\-triggered profile dumping is disabled (encoded as \-1)\&.
+.RE
+.PP
+"opt\&.prof_gdump" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
+.RS 4
+Trigger a memory profile dump every time the total virtual memory exceeds the previous maximum\&. Profiles are dumped to files named according to the pattern
+<prefix>\&.<pid>\&.<seq>\&.u<useq>\&.heap, where
+<prefix>
+is controlled by the
+"opt\&.prof_prefix"
+option\&. This option is disabled by default\&.
+.RE
+.PP
+"opt\&.prof_leak" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
+.RS 4
+Leak reporting enabled/disabled\&. If enabled, use an
+\fBatexit\fR(3)
+function to report memory leaks detected by allocation sampling\&. See the
+"opt\&.prof"
+option for information on analyzing heap profile output\&. This option is disabled by default\&.
+.RE
+.PP
+"thread\&.arena" (\fBunsigned\fR) rw
+.RS 4
+Get or set the arena associated with the calling thread\&. The arena index must be less than the maximum number of arenas (see the
+"arenas\&.narenas"
+mallctl)\&. If the specified arena was not initialized beforehand (see the
+"arenas\&.initialized"
+mallctl), it will be automatically initialized as a side effect of calling this interface\&.
+.RE
+.PP
+"thread\&.allocated" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Get the total number of bytes ever allocated by the calling thread\&. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases\&.
+.RE
+.PP
+"thread\&.allocatedp" (\fBuint64_t *\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Get a pointer to the value that is returned by the
+"thread\&.allocated"
+mallctl\&. This is useful for avoiding the overhead of repeated
+\fBmallctl*\fR\fB\fR
+calls\&.
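+For example:
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+uint64_t *allocatedp;
+size_t sz = sizeof(allocatedp);
+
+/* Fetch the pointer once\&. */
+mallctl("thread\&.allocatedp", &allocatedp, &sz, NULL, 0);
+
+/* Later, in the same thread, *allocatedp reflects the cumulative
+   allocation total without further mallctl calls\&. */
+.fi
+.if n \{\
+.RE
+.\}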
+.RE
+.PP
+"thread\&.deallocated" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Get the total number of bytes ever deallocated by the calling thread\&. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases\&.
+.RE
+.PP
+"thread\&.deallocatedp" (\fBuint64_t *\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Get a pointer to the value that is returned by the
+"thread\&.deallocated"
+mallctl\&. This is useful for avoiding the overhead of repeated
+\fBmallctl*\fR\fB\fR
+calls\&.
+.RE
+.PP
+"thread\&.tcache\&.enabled" (\fBbool\fR) rw [\fB\-\-enable\-tcache\fR]
+.RS 4
+Enable/disable calling thread\*(Aqs tcache\&. The tcache is implicitly flushed as a side effect of becoming disabled (see
+"thread\&.tcache\&.flush")\&.
+.RE
+.PP
+"thread\&.tcache\&.flush" (\fBvoid\fR) \-\- [\fB\-\-enable\-tcache\fR]
+.RS 4
+Flush calling thread\*(Aqs tcache\&. This interface releases all cached objects and internal data structures associated with the calling thread\*(Aqs thread\-specific cache\&. Ordinarily, this interface need not be called, since automatic periodic incremental garbage collection occurs, and the thread cache is automatically discarded when a thread exits\&. However, garbage collection is triggered by allocation activity, so it is possible for a thread that stops allocating/deallocating to retain its cache indefinitely, in which case the developer may find manual flushing useful\&.
+.RE
+.PP
+"arenas\&.narenas" (\fBunsigned\fR) r\-
+.RS 4
+Maximum number of arenas\&.
+.RE
+.PP
+"arenas\&.initialized" (\fBbool *\fR) r\-
+.RS 4
+An array of
+"arenas\&.narenas"
+booleans\&. Each boolean indicates whether the corresponding arena is initialized\&.
+.RE
+.PP
+"arenas\&.quantum" (\fBsize_t\fR) r\-
+.RS 4
+Quantum size\&.
+.RE
+.PP
+"arenas\&.page" (\fBsize_t\fR) r\-
+.RS 4
+Page size\&.
+.RE
+.PP
+"arenas\&.tcache_max" (\fBsize_t\fR) r\- [\fB\-\-enable\-tcache\fR]
+.RS 4
+Maximum thread\-cached size class\&.
+.RE
+.PP
+"arenas\&.nbins" (\fBunsigned\fR) r\-
+.RS 4
+Number of bin size classes\&.
+.RE
+.PP
+"arenas\&.nhbins" (\fBunsigned\fR) r\- [\fB\-\-enable\-tcache\fR]
+.RS 4
+Total number of thread cache bin size classes\&.
+.RE
+.PP
+"arenas\&.bin\&.<i>\&.size" (\fBsize_t\fR) r\-
+.RS 4
+Maximum size supported by size class\&.
+.RE
+.PP
+"arenas\&.bin\&.<i>\&.nregs" (\fBuint32_t\fR) r\-
+.RS 4
+Number of regions per page run\&.
+.RE
+.PP
+"arenas\&.bin\&.<i>\&.run_size" (\fBsize_t\fR) r\-
+.RS 4
+Number of bytes per page run\&.
+.RE
+.PP
+"arenas\&.nlruns" (\fBsize_t\fR) r\-
+.RS 4
+Total number of large size classes\&.
+.RE
+.PP
+"arenas\&.lrun\&.<i>\&.size" (\fBsize_t\fR) r\-
+.RS 4
+Maximum size supported by this large size class\&.
+.RE
+.PP
+"arenas\&.purge" (\fBunsigned\fR) \-w
+.RS 4
+Purge unused dirty pages for the specified arena, or for all arenas if none is specified\&.
+.RE
+.PP
+"prof\&.active" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
+.RS 4
+Control whether sampling is currently active\&. See the
+"opt\&.prof_active"
+option for additional information\&.
+.RE
+.PP
+"prof\&.dump" (\fBconst char *\fR) \-w [\fB\-\-enable\-prof\fR]
+.RS 4
+Dump a memory profile to the specified file, or if NULL is specified, to a file according to the pattern
+<prefix>\&.<pid>\&.<seq>\&.m<mseq>\&.heap, where
+<prefix>
+is controlled by the
+"opt\&.prof_prefix"
+option\&.
+.RE
+.PP
+"prof\&.interval" (\fBuint64_t\fR) r\- [\fB\-\-enable\-prof\fR]
+.RS 4
+Average number of bytes allocated between interval\-based profile dumps\&. See the
+"opt\&.lg_prof_interval"
+option for additional information\&.
+.RE
+.PP
+"stats\&.cactive" (\fBsize_t *\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Pointer to a counter that contains an approximate count of the current number of bytes in active pages\&. The estimate may be high, but never low, because each arena rounds up to the nearest multiple of the chunk size when computing its contribution to the counter\&. Note that the
+"epoch"
+mallctl has no bearing on this counter\&. Furthermore, counter consistency is maintained via atomic operations, so it is necessary to use an atomic operation in order to guarantee a consistent read when dereferencing the pointer\&.
+.RE
+.PP
+"stats\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Total number of bytes allocated by the application\&.
+.RE
+.PP
+"stats\&.active" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Total number of bytes in active pages allocated by the application\&. This is a multiple of the page size, and greater than or equal to
+"stats\&.allocated"\&.
+.RE
+.PP
+"stats\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Total number of bytes in chunks mapped on behalf of the application\&. This is a multiple of the chunk size, and is at least as large as
+"stats\&.active"\&. This does not include inactive chunks embedded in the DSS\&.
+.RE
+.PP
+"stats\&.chunks\&.current" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Total number of chunks actively mapped on behalf of the application\&. This does not include inactive chunks embedded in the DSS\&.
+.RE
+.PP
+"stats\&.chunks\&.total" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of chunks allocated\&.
+.RE
+.PP
+"stats\&.chunks\&.high" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Maximum number of active chunks at any time thus far\&.
+.RE
+.PP
+"stats\&.huge\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Number of bytes currently allocated by huge objects\&.
+.RE
+.PP
+"stats\&.huge\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of huge allocation requests\&.
+.RE
+.PP
+"stats\&.huge\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of huge deallocation requests\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.nthreads" (\fBunsigned\fR) r\-
+.RS 4
+Number of threads currently assigned to arena\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.pactive" (\fBsize_t\fR) r\-
+.RS 4
+Number of pages in active runs\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.pdirty" (\fBsize_t\fR) r\-
+.RS 4
+Number of pages within unused runs that are potentially dirty, and for which
+\fBmadvise\fR\fB\fI\&.\&.\&.\fR\fR\fB \fR\fB\fI\fBMADV_DONTNEED\fR\fR\fR
+or similar has not been called\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Number of mapped bytes\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.npurge" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Number of dirty page purge sweeps performed\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.nmadvise" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Number of
+\fBmadvise\fR\fB\fI\&.\&.\&.\fR\fR\fB \fR\fB\fI\fBMADV_DONTNEED\fR\fR\fR
+or similar calls made to purge dirty pages\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.npurged" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Number of pages purged\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.small\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Number of bytes currently allocated by small objects\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.small\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of allocation requests served by small bins\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.small\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of small objects returned to bins\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.small\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of small allocation requests\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.large\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Number of bytes currently allocated by large objects\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.large\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of large allocation requests served directly by the arena\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.large\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of large deallocation requests served directly by the arena\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.large\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of large allocation requests\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Current number of bytes allocated by bin\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of allocations served by bin\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of allocations returned to bin\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of allocation requests\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nfills" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR \fB\-\-enable\-tcache\fR]
+.RS 4
+Cumulative number of tcache fills\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nflushes" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR \fB\-\-enable\-tcache\fR]
+.RS 4
+Cumulative number of tcache flushes\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nruns" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of runs created\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nreruns" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of times the current run from which to allocate changed\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.curruns" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Current number of runs\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.lruns\&.<j>\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of allocation requests for this size class served directly by the arena\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.lruns\&.<j>\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of deallocation requests for this size class served directly by the arena\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.lruns\&.<j>\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of allocation requests for this size class\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.lruns\&.<j>\&.curruns" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Current number of runs for this size class\&.
+.RE
+.SH "DEBUGGING MALLOC PROBLEMS"
+.PP
+When debugging, it is a good idea to configure/build jemalloc with the
+\fB\-\-enable\-debug\fR
+and
+\fB\-\-enable\-fill\fR
+options, and recompile the program with suitable options and symbols for debugger support\&. When so configured, jemalloc incorporates a wide variety of run\-time assertions that catch application errors such as double\-free, write\-after\-free, etc\&.
+.PP
+Programs often accidentally depend on \(lquninitialized\(rq memory actually being filled with zero bytes\&. Junk filling (see the
+"opt\&.junk"
+option) tends to expose such bugs in the form of obviously incorrect results and/or coredumps\&. Conversely, zero filling (see the
+"opt\&.zero"
+option) eliminates the symptoms of such bugs\&. Between these two options, it is usually possible to quickly detect, diagnose, and eliminate such bugs\&.
+.PP
+This implementation does not provide much detail about the problems it detects, because the performance impact for storing such information would be prohibitive\&. However, jemalloc does integrate with the most excellent
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2
+tool if the
+\fB\-\-enable\-valgrind\fR
+configuration option is enabled and the
+"opt\&.valgrind"
+option is enabled\&.
+.SH "DIAGNOSTIC MESSAGES"
+.PP
+If any of the memory allocation/deallocation functions detect an error or warning condition, a message will be printed to file descriptor
+\fBSTDERR_FILENO\fR\&. Errors will result in the process dumping core\&. If the
+"opt\&.abort"
+option is set, most warnings are treated as errors\&.
+.PP
+The
+\fImalloc_message\fR
+variable allows the programmer to override the function which emits the text strings forming the errors and warnings if for some reason the
+\fBSTDERR_FILENO\fR
+file descriptor is not suitable for this\&.
+\fBmalloc_message\fR\fB\fR
+takes the
+\fIcbopaque\fR
+pointer argument that is
+\fBNULL\fR
+unless overridden by the arguments in a call to
+\fBmalloc_stats_print\fR\fB\fR, followed by a string pointer\&. Please note that doing anything which tries to allocate memory in this function is likely to result in a crash or deadlock\&.
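+.PP
+As a sketch, an application might redirect these messages to a log file descriptor opened during startup; note that the replacement function must not allocate memory:
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+static int logfd = -1;	/* opened during application startup */
+
+static void
+log_message(void *cbopaque, const char *s)
+{
+
+	(void)cbopaque;
+	if (logfd != -1)
+		write(logfd, s, strlen(s));
+}
+
+/* \&.\&.\&. during initialization \&.\&.\&. */
+malloc_message = log_message;
+.fi
+.if n \{\
+.RE
+.\}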
+.PP
+All messages are prefixed by \(lq<jemalloc>:\(rq\&.
+.SH "RETURN VALUES"
+.SS "Standard API"
+.PP
+The
+\fBmalloc\fR\fB\fR
+and
+\fBcalloc\fR\fB\fR
+functions return a pointer to the allocated memory if successful; otherwise a
+\fBNULL\fR
+pointer is returned and
+\fIerrno\fR
+is set to
+ENOMEM\&.
+.PP
+The
+\fBposix_memalign\fR\fB\fR
+function returns the value 0 if successful; otherwise it returns an error value\&. The
+\fBposix_memalign\fR\fB\fR
+function will fail if:
+.PP
+EINVAL
+.RS 4
+The
+\fIalignment\fR
+parameter is not a power of 2 at least as large as
+sizeof(\fBvoid *\fR)\&.
+.RE
+.PP
+ENOMEM
+.RS 4
+Memory allocation error\&.
+.RE
+.PP
+The
+\fBaligned_alloc\fR\fB\fR
+function returns a pointer to the allocated memory if successful; otherwise a
+\fBNULL\fR
+pointer is returned and
+\fIerrno\fR
+is set\&. The
+\fBaligned_alloc\fR\fB\fR
+function will fail if:
+.PP
+EINVAL
+.RS 4
+The
+\fIalignment\fR
+parameter is not a power of 2\&.
+.RE
+.PP
+ENOMEM
+.RS 4
+Memory allocation error\&.
+.RE
+.PP
+The
+\fBrealloc\fR\fB\fR
+function returns a pointer, possibly identical to
+\fIptr\fR, to the allocated memory if successful; otherwise a
+\fBNULL\fR
+pointer is returned, and
+\fIerrno\fR
+is set to
+ENOMEM
+if the error was the result of an allocation failure\&. The
+\fBrealloc\fR\fB\fR
+function always leaves the original buffer intact when an error occurs\&.
+.PP
+The
+\fBfree\fR\fB\fR
+function returns no value\&.
+.SS "Non\-standard API"
+.PP
+The
+\fBmalloc_usable_size\fR\fB\fR
+function returns the usable size of the allocation pointed to by
+\fIptr\fR\&.
+.PP
+The
+\fBmallctl\fR\fB\fR,
+\fBmallctlnametomib\fR\fB\fR, and
+\fBmallctlbymib\fR\fB\fR
+functions return 0 on success; otherwise they return an error value\&. The functions will fail if:
+.PP
+EINVAL
+.RS 4
+\fInewp\fR
+is not
+\fBNULL\fR, and
+\fInewlen\fR
+is too large or too small\&. Alternatively,
+\fI*oldlenp\fR
+is too large or too small; in this case as much data as possible are read despite the error\&.
+.RE
+.PP
+ENOMEM
+.RS 4
+\fI*oldlenp\fR
+is too short to hold the requested value\&.
+.RE
+.PP
+ENOENT
+.RS 4
+\fIname\fR
+or
+\fImib\fR
+specifies an unknown/invalid value\&.
+.RE
+.PP
+EPERM
+.RS 4
+Attempt to read or write void value, or attempt to write read\-only value\&.
+.RE
+.PP
+EAGAIN
+.RS 4
+A memory allocation failure occurred\&.
+.RE
+.PP
+EFAULT
+.RS 4
+An interface with side effects failed in some way not directly related to
+\fBmallctl*\fR\fB\fR
+read/write processing\&.
+.RE
+.SS "Experimental API"
+.PP
+The
+\fBallocm\fR\fB\fR,
+\fBrallocm\fR\fB\fR,
+\fBsallocm\fR\fB\fR,
+\fBdallocm\fR\fB\fR, and
+\fBnallocm\fR\fB\fR
+functions return
+\fBALLOCM_SUCCESS\fR
+on success; otherwise they return an error value\&. The
+\fBallocm\fR\fB\fR,
+\fBrallocm\fR\fB\fR, and
+\fBnallocm\fR\fB\fR
+functions will fail if:
+.PP
+ALLOCM_ERR_OOM
+.RS 4
+Out of memory\&. Insufficient contiguous memory was available to service the allocation request\&. The
+\fBallocm\fR\fB\fR
+function additionally sets
+\fI*ptr\fR
+to
+\fBNULL\fR, whereas the
+\fBrallocm\fR\fB\fR
+function leaves
+\fB*ptr\fR
+unmodified\&.
+.RE
+The
+\fBrallocm\fR\fB\fR
+function will also fail if:
+.PP
+ALLOCM_ERR_NOT_MOVED
+.RS 4
+\fBALLOCM_NO_MOVE\fR
+was specified, but the reallocation request could not be serviced without moving the object\&.
+.RE
+.SH "ENVIRONMENT"
+.PP
+The following environment variable affects the execution of the allocation functions:
+.PP
+\fBMALLOC_CONF\fR
+.RS 4
+If the environment variable
+\fBMALLOC_CONF\fR
+is set, the characters it contains will be interpreted as options\&.
+.RE
+.SH "EXAMPLES"
+.PP
+To dump core whenever a problem occurs:
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+ln \-s \*(Aqabort:true\*(Aq /etc/malloc\&.conf
+.fi
+.if n \{\
+.RE
+.\}
+.PP
+To specify in the source a chunk size that is 16 MiB:
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+malloc_conf = "lg_chunk:24";
+.fi
+.if n \{\
+.RE
+.\}
+.SH "SEE ALSO"
+.PP
+\fBmadvise\fR(2),
+\fBmmap\fR(2),
+\fBsbrk\fR(2),
+\fButrace\fR(2),
+\fBalloca\fR(3),
+\fBatexit\fR(3),
+\fBgetpagesize\fR(3)
+.SH "STANDARDS"
+.PP
+The
+\fBmalloc\fR\fB\fR,
+\fBcalloc\fR\fB\fR,
+\fBrealloc\fR\fB\fR, and
+\fBfree\fR\fB\fR
+functions conform to ISO/IEC 9899:1990 (\(lqISO C90\(rq)\&.
+.PP
+The
+\fBposix_memalign\fR\fB\fR
+function conforms to IEEE Std 1003\&.1\-2001 (\(lqPOSIX\&.1\(rq)\&.
+.SH "HISTORY"
+.PP
+The
+\fBmalloc_usable_size\fR\fB\fR
+and
+\fBposix_memalign\fR\fB\fR
+functions first appeared in FreeBSD 7\&.0\&.
+.PP
+The
+\fBaligned_alloc\fR\fB\fR,
+\fBmalloc_stats_print\fR\fB\fR,
+\fBmallctl*\fR\fB\fR, and
+\fB*allocm\fR\fB\fR
+functions first appeared in FreeBSD 10\&.0\&.
+.SH "AUTHOR"
+.PP
+\fBJason Evans\fR
+.RS 4
+.RE
+.SH "NOTES"
+.IP " 1." 4
+jemalloc website
+.RS 4
+\%http://www.canonware.com/jemalloc/
+.RE
+.IP " 2." 4
+Valgrind
+.RS 4
+\%http://valgrind.org/
+.RE
+.IP " 3." 4
+google-perftools package
+.RS 4
+\%http://code.google.com/p/google-perftools/
+.RE
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/arena.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/arena.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,685 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+/*
+ * RUN_MAX_OVRHD indicates maximum desired run header overhead.  Runs are sized
+ * as small as possible such that this setting is still honored, without
+ * violating other constraints.  The goal is to make runs as small as possible
+ * without exceeding a per run external fragmentation threshold.
+ *
+ * We use binary fixed point math for overhead computations, where the binary
+ * point is implicitly RUN_BFP bits to the left.
+ *
+ * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
+ * honored for some/all object sizes, since when heap profiling is enabled
+ * there is one pointer of header overhead per object (plus a constant).  This
+ * constraint is relaxed (ignored) for runs that are so small that the
+ * per-region overhead is greater than:
+ *
+ *   (RUN_MAX_OVRHD / (reg_interval << (3+RUN_BFP)))
+ */
+#define	RUN_BFP			12
+/*                                    \/   Implicit binary fixed point. */
+#define	RUN_MAX_OVRHD		0x0000003dU
+#define	RUN_MAX_OVRHD_RELAX	0x00001800U
+
+/* Maximum number of regions in one run. */
+#define	LG_RUN_MAXREGS		11
+#define	RUN_MAXREGS		(1U << LG_RUN_MAXREGS)
+
+/*
+ * Minimum redzone size.  Redzones may be larger than this if necessary to
+ * preserve region alignment.
+ */
+#define	REDZONE_MINSIZE		16
+
+/*
+ * The minimum ratio of active:dirty pages per arena is computed as:
+ *
+ *   (nactive >> opt_lg_dirty_mult) >= ndirty
+ *
+ * So, supposing that opt_lg_dirty_mult is 5, there can be no less than 32
+ * times as many active pages as dirty pages.
+ */
+#define	LG_DIRTY_MULT_DEFAULT	5
+
+typedef struct arena_chunk_map_s arena_chunk_map_t;
+typedef struct arena_chunk_s arena_chunk_t;
+typedef struct arena_run_s arena_run_t;
+typedef struct arena_bin_info_s arena_bin_info_t;
+typedef struct arena_bin_s arena_bin_t;
+typedef struct arena_s arena_t;
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+/* Each element of the chunk map corresponds to one page within the chunk. */
+struct arena_chunk_map_s {
+#ifndef JEMALLOC_PROF
+	/*
+	 * Overlay prof_ctx in order to allow it to be referenced by dead code.
+	 * Such antics aren't warranted for per arena data structures, but
+	 * chunk map overhead accounts for a percentage of memory, rather than
+	 * being just a fixed cost.
+	 */
+	union {
+#endif
+	union {
+		/*
+		 * Linkage for run trees.  There are two disjoint uses:
+		 *
+		 * 1) arena_t's runs_avail_{clean,dirty} trees.
+		 * 2) arena_run_t conceptually uses this linkage for in-use
+		 *    non-full runs, rather than directly embedding linkage.
+		 */
+		rb_node(arena_chunk_map_t)	rb_link;
+		/*
+		 * List of runs currently in purgatory.  arena_chunk_purge()
+		 * temporarily allocates runs that contain dirty pages while
+		 * purging, so that other threads cannot use the runs while the
+		 * purging thread is operating without the arena lock held.
+		 */
+		ql_elm(arena_chunk_map_t)	ql_link;
+	}				u;
+
+	/* Profile counters, used for large object runs. */
+	prof_ctx_t			*prof_ctx;
+#ifndef JEMALLOC_PROF
+	}; /* union { ... }; */
+#endif
+
+	/*
+	 * Run address (or size) and various flags are stored together.  The bit
+	 * layout looks like (assuming 32-bit system):
+	 *
+	 *   ???????? ???????? ????---- ----dula
+	 *
+	 * ? : Unallocated: Run address for first/last pages, unset for internal
+	 *                  pages.
+	 *     Small: Run page offset.
+	 *     Large: Run size for first page, unset for trailing pages.
+	 * - : Unused.
+	 * d : dirty?
+	 * u : unzeroed?
+	 * l : large?
+	 * a : allocated?
+	 *
+	 * Following are example bit patterns for the three types of runs.
+	 *
+	 * p : run page offset
+	 * s : run size
+	 * c : (binind+1) for size class (used only if prof_promote is true)
+	 * x : don't care
+	 * - : 0
+	 * + : 1
+	 * [DULA] : bit set
+	 * [dula] : bit unset
+	 *
+	 *   Unallocated (clean):
+	 *     ssssssss ssssssss ssss---- ----du-a
+	 *     xxxxxxxx xxxxxxxx xxxx---- -----Uxx
+	 *     ssssssss ssssssss ssss---- ----dU-a
+	 *
+	 *   Unallocated (dirty):
+	 *     ssssssss ssssssss ssss---- ----D--a
+	 *     xxxxxxxx xxxxxxxx xxxx---- ----xxxx
+	 *     ssssssss ssssssss ssss---- ----D--a
+	 *
+	 *   Small:
+	 *     pppppppp pppppppp pppp---- ----d--A
+	 *     pppppppp pppppppp pppp---- -------A
+	 *     pppppppp pppppppp pppp---- ----d--A
+	 *
+	 *   Large:
+	 *     ssssssss ssssssss ssss---- ----D-LA
+	 *     xxxxxxxx xxxxxxxx xxxx---- ----xxxx
+	 *     -------- -------- -------- ----D-LA
+	 *
+	 *   Large (sampled, size <= PAGE):
+	 *     ssssssss ssssssss sssscccc ccccD-LA
+	 *
+	 *   Large (not sampled, size == PAGE):
+	 *     ssssssss ssssssss ssss---- ----D-LA
+	 */
+	size_t				bits;
+#define	CHUNK_MAP_CLASS_SHIFT	4
+#define	CHUNK_MAP_CLASS_MASK	((size_t)0xff0U)
+#define	CHUNK_MAP_FLAGS_MASK	((size_t)0xfU)
+#define	CHUNK_MAP_DIRTY		((size_t)0x8U)
+#define	CHUNK_MAP_UNZEROED	((size_t)0x4U)
+#define	CHUNK_MAP_LARGE		((size_t)0x2U)
+#define	CHUNK_MAP_ALLOCATED	((size_t)0x1U)
+#define	CHUNK_MAP_KEY		CHUNK_MAP_ALLOCATED
+};
+typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
+typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
+
+/* Arena chunk header. */
+struct arena_chunk_s {
+	/* Arena that owns the chunk. */
+	arena_t		*arena;
+
+	/* Linkage for the arena's chunks_dirty list. */
+	ql_elm(arena_chunk_t) link_dirty;
+
+	/*
+	 * True if the chunk is currently in the chunks_dirty list, due to
+	 * having at some point contained one or more dirty pages.  Removal
+	 * from chunks_dirty is lazy, so (dirtied && ndirty == 0) is possible.
+	 */
+	bool		dirtied;
+
+	/* Number of dirty pages. */
+	size_t		ndirty;
+
+	/*
+	 * Map of pages within chunk that keeps track of free/large/small.  The
+	 * first map_bias entries are omitted, since the chunk header does not
+	 * need to be tracked in the map.  This omission saves a header page
+	 * for common chunk sizes (e.g. 4 MiB).
+	 */
+	arena_chunk_map_t map[1]; /* Dynamically sized. */
+};
+typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
+
+struct arena_run_s {
+	/* Bin this run is associated with. */
+	arena_bin_t	*bin;
+
+	/* Index of next region that has never been allocated, or nregs. */
+	uint32_t	nextind;
+
+	/* Number of free regions in run. */
+	unsigned	nfree;
+};
+
+/*
+ * Read-only information associated with each element of arena_t's bins array
+ * is stored separately, partly to reduce memory usage (only one copy, rather
+ * than one per arena), but mainly to avoid false cacheline sharing.
+ *
+ * Each run has the following layout:
+ *
+ *               /--------------------\
+ *               | arena_run_t header |
+ *               | ...                |
+ * bitmap_offset | bitmap             |
+ *               | ...                |
+ *   ctx0_offset | ctx map            |
+ *               | ...                |
+ *               |--------------------|
+ *               | redzone            |
+ *   reg0_offset | region 0           |
+ *               | redzone            |
+ *               |--------------------| \
+ *               | redzone            | |
+ *               | region 1           |  > reg_interval
+ *               | redzone            | /
+ *               |--------------------|
+ *               | ...                |
+ *               | ...                |
+ *               | ...                |
+ *               |--------------------|
+ *               | redzone            |
+ *               | region nregs-1     |
+ *               | redzone            |
+ *               |--------------------|
+ *               | alignment pad?     |
+ *               \--------------------/
+ *
+ * reg_interval has at least the same minimum alignment as reg_size; this
+ * preserves the alignment constraint that sa2u() depends on.  Alignment pad is
+ * either 0 or redzone_size; it is present only if needed to align reg0_offset.
+ */
+struct arena_bin_info_s {
+	/* Size of regions in a run for this bin's size class. */
+	size_t		reg_size;
+
+	/* Redzone size. */
+	size_t		redzone_size;
+
+	/* Interval between regions (reg_size + (redzone_size << 1)). */
+	size_t		reg_interval;
+
+	/* Total size of a run for this bin's size class. */
+	size_t		run_size;
+
+	/* Total number of regions in a run for this bin's size class. */
+	uint32_t	nregs;
+
+	/*
+	 * Offset of first bitmap_t element in a run header for this bin's size
+	 * class.
+	 */
+	uint32_t	bitmap_offset;
+
+	/*
+	 * Metadata used to manipulate bitmaps for runs associated with this
+	 * bin.
+	 */
+	bitmap_info_t	bitmap_info;
+
+	/*
+	 * Offset of first (prof_ctx_t *) in a run header for this bin's size
+	 * class, or 0 if (config_prof == false || opt_prof == false).
+	 */
+	uint32_t	ctx0_offset;
+
+	/* Offset of first region in a run for this bin's size class. */
+	uint32_t	reg0_offset;
+};
+
+struct arena_bin_s {
+	/*
+	 * All operations on runcur, runs, and stats require that lock be
+	 * locked.  Run allocation/deallocation are protected by the arena lock,
+	 * which may be acquired while holding one or more bin locks, but not
+	 * vice versa.
+	 */
+	malloc_mutex_t	lock;
+
+	/*
+	 * Current run being used to service allocations of this bin's size
+	 * class.
+	 */
+	arena_run_t	*runcur;
+
+	/*
+	 * Tree of non-full runs.  This tree is used when looking for an
+	 * existing run when runcur is no longer usable.  We choose the
+	 * non-full run that is lowest in memory; this policy tends to keep
+	 * objects packed well, and it can also help reduce the number of
+	 * almost-empty chunks.
+	 */
+	arena_run_tree_t runs;
+
+	/* Bin statistics. */
+	malloc_bin_stats_t stats;
+};
+
+struct arena_s {
+	/* This arena's index within the arenas array. */
+	unsigned		ind;
+
+	/*
+	 * Number of threads currently assigned to this arena.  This field is
+	 * protected by arenas_lock.
+	 */
+	unsigned		nthreads;
+
+	/*
+	 * There are three classes of arena operations from a locking
+	 * perspective:
+	 * 1) Thread assignment (modifies nthreads) is protected by
+	 *    arenas_lock.
+	 * 2) Bin-related operations are protected by bin locks.
+	 * 3) Chunk- and run-related operations are protected by this mutex.
+	 */
+	malloc_mutex_t		lock;
+
+	arena_stats_t		stats;
+	/*
+	 * List of tcaches for extant threads associated with this arena.
+	 * Stats from these are merged incrementally, and at exit.
+	 */
+	ql_head(tcache_t)	tcache_ql;
+
+	uint64_t		prof_accumbytes;
+
+	/* List of dirty-page-containing chunks this arena manages. */
+	ql_head(arena_chunk_t)	chunks_dirty;
+
+	/*
+	 * In order to avoid rapid chunk allocation/deallocation when an arena
+	 * oscillates right on the cusp of needing a new chunk, cache the most
+	 * recently freed chunk.  The spare is left in the arena's chunk trees
+	 * until it is deleted.
+	 *
+	 * There is one spare chunk per arena, rather than one spare total, in
+	 * order to avoid interactions between multiple threads that could make
+	 * a single spare inadequate.
+	 */
+	arena_chunk_t		*spare;
+
+	/* Number of pages in active runs. */
+	size_t			nactive;
+
+	/*
+	 * Current count of pages within unused runs that are potentially
+	 * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
+	 * By tracking this, we can institute a limit on how much dirty unused
+	 * memory is mapped for each arena.
+	 */
+	size_t			ndirty;
+
+	/*
+	 * Approximate number of pages being purged.  It is possible for
+	 * multiple threads to purge dirty pages concurrently, and they use
+	 * npurgatory to indicate the total number of pages all threads are
+	 * attempting to purge.
+	 */
+	size_t			npurgatory;
+
+	/*
+	 * Size/address-ordered trees of this arena's available runs.  The trees
+	 * are used for first-best-fit run allocation.  The dirty tree contains
+	 * runs with dirty pages (i.e. very likely to have been touched and
+	 * therefore have associated physical pages), whereas the clean tree
+	 * contains runs with pages that either have no associated physical
+	 * pages, or have pages that the kernel may recycle at any time due to
+	 * previous madvise(2) calls.  The dirty tree is used in preference to
+	 * the clean tree for allocations, because using dirty pages reduces
+	 * the amount of dirty purging necessary to keep the active:dirty page
+	 * ratio below the purge threshold.
+	 */
+	arena_avail_tree_t	runs_avail_clean;
+	arena_avail_tree_t	runs_avail_dirty;
+
+	/* bins is used to store trees of free regions. */
+	arena_bin_t		bins[NBINS];
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+extern ssize_t	opt_lg_dirty_mult;
+/*
+ * small_size2bin is a compact lookup table that rounds request sizes up to
+ * size classes.  In order to reduce cache footprint, the table is compressed,
+ * and all accesses are via the SMALL_SIZE2BIN macro.
+ */
+extern uint8_t const	small_size2bin[];
+#define	SMALL_SIZE2BIN(s)	(small_size2bin[(s-1) >> LG_TINY_MIN])
+
+extern arena_bin_info_t	arena_bin_info[NBINS];
+
+/* Number of large size classes. */
+#define			nlclasses (chunk_npages - map_bias)
+
+void	arena_purge_all(arena_t *arena);
+void	arena_prof_accum(arena_t *arena, uint64_t accumbytes);
+void	arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
+    size_t binind, uint64_t prof_accumbytes);
+void	arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
+    bool zero);
+void	arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
+void	*arena_malloc_small(arena_t *arena, size_t size, bool zero);
+void	*arena_malloc_large(arena_t *arena, size_t size, bool zero);
+void	*arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
+size_t	arena_salloc(const void *ptr, bool demote);
+void	arena_prof_promoted(const void *ptr, size_t size);
+void	arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    arena_chunk_map_t *mapelm);
+void	arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
+void	arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
+    arena_stats_t *astats, malloc_bin_stats_t *bstats,
+    malloc_large_stats_t *lstats);
+void	*arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
+    size_t extra, bool zero);
+void	*arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
+    size_t alignment, bool zero, bool try_tcache);
+bool	arena_new(arena_t *arena, unsigned ind);
+void	arena_boot(void);
+void	arena_prefork(arena_t *arena);
+void	arena_postfork_parent(arena_t *arena);
+void	arena_postfork_child(arena_t *arena);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+size_t	arena_bin_index(arena_t *arena, arena_bin_t *bin);
+unsigned	arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
+    const void *ptr);
+prof_ctx_t	*arena_prof_ctx_get(const void *ptr);
+void	arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
+void	*arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache);
+void	arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    bool try_tcache);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
+JEMALLOC_INLINE size_t
+arena_bin_index(arena_t *arena, arena_bin_t *bin)
+{
+	size_t binind = bin - arena->bins;
+	assert(binind < NBINS);
+	return (binind);
+}
+
+JEMALLOC_INLINE unsigned
+arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
+{
+	unsigned shift, diff, regind;
+	size_t interval;
+
+	/*
+	 * Freeing a pointer lower than region zero can cause assertion
+	 * failure.
+	 */
+	assert((uintptr_t)ptr >= (uintptr_t)run +
+	    (uintptr_t)bin_info->reg0_offset);
+
+	/*
+	 * Avoid doing division with a variable divisor if possible.  Using
+	 * actual division here can reduce allocator throughput by over 20%!
+	 */
+	diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run -
+	    bin_info->reg0_offset);
+
+	/* Rescale (factor powers of 2 out of the numerator and denominator). */
+	interval = bin_info->reg_interval;
+	shift = ffs(interval) - 1;
+	diff >>= shift;
+	interval >>= shift;
+
+	if (interval == 1) {
+		/* The divisor was a power of 2. */
+		regind = diff;
+	} else {
+		/*
+		 * To divide by a number D that is not a power of two we
+		 * multiply by (2^21 / D) and then right shift by 21 positions.
+		 *
+		 *   X / D
+		 *
+		 * becomes
+		 *
+		 *   (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
+		 *
+		 * We can omit the first three elements, because we never
+		 * divide by 0, and 1 and 2 are both powers of two, which are
+		 * handled above.
+		 */
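+		/*
+		 * Worked example, assuming 32-bit unsigned and
+		 * LG_RUN_MAXREGS == 11 (hence SIZE_INV_SHIFT == 21): for
+		 * interval == 3, SIZE_INV(3) == (2^21 / 3) + 1 == 699051, and
+		 * a rescaled diff of 9 gives (9 * 699051) >> 21 == 3 == 9 / 3.
+		 */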
+#define	SIZE_INV_SHIFT	((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
+#define	SIZE_INV(s)	(((1U << SIZE_INV_SHIFT) / (s)) + 1)
+		static const unsigned interval_invs[] = {
+		    SIZE_INV(3),
+		    SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
+		    SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
+		    SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
+		    SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
+		    SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
+		    SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
+		    SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
+		};
+
+		if (interval <= ((sizeof(interval_invs) / sizeof(unsigned)) +
+		    2)) {
+			regind = (diff * interval_invs[interval - 3]) >>
+			    SIZE_INV_SHIFT;
+		} else
+			regind = diff / interval;
+#undef SIZE_INV
+#undef SIZE_INV_SHIFT
+	}
+	assert(diff == regind * interval);
+	assert(regind < bin_info->nregs);
+
+	return (regind);
+}
+
+JEMALLOC_INLINE prof_ctx_t *
+arena_prof_ctx_get(const void *ptr)
+{
+	prof_ctx_t *ret;
+	arena_chunk_t *chunk;
+	size_t pageind, mapbits;
+
+	cassert(config_prof);
+	assert(ptr != NULL);
+	assert(CHUNK_ADDR2BASE(ptr) != ptr);
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+	mapbits = chunk->map[pageind-map_bias].bits;
+	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
+	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
+		if (prof_promote)
+			ret = (prof_ctx_t *)(uintptr_t)1U;
+		else {
+			arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
+			    (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
+			    LG_PAGE));
+			size_t binind = arena_bin_index(chunk->arena, run->bin);
+			arena_bin_info_t *bin_info = &arena_bin_info[binind];
+			unsigned regind;
+
+			regind = arena_run_regind(run, bin_info, ptr);
+			ret = *(prof_ctx_t **)((uintptr_t)run +
+			    bin_info->ctx0_offset + (regind *
+			    sizeof(prof_ctx_t *)));
+		}
+	} else
+		ret = chunk->map[pageind-map_bias].prof_ctx;
+
+	return (ret);
+}
+
+JEMALLOC_INLINE void
+arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
+{
+	arena_chunk_t *chunk;
+	size_t pageind, mapbits;
+
+	cassert(config_prof);
+	assert(ptr != NULL);
+	assert(CHUNK_ADDR2BASE(ptr) != ptr);
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+	mapbits = chunk->map[pageind-map_bias].bits;
+	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
+	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
+		if (prof_promote == false) {
+			arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
+			    (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
+			    LG_PAGE));
+			arena_bin_t *bin = run->bin;
+			size_t binind;
+			arena_bin_info_t *bin_info;
+			unsigned regind;
+
+			binind = arena_bin_index(chunk->arena, bin);
+			bin_info = &arena_bin_info[binind];
+			regind = arena_run_regind(run, bin_info, ptr);
+
+			*((prof_ctx_t **)((uintptr_t)run + bin_info->ctx0_offset
+			    + (regind * sizeof(prof_ctx_t *)))) = ctx;
+		} else
+			assert((uintptr_t)ctx == (uintptr_t)1U);
+	} else
+		chunk->map[pageind-map_bias].prof_ctx = ctx;
+}
+
+JEMALLOC_INLINE void *
+arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache)
+{
+	tcache_t *tcache;
+
+	assert(size != 0);
+	assert(size <= arena_maxclass);
+
+	if (size <= SMALL_MAXCLASS) {
+		if (try_tcache && (tcache = tcache_get(true)) != NULL)
+			return (tcache_alloc_small(tcache, size, zero));
+		else {
+			return (arena_malloc_small(choose_arena(arena), size,
+			    zero));
+		}
+	} else {
+		/*
+		 * Initialize tcache after checking size in order to avoid
+		 * infinite recursion during tcache initialization.
+		 */
+		if (try_tcache && size <= tcache_maxclass && (tcache =
+		    tcache_get(true)) != NULL)
+			return (tcache_alloc_large(tcache, size, zero));
+		else {
+			return (arena_malloc_large(choose_arena(arena), size,
+			    zero));
+		}
+	}
+}
+
+JEMALLOC_INLINE void
+arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache)
+{
+	size_t pageind;
+	arena_chunk_map_t *mapelm;
+	tcache_t *tcache;
+
+	assert(arena != NULL);
+	assert(chunk->arena == arena);
+	assert(ptr != NULL);
+	assert(CHUNK_ADDR2BASE(ptr) != ptr);
+
+	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+	mapelm = &chunk->map[pageind-map_bias];
+	assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
+	if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
+		/* Small allocation. */
+		if (try_tcache && (tcache = tcache_get(false)) != NULL)
+			tcache_dalloc_small(tcache, ptr);
+		else {
+			arena_run_t *run;
+			arena_bin_t *bin;
+
+			run = (arena_run_t *)((uintptr_t)chunk +
+			    (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) <<
+			    LG_PAGE));
+			bin = run->bin;
+			if (config_debug) {
+				size_t binind = arena_bin_index(arena, bin);
+				UNUSED arena_bin_info_t *bin_info =
+				    &arena_bin_info[binind];
+				assert(((uintptr_t)ptr - ((uintptr_t)run +
+				    (uintptr_t)bin_info->reg0_offset)) %
+				    bin_info->reg_interval == 0);
+			}
+			malloc_mutex_lock(&bin->lock);
+			arena_dalloc_bin(arena, chunk, ptr, mapelm);
+			malloc_mutex_unlock(&bin->lock);
+		}
+	} else {
+		size_t size = mapelm->bits & ~PAGE_MASK;
+
+		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
+
+		if (try_tcache && size <= tcache_maxclass && (tcache =
+		    tcache_get(false)) != NULL) {
+			tcache_dalloc_large(tcache, ptr, size);
+		} else {
+			malloc_mutex_lock(&arena->lock);
+			arena_dalloc_large(arena, chunk, ptr);
+			malloc_mutex_unlock(&arena->lock);
+		}
+	}
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/atomic.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/atomic.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,240 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+#define	atomic_read_uint64(p)	atomic_add_uint64(p, 0)
+#define	atomic_read_uint32(p)	atomic_add_uint32(p, 0)
+#define	atomic_read_z(p)	atomic_add_z(p, 0)
+#define	atomic_read_u(p)	atomic_add_u(p, 0)
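+/*
+ * Reads are expressed as an atomic add of 0, so they reuse the add
+ * implementations defined in the inline section below.
+ */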
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+uint64_t	atomic_add_uint64(uint64_t *p, uint64_t x);
+uint64_t	atomic_sub_uint64(uint64_t *p, uint64_t x);
+uint32_t	atomic_add_uint32(uint32_t *p, uint32_t x);
+uint32_t	atomic_sub_uint32(uint32_t *p, uint32_t x);
+size_t	atomic_add_z(size_t *p, size_t x);
+size_t	atomic_sub_z(size_t *p, size_t x);
+unsigned	atomic_add_u(unsigned *p, unsigned x);
+unsigned	atomic_sub_u(unsigned *p, unsigned x);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
+/******************************************************************************/
+/* 64-bit operations. */
+#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
+JEMALLOC_INLINE uint64_t
+atomic_add_uint64(uint64_t *p, uint64_t x)
+{
+
+	return (__sync_add_and_fetch(p, x));
+}
+
+JEMALLOC_INLINE uint64_t
+atomic_sub_uint64(uint64_t *p, uint64_t x)
+{
+
+	return (__sync_sub_and_fetch(p, x));
+}
+#elif (defined(JEMALLOC_OSATOMIC))
+JEMALLOC_INLINE uint64_t
+atomic_add_uint64(uint64_t *p, uint64_t x)
+{
+
+	return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
+}
+
+JEMALLOC_INLINE uint64_t
+atomic_sub_uint64(uint64_t *p, uint64_t x)
+{
+
+	return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
+}
+#elif (defined(__amd64__) || defined(__x86_64__))
+JEMALLOC_INLINE uint64_t
+atomic_add_uint64(uint64_t *p, uint64_t x)
+{
+	uint64_t t = x;
+
+	/* xaddq leaves the previous value of *p in the register. */
+	asm volatile (
+	    "lock; xaddq %0, %1;"
+	    : "+r" (t), "=m" (*p) /* Outputs. */
+	    : "m" (*p) /* Inputs. */
+	    );
+
+	/* Return the new value, matching the __sync_add_and_fetch() variants. */
+	return (t + x);
+}
+
+JEMALLOC_INLINE uint64_t
+atomic_sub_uint64(uint64_t *p, uint64_t x)
+{
+	uint64_t t;
+
+	x = (uint64_t)(-(int64_t)x);
+	t = x;
+	asm volatile (
+	    "lock; xaddq %0, %1;"
+	    : "+r" (t), "=m" (*p) /* Outputs. */
+	    : "m" (*p) /* Inputs. */
+	    );
+
+	return (t + x);
+}
+#elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
+JEMALLOC_INLINE uint64_t
+atomic_add_uint64(uint64_t *p, uint64_t x)
+{
+
+	return (__sync_add_and_fetch(p, x));
+}
+
+JEMALLOC_INLINE uint64_t
+atomic_sub_uint64(uint64_t *p, uint64_t x)
+{
+
+	return (__sync_sub_and_fetch(p, x));
+}
+#else
+#  if (LG_SIZEOF_PTR == 3)
+#    error "Missing implementation for 64-bit atomic operations"
+#  endif
+#endif
+
+/******************************************************************************/
+/* 32-bit operations. */
+#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
+JEMALLOC_INLINE uint32_t
+atomic_add_uint32(uint32_t *p, uint32_t x)
+{
+
+	return (__sync_add_and_fetch(p, x));
+}
+
+JEMALLOC_INLINE uint32_t
+atomic_sub_uint32(uint32_t *p, uint32_t x)
+{
+
+	return (__sync_sub_and_fetch(p, x));
+}
+#elif (defined(JEMALLOC_OSATOMIC))
+JEMALLOC_INLINE uint32_t
+atomic_add_uint32(uint32_t *p, uint32_t x)
+{
+
+	return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
+}
+
+JEMALLOC_INLINE uint32_t
+atomic_sub_uint32(uint32_t *p, uint32_t x)
+{
+
+	return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
+}
+#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
+JEMALLOC_INLINE uint32_t
+atomic_add_uint32(uint32_t *p, uint32_t x)
+{
+	uint32_t t = x;
+
+	/* xaddl leaves the previous value of *p in the register. */
+	asm volatile (
+	    "lock; xaddl %0, %1;"
+	    : "+r" (t), "=m" (*p) /* Outputs. */
+	    : "m" (*p) /* Inputs. */
+	    );
+
+	/* Return the new value, matching the __sync_add_and_fetch() variants. */
+	return (t + x);
+}
+
+JEMALLOC_INLINE uint32_t
+atomic_sub_uint32(uint32_t *p, uint32_t x)
+{
+	uint32_t t;
+
+	x = (uint32_t)(-(int32_t)x);
+	t = x;
+	asm volatile (
+	    "lock; xaddl %0, %1;"
+	    : "+r" (t), "=m" (*p) /* Outputs. */
+	    : "m" (*p) /* Inputs. */
+	    );
+
+	return (t + x);
+}
+#elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
+JEMALLOC_INLINE uint32_t
+atomic_add_uint32(uint32_t *p, uint32_t x)
+{
+
+	return (__sync_add_and_fetch(p, x));
+}
+
+JEMALLOC_INLINE uint32_t
+atomic_sub_uint32(uint32_t *p, uint32_t x)
+{
+
+	return (__sync_sub_and_fetch(p, x));
+}
+#else
+#  error "Missing implementation for 32-bit atomic operations"
+#endif
+
+/******************************************************************************/
+/* size_t operations. */
+JEMALLOC_INLINE size_t
+atomic_add_z(size_t *p, size_t x)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+	return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
+#elif (LG_SIZEOF_PTR == 2)
+	return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
+#endif
+}
+
+JEMALLOC_INLINE size_t
+atomic_sub_z(size_t *p, size_t x)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+	return ((size_t)atomic_add_uint64((uint64_t *)p,
+	    (uint64_t)-((int64_t)x)));
+#elif (LG_SIZEOF_PTR == 2)
+	return ((size_t)atomic_add_uint32((uint32_t *)p,
+	    (uint32_t)-((int32_t)x)));
+#endif
+}
+
+/******************************************************************************/
+/* unsigned operations. */
+JEMALLOC_INLINE unsigned
+atomic_add_u(unsigned *p, unsigned x)
+{
+
+#if (LG_SIZEOF_INT == 3)
+	return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
+#elif (LG_SIZEOF_INT == 2)
+	return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
+#endif
+}
+
+JEMALLOC_INLINE unsigned
+atomic_sub_u(unsigned *p, unsigned x)
+{
+
+#if (LG_SIZEOF_INT == 3)
+	return ((unsigned)atomic_add_uint64((uint64_t *)p,
+	    (uint64_t)-((int64_t)x)));
+#elif (LG_SIZEOF_INT == 2)
+	return ((unsigned)atomic_add_uint32((uint32_t *)p,
+	    (uint32_t)-((int32_t)x)));
+#endif
+}
+/******************************************************************************/
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/base.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/base.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,26 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void	*base_alloc(size_t size);
+void	*base_calloc(size_t number, size_t size);
+extent_node_t *base_node_alloc(void);
+void	base_node_dealloc(extent_node_t *node);
+bool	base_boot(void);
+void	base_prefork(void);
+void	base_postfork_parent(void);
+void	base_postfork_child(void);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/bitmap.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/bitmap.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,184 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
+#define	LG_BITMAP_MAXBITS	LG_RUN_MAXREGS
+
+typedef struct bitmap_level_s bitmap_level_t;
+typedef struct bitmap_info_s bitmap_info_t;
+typedef unsigned long bitmap_t;
+#define	LG_SIZEOF_BITMAP	LG_SIZEOF_LONG
+
+/* Number of bits per group. */
+#define	LG_BITMAP_GROUP_NBITS		(LG_SIZEOF_BITMAP + 3)
+#define	BITMAP_GROUP_NBITS		(ZU(1) << LG_BITMAP_GROUP_NBITS)
+#define	BITMAP_GROUP_NBITS_MASK		(BITMAP_GROUP_NBITS-1)
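+/*
+ * For example, with a 64-bit long (LG_SIZEOF_LONG == 3), LG_BITMAP_GROUP_NBITS
+ * is 6, so each bitmap_t group covers 64 bits and BITMAP_GROUP_NBITS_MASK is
+ * 63.
+ */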
+
+/* Maximum number of levels possible. */
+#define	BITMAP_MAX_LEVELS						\
+    (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP)				\
+    + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct bitmap_level_s {
+	/* Offset of this level's groups within the array of groups. */
+	size_t group_offset;
+};
+
+struct bitmap_info_s {
+	/* Logical number of bits in bitmap (stored at bottom level). */
+	size_t nbits;
+
+	/* Number of levels necessary for nbits. */
+	unsigned nlevels;
+
+	/*
+	 * Only the first (nlevels+1) elements are used, and levels are ordered
+	 * bottom to top (e.g. the bottom level is stored in levels[0]).
+	 */
+	bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void	bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
+size_t	bitmap_info_ngroups(const bitmap_info_t *binfo);
+size_t	bitmap_size(size_t nbits);
+void	bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+bool	bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo);
+bool	bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
+void	bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
+size_t	bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo);
+void	bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_))
+JEMALLOC_INLINE bool
+bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
+{
+	unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
+	bitmap_t rg = bitmap[rgoff];
+	/* The bitmap is full iff the root group is 0. */
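+	/*
+	 * Group words store the complement: a 1 bit marks a logical bit that
+	 * is still unset (see bitmap_get()), which is also what lets
+	 * bitmap_sfu() find the first free bit with ffsl().
+	 */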
+	return (rg == 0);
+}
+
+JEMALLOC_INLINE bool
+bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
+{
+	size_t goff;
+	bitmap_t g;
+
+	assert(bit < binfo->nbits);
+	goff = bit >> LG_BITMAP_GROUP_NBITS;
+	g = bitmap[goff];
+	return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))));
+}
+
+JEMALLOC_INLINE void
+bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
+{
+	size_t goff;
+	bitmap_t *gp;
+	bitmap_t g;
+
+	assert(bit < binfo->nbits);
+	assert(bitmap_get(bitmap, binfo, bit) == false);
+	goff = bit >> LG_BITMAP_GROUP_NBITS;
+	gp = &bitmap[goff];
+	g = *gp;
+	assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
+	g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
+	*gp = g;
+	assert(bitmap_get(bitmap, binfo, bit));
+	/* Propagate group state transitions up the tree. */
+	if (g == 0) {
+		unsigned i;
+		for (i = 1; i < binfo->nlevels; i++) {
+			bit = goff;
+			goff = bit >> LG_BITMAP_GROUP_NBITS;
+			gp = &bitmap[binfo->levels[i].group_offset + goff];
+			g = *gp;
+			assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
+			g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
+			*gp = g;
+			if (g != 0)
+				break;
+		}
+	}
+}
+
+/* sfu: set first unset. */
+JEMALLOC_INLINE size_t
+bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
+{
+	size_t bit;
+	bitmap_t g;
+	unsigned i;
+
+	assert(bitmap_full(bitmap, binfo) == false);
+
+	i = binfo->nlevels - 1;
+	g = bitmap[binfo->levels[i].group_offset];
+	bit = ffsl(g) - 1;
+	while (i > 0) {
+		i--;
+		g = bitmap[binfo->levels[i].group_offset + bit];
+		bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffsl(g) - 1);
+	}
+
+	bitmap_set(bitmap, binfo, bit);
+	return (bit);
+}
+
+JEMALLOC_INLINE void
+bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
+{
+	size_t goff;
+	bitmap_t *gp;
+	bitmap_t g;
+	bool propagate;
+
+	assert(bit < binfo->nbits);
+	assert(bitmap_get(bitmap, binfo, bit));
+	goff = bit >> LG_BITMAP_GROUP_NBITS;
+	gp = &bitmap[goff];
+	g = *gp;
+	propagate = (g == 0);
+	assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
+	g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
+	*gp = g;
+	assert(bitmap_get(bitmap, binfo, bit) == false);
+	/* Propagate group state transitions up the tree. */
+	if (propagate) {
+		unsigned i;
+		for (i = 1; i < binfo->nlevels; i++) {
+			bit = goff;
+			goff = bit >> LG_BITMAP_GROUP_NBITS;
+			gp = &bitmap[binfo->levels[i].group_offset + goff];
+			g = *gp;
+			propagate = (g == 0);
+			assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))
+			    == 0);
+			g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
+			*gp = g;
+			if (propagate == false)
+				break;
+		}
+	}
+}
+
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/chunk.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/chunk.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,58 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+/*
+ * Size and alignment of memory chunks that are allocated by the OS's virtual
+ * memory system.
+ */
+#define	LG_CHUNK_DEFAULT	22
+
+/* Return the chunk address for allocation address a. */
+#define	CHUNK_ADDR2BASE(a)						\
+	((void *)((uintptr_t)(a) & ~chunksize_mask))
+
+/* Return the chunk offset of address a. */
+#define	CHUNK_ADDR2OFFSET(a)						\
+	((size_t)((uintptr_t)(a) & chunksize_mask))
+
+/* Return the smallest chunk multiple that is >= s. */
+#define	CHUNK_CEILING(s)						\
+	(((s) + chunksize_mask) & ~chunksize_mask)
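+/*
+ * Example, assuming the default 4 MiB chunks (LG_CHUNK_DEFAULT == 22):
+ * CHUNK_ADDR2BASE(0x1234567) == 0x1000000, CHUNK_ADDR2OFFSET(0x1234567) ==
+ * 0x234567, and CHUNK_CEILING(0x234567) == 0x400000.
+ */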
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+extern size_t		opt_lg_chunk;
+
+/* Protects stats_chunks; currently not used for any other purpose. */
+extern malloc_mutex_t	chunks_mtx;
+/* Chunk statistics. */
+extern chunk_stats_t	stats_chunks;
+
+extern rtree_t		*chunks_rtree;
+
+extern size_t		chunksize;
+extern size_t		chunksize_mask; /* (chunksize - 1). */
+extern size_t		chunk_npages;
+extern size_t		map_bias; /* Number of arena chunk header pages. */
+extern size_t		arena_maxclass; /* Max size class for arenas. */
+
+void	*chunk_alloc(size_t size, size_t alignment, bool base, bool *zero);
+void	chunk_dealloc(void *chunk, size_t size, bool unmap);
+bool	chunk_boot0(void);
+bool	chunk_boot1(void);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+
+#include "jemalloc/internal/chunk_dss.h"
+#include "jemalloc/internal/chunk_mmap.h"
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,24 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void	*chunk_alloc_dss(size_t size, size_t alignment, bool *zero);
+bool	chunk_in_dss(void *chunk);
+bool	chunk_dss_boot(void);
+void	chunk_dss_prefork(void);
+void	chunk_dss_postfork_parent(void);
+void	chunk_dss_postfork_child(void);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,22 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void	*chunk_alloc_mmap(size_t size, size_t alignment);
+bool	chunk_dealloc_mmap(void *chunk, size_t size);
+
+bool	chunk_mmap_boot(void);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/ckh.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/ckh.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,90 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct ckh_s ckh_t;
+typedef struct ckhc_s ckhc_t;
+
+/* Typedefs to allow easy function pointer passing. */
+typedef void ckh_hash_t (const void *, unsigned, size_t *, size_t *);
+typedef bool ckh_keycomp_t (const void *, const void *);
+
+/* Maintain counters used to get an idea of performance. */
+/* #define	CKH_COUNT */
+/* Print counter values in ckh_delete() (requires CKH_COUNT). */
+/* #define	CKH_VERBOSE */
+
+/*
+ * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket.  Try to fit
+ * one bucket per L1 cache line.
+ */
+#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
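+/*
+ * For instance, with LG_CACHELINE == 6 and 8-byte pointers (LG_SIZEOF_PTR ==
+ * 3) this yields 2^2 == 4 cells per bucket; four 16-byte ckhc_t cells fill
+ * one 64-byte cache line.
+ */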
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+/* Hash table cell. */
+struct ckhc_s {
+	const void	*key;
+	const void	*data;
+};
+
+struct ckh_s {
+#ifdef CKH_COUNT
+	/* Counters used to get an idea of performance. */
+	uint64_t	ngrows;
+	uint64_t	nshrinks;
+	uint64_t	nshrinkfails;
+	uint64_t	ninserts;
+	uint64_t	nrelocs;
+#endif
+
+	/* Used for pseudo-random number generation. */
+#define	CKH_A		1103515241
+#define	CKH_C		12347
+	uint32_t	prng_state;
+
+	/* Total number of items. */
+	size_t		count;
+
+	/*
+	 * Minimum and current number of hash table buckets.  There are
+	 * 2^LG_CKH_BUCKET_CELLS cells per bucket.
+	 */
+	unsigned	lg_minbuckets;
+	unsigned	lg_curbuckets;
+
+	/* Hash and comparison functions. */
+	ckh_hash_t	*hash;
+	ckh_keycomp_t	*keycomp;
+
+	/* Hash table with 2^lg_curbuckets buckets. */
+	ckhc_t		*tab;
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+bool	ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+    ckh_keycomp_t *keycomp);
+void	ckh_delete(ckh_t *ckh);
+size_t	ckh_count(ckh_t *ckh);
+bool	ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
+bool	ckh_insert(ckh_t *ckh, const void *key, const void *data);
+bool	ckh_remove(ckh_t *ckh, const void *searchkey, void **key,
+    void **data);
+bool	ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
+void	ckh_string_hash(const void *key, unsigned minbits, size_t *hash1,
+    size_t *hash2);
+bool	ckh_string_keycomp(const void *k1, const void *k2);
+void	ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
+    size_t *hash2);
+bool	ckh_pointer_keycomp(const void *k1, const void *k2);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/ctl.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/ctl.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,109 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct ctl_node_s ctl_node_t;
+typedef struct ctl_arena_stats_s ctl_arena_stats_t;
+typedef struct ctl_stats_s ctl_stats_t;
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct ctl_node_s {
+	bool			named;
+	union {
+		struct {
+			const char	*name;
+			/* If (nchildren == 0), this is a terminal node. */
+			unsigned	nchildren;
+			const	ctl_node_t *children;
+		} named;
+		struct {
+			const ctl_node_t *(*index)(const size_t *, size_t,
+			    size_t);
+		} indexed;
+	} u;
+	int	(*ctl)(const size_t *, size_t, void *, size_t *, void *,
+	    size_t);
+};
+
+struct ctl_arena_stats_s {
+	bool			initialized;
+	unsigned		nthreads;
+	size_t			pactive;
+	size_t			pdirty;
+	arena_stats_t		astats;
+
+	/* Aggregate stats for small size classes, based on bin stats. */
+	size_t			allocated_small;
+	uint64_t		nmalloc_small;
+	uint64_t		ndalloc_small;
+	uint64_t		nrequests_small;
+
+	malloc_bin_stats_t	bstats[NBINS];
+	malloc_large_stats_t	*lstats;	/* nlclasses elements. */
+};
+
+struct ctl_stats_s {
+	size_t			allocated;
+	size_t			active;
+	size_t			mapped;
+	struct {
+		size_t		current;	/* stats_chunks.curchunks */
+		uint64_t	total;		/* stats_chunks.nchunks */
+		size_t		high;		/* stats_chunks.highchunks */
+	} chunks;
+	struct {
+		size_t		allocated;	/* huge_allocated */
+		uint64_t	nmalloc;	/* huge_nmalloc */
+		uint64_t	ndalloc;	/* huge_ndalloc */
+	} huge;
+	ctl_arena_stats_t	*arenas;	/* (narenas + 1) elements. */
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+int	ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
+    size_t newlen);
+int	ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
+
+int	ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen);
+bool	ctl_boot(void);
+
+#define	xmallctl(name, oldp, oldlenp, newp, newlen) do {		\
+	if (je_mallctl(name, oldp, oldlenp, newp, newlen)		\
+	    != 0) {							\
+		malloc_printf(						\
+		    "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n",	\
+		    name);						\
+		abort();						\
+	}								\
+} while (0)
+
+#define	xmallctlnametomib(name, mibp, miblenp) do {			\
+	if (je_mallctlnametomib(name, mibp, miblenp) != 0) {		\
+		malloc_printf("<jemalloc>: Failure in "			\
+		    "xmallctlnametomib(\"%s\", ...)\n", name);		\
+		abort();						\
+	}								\
+} while (0)
+
+#define	xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do {	\
+	if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp,		\
+	    newlen) != 0) {						\
+		malloc_write(						\
+		    "<jemalloc>: Failure in xmallctlbymib()\n");	\
+		abort();						\
+	}								\
+} while (0)
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/extent.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/extent.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,43 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct extent_node_s extent_node_t;
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+/* Tree of extents. */
+struct extent_node_s {
+	/* Linkage for the size/address-ordered tree. */
+	rb_node(extent_node_t)	link_szad;
+
+	/* Linkage for the address-ordered tree. */
+	rb_node(extent_node_t)	link_ad;
+
+	/* Profile counters, used for huge objects. */
+	prof_ctx_t		*prof_ctx;
+
+	/* Pointer to the extent that this tree node is responsible for. */
+	void			*addr;
+
+	/* Total region size. */
+	size_t			size;
+};
+typedef rb_tree(extent_node_t) extent_tree_t;
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
+
+rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/hash.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/hash.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,70 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+uint64_t	hash(const void *key, size_t len, uint64_t seed);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
+/*
+ * The following hash function is based on MurmurHash64A(), placed into the
+ * public domain by Austin Appleby.  See http://murmurhash.googlepages.com/ for
+ * details.
+ */
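+/*
+ * Note that the assertion below requires 8-byte-aligned keys, since the main
+ * mixing loop reads the key one uint64_t at a time.
+ */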
+JEMALLOC_INLINE uint64_t
+hash(const void *key, size_t len, uint64_t seed)
+{
+	const uint64_t m = UINT64_C(0xc6a4a7935bd1e995);
+	const int r = 47;
+	uint64_t h = seed ^ (len * m);
+	const uint64_t *data = (const uint64_t *)key;
+	const uint64_t *end = data + (len/8);
+	const unsigned char *data2;
+
+	assert(((uintptr_t)key & 0x7) == 0);
+
+	while(data != end) {
+		uint64_t k = *data++;
+
+		k *= m;
+		k ^= k >> r;
+		k *= m;
+
+		h ^= k;
+		h *= m;
+	}
+
+	data2 = (const unsigned char *)data;
+	switch(len & 7) {
+	case 7: h ^= ((uint64_t)(data2[6])) << 48;
+	case 6: h ^= ((uint64_t)(data2[5])) << 40;
+	case 5: h ^= ((uint64_t)(data2[4])) << 32;
+	case 4: h ^= ((uint64_t)(data2[3])) << 24;
+	case 3: h ^= ((uint64_t)(data2[2])) << 16;
+	case 2: h ^= ((uint64_t)(data2[1])) << 8;
+	case 1: h ^= ((uint64_t)(data2[0]));
+		h *= m;
+	}
+
+	h ^= h >> r;
+	h *= m;
+	h ^= h >> r;
+
+	return (h);
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/huge.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/huge.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,40 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+/* Huge allocation statistics. */
+extern uint64_t		huge_nmalloc;
+extern uint64_t		huge_ndalloc;
+extern size_t		huge_allocated;
+
+/* Protects chunk-related data structures. */
+extern malloc_mutex_t	huge_mtx;
+
+void	*huge_malloc(size_t size, bool zero);
+void	*huge_palloc(size_t size, size_t alignment, bool zero);
+void	*huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
+    size_t extra);
+void	*huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
+    size_t alignment, bool zero);
+void	huge_dalloc(void *ptr, bool unmap);
+size_t	huge_salloc(const void *ptr);
+prof_ctx_t	*huge_prof_ctx_get(const void *ptr);
+void	huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
+bool	huge_boot(void);
+void	huge_prefork(void);
+void	huge_postfork_parent(void);
+void	huge_postfork_child(void);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,876 @@
+#include "libc_private.h"
+#include "namespace.h"
+
+#include <sys/mman.h>
+#include <sys/param.h>
+#include <sys/syscall.h>
+#if !defined(SYS_write) && defined(__NR_write)
+#define	SYS_write __NR_write
+#endif
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/uio.h>
+
+#include <errno.h>
+#include <limits.h>
+#ifndef SIZE_T_MAX
+#  define SIZE_T_MAX	SIZE_MAX
+#endif
+#include <pthread.h>
+#include <sched.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stddef.h>
+#ifndef offsetof
+#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
+#endif
+#include <inttypes.h>
+#include <string.h>
+#include <strings.h>
+#include <ctype.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <math.h>
+
+#include "un-namespace.h"
+#include "libc_private.h"
+
+#define	JEMALLOC_NO_DEMANGLE
+#include "../jemalloc.h"
+
+#ifdef JEMALLOC_UTRACE
+#include <sys/ktrace.h>
+#endif
+
+#ifdef JEMALLOC_VALGRIND
+#include <valgrind/valgrind.h>
+#include <valgrind/memcheck.h>
+#endif
+
+#include "jemalloc/internal/private_namespace.h"
+
+#ifdef JEMALLOC_CC_SILENCE
+#define	UNUSED JEMALLOC_ATTR(unused)
+#else
+#define	UNUSED
+#endif
+
+static const bool config_debug =
+#ifdef JEMALLOC_DEBUG
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_dss =
+#ifdef JEMALLOC_DSS
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_fill =
+#ifdef JEMALLOC_FILL
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_lazy_lock =
+#ifdef JEMALLOC_LAZY_LOCK
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_prof =
+#ifdef JEMALLOC_PROF
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_prof_libgcc =
+#ifdef JEMALLOC_PROF_LIBGCC
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_prof_libunwind =
+#ifdef JEMALLOC_PROF_LIBUNWIND
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_munmap =
+#ifdef JEMALLOC_MUNMAP
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_stats =
+#ifdef JEMALLOC_STATS
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_tcache =
+#ifdef JEMALLOC_TCACHE
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_tls =
+#ifdef JEMALLOC_TLS
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_utrace =
+#ifdef JEMALLOC_UTRACE
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_valgrind =
+#ifdef JEMALLOC_VALGRIND
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_xmalloc =
+#ifdef JEMALLOC_XMALLOC
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_ivsalloc =
+#ifdef JEMALLOC_IVSALLOC
+    true
+#else
+    false
+#endif
+    ;
+
+#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
+#include <libkern/OSAtomic.h>
+#endif
+
+#ifdef JEMALLOC_ZONE
+#include <mach/mach_error.h>
+#include <mach/mach_init.h>
+#include <mach/vm_map.h>
+#include <malloc/malloc.h>
+#endif
+
+#define	RB_COMPACT
+#include "jemalloc/internal/rb.h"
+#include "jemalloc/internal/qr.h"
+#include "jemalloc/internal/ql.h"
+
+/*
+ * jemalloc can conceptually be broken into components (arena, tcache, etc.),
+ * but there are circular dependencies that cannot be broken without
+ * substantial performance degradation.  In order to reduce the effect on
+ * visual code flow, read the header files in multiple passes, with one of the
+ * following cpp variables defined during each pass:
+ *
+ *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
+ *                        types.
+ *   JEMALLOC_H_STRUCTS : Data structures.
+ *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
+ *   JEMALLOC_H_INLINES : Inline functions.
+ */
+/******************************************************************************/
+#define JEMALLOC_H_TYPES
+
+#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)
+
+#define	ZU(z)	((size_t)z)
+
+#ifndef __DECONST
+#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
+#endif
+
+#ifdef JEMALLOC_DEBUG
+   /* Disable inlining to make debugging easier. */
+#  define JEMALLOC_INLINE
+#  define inline
+#else
+#  define JEMALLOC_ENABLE_INLINE
+#  define JEMALLOC_INLINE static inline
+#endif
+
+/* Smallest size class to support. */
+#define	LG_TINY_MIN		3
+#define	TINY_MIN		(1U << LG_TINY_MIN)
+
+/*
+ * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
+ * classes).
+ */
+#ifndef LG_QUANTUM
+#  ifdef __i386__
+#    define LG_QUANTUM		4
+#  endif
+#  ifdef __ia64__
+#    define LG_QUANTUM		4
+#  endif
+#  ifdef __alpha__
+#    define LG_QUANTUM		4
+#  endif
+#  ifdef __sparc64__
+#    define LG_QUANTUM		4
+#  endif
+#  if (defined(__amd64__) || defined(__x86_64__))
+#    define LG_QUANTUM		4
+#  endif
+#  ifdef __arm__
+#    define LG_QUANTUM		3
+#  endif
+#  ifdef __mips__
+#    define LG_QUANTUM		3
+#  endif
+#  ifdef __powerpc__
+#    define LG_QUANTUM		4
+#  endif
+#  ifdef __s390x__
+#    define LG_QUANTUM		4
+#  endif
+#  ifdef __SH4__
+#    define LG_QUANTUM		4
+#  endif
+#  ifdef __tile__
+#    define LG_QUANTUM		4
+#  endif
+#  ifndef LG_QUANTUM
+#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
+#  endif
+#endif
+
+#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
+#define	QUANTUM_MASK		(QUANTUM - 1)
+
+/* Return the smallest quantum multiple that is >= a. */
+#define	QUANTUM_CEILING(a)						\
+	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
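+/*
+ * For example, on an architecture with LG_QUANTUM == 4, QUANTUM is 16 and
+ * QUANTUM_CEILING(20) == 32.
+ */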
+
+#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
+#define	LONG_MASK		(LONG - 1)
+
+/* Return the smallest long multiple that is >= a. */
+#define	LONG_CEILING(a)							\
+	(((a) + LONG_MASK) & ~LONG_MASK)
+
+#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
+#define	PTR_MASK		(SIZEOF_PTR - 1)
+
+/* Return the smallest (void *) multiple that is >= a. */
+#define	PTR_CEILING(a)							\
+	(((a) + PTR_MASK) & ~PTR_MASK)
+
+/*
+ * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
+ * In addition, this controls the spacing of cacheline-spaced size classes.
+ */
+#define	LG_CACHELINE		6
+#define	CACHELINE		((size_t)(1U << LG_CACHELINE))
+#define	CACHELINE_MASK		(CACHELINE - 1)
+
+/* Return the smallest cacheline multiple that is >= s. */
+#define	CACHELINE_CEILING(s)						\
+	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
+
+/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
+#ifdef PAGE_MASK
+#  undef PAGE_MASK
+#endif
+#define	LG_PAGE		STATIC_PAGE_SHIFT
+#define	PAGE		((size_t)(1U << STATIC_PAGE_SHIFT))
+#define	PAGE_MASK	((size_t)(PAGE - 1))
+
+/* Return the smallest pagesize multiple that is >= s. */
+#define	PAGE_CEILING(s)							\
+	(((s) + PAGE_MASK) & ~PAGE_MASK)
+
+/* Return the nearest aligned address at or below a. */
+#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
+	((void *)((uintptr_t)(a) & (-(alignment))))
+
+/* Return the offset between a and the nearest aligned address at or below a. */
+#define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
+	((size_t)((uintptr_t)(a) & (alignment - 1)))
+
+/* Return the smallest alignment multiple that is >= s. */
+#define	ALIGNMENT_CEILING(s, alignment)					\
+	(((s) + (alignment - 1)) & (-(alignment)))
+
+#ifdef JEMALLOC_VALGRIND
+/*
+ * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
+ * so that when Valgrind reports errors, there are no extra stack frames
+ * in the backtraces.
+ *
+ * The size that is reported to valgrind must be consistent through a chain of
+ * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
+ * jemalloc, so it is critical that all callers of these macros provide usize
+ * rather than request size.  As a result, buffer overflow detection is
+ * technically weakened for the standard API, though it is generally accepted
+ * practice to consider any extra bytes reported by malloc_usable_size() as
+ * usable space.
+ */
+#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {		\
+	if (config_valgrind && opt_valgrind && cond)			\
+		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero);	\
+} while (0)
+#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
+    old_rzsize, zero)  do {						\
+	if (config_valgrind && opt_valgrind) {				\
+		size_t rzsize = p2rz(ptr);				\
+									\
+		if (ptr == old_ptr) {					\
+			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize,	\
+			    usize, rzsize);				\
+			if (zero && old_usize < usize) {		\
+				VALGRIND_MAKE_MEM_DEFINED(		\
+				    (void *)((uintptr_t)ptr +		\
+				    old_usize), usize - old_usize);	\
+			}						\
+		} else {						\
+			if (old_ptr != NULL) {				\
+				VALGRIND_FREELIKE_BLOCK(old_ptr,	\
+				    old_rzsize);			\
+			}						\
+			if (ptr != NULL) {				\
+				size_t copy_size = (old_usize < usize)	\
+				    ?  old_usize : usize;		\
+				size_t tail_size = usize - copy_size;	\
+				VALGRIND_MALLOCLIKE_BLOCK(ptr, usize,	\
+				    rzsize, false);			\
+				if (copy_size > 0) {			\
+					VALGRIND_MAKE_MEM_DEFINED(ptr,	\
+					    copy_size);			\
+				}					\
+				if (zero && tail_size > 0) {		\
+					VALGRIND_MAKE_MEM_DEFINED(	\
+					    (void *)((uintptr_t)ptr +	\
+					    copy_size), tail_size);	\
+				}					\
+			}						\
+		}							\
+	}								\
+} while (0)
+#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {			\
+	if (config_valgrind && opt_valgrind)				\
+		VALGRIND_FREELIKE_BLOCK(ptr, rzsize);			\
+} while (0)
+#else
+#define	VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
+#define	VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
+#define	VALGRIND_FREELIKE_BLOCK(addr, rzB)
+#define	VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
+#define	VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
+#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
+#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
+    old_rzsize, zero)
+#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize)
+#endif
+
+#include "jemalloc/internal/util.h"
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/stats.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/tsd.h"
+#include "jemalloc/internal/mb.h"
+#include "jemalloc/internal/extent.h"
+#include "jemalloc/internal/arena.h"
+#include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/chunk.h"
+#include "jemalloc/internal/huge.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/tcache.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/quarantine.h"
+#include "jemalloc/internal/prof.h"
+
+#undef JEMALLOC_H_TYPES
+/******************************************************************************/
+#define JEMALLOC_H_STRUCTS
+
+#include "jemalloc/internal/util.h"
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/stats.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/tsd.h"
+#include "jemalloc/internal/mb.h"
+#include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/extent.h"
+#include "jemalloc/internal/arena.h"
+#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/chunk.h"
+#include "jemalloc/internal/huge.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/tcache.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/quarantine.h"
+#include "jemalloc/internal/prof.h"
+
+typedef struct {
+	uint64_t	allocated;
+	uint64_t	deallocated;
+} thread_allocated_t;
+/*
+ * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
+ * argument.
+ */
+#define	THREAD_ALLOCATED_INITIALIZER	JEMALLOC_CONCAT({0, 0})
+
+#undef JEMALLOC_H_STRUCTS
+/******************************************************************************/
+#define JEMALLOC_H_EXTERNS
+
+extern bool	opt_abort;
+extern bool	opt_junk;
+extern size_t	opt_quarantine;
+extern bool	opt_redzone;
+extern bool	opt_utrace;
+extern bool	opt_valgrind;
+extern bool	opt_xmalloc;
+extern bool	opt_zero;
+extern size_t	opt_narenas;
+
+/* Number of CPUs. */
+extern unsigned		ncpus;
+
+extern malloc_mutex_t	arenas_lock; /* Protects arenas initialization. */
+/*
+ * Arenas that are used to service external requests.  Not all elements of the
+ * arenas array are necessarily used; arenas are created lazily as needed.
+ */
+extern arena_t		**arenas;
+extern unsigned		narenas;
+
+arena_t	*arenas_extend(unsigned ind);
+void	arenas_cleanup(void *arg);
+arena_t	*choose_arena_hard(void);
+void	jemalloc_prefork(void);
+void	jemalloc_postfork_parent(void);
+void	jemalloc_postfork_child(void);
+
+#include "jemalloc/internal/util.h"
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/stats.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/tsd.h"
+#include "jemalloc/internal/mb.h"
+#include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/extent.h"
+#include "jemalloc/internal/arena.h"
+#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/chunk.h"
+#include "jemalloc/internal/huge.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/tcache.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/quarantine.h"
+#include "jemalloc/internal/prof.h"
+
+#undef JEMALLOC_H_EXTERNS
+/******************************************************************************/
+#define JEMALLOC_H_INLINES
+
+#include "jemalloc/internal/util.h"
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/stats.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/tsd.h"
+#include "jemalloc/internal/mb.h"
+#include "jemalloc/internal/extent.h"
+#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/chunk.h"
+#include "jemalloc/internal/huge.h"
+
+#ifndef JEMALLOC_ENABLE_INLINE
+malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)
+
+size_t	s2u(size_t size);
+size_t	sa2u(size_t size, size_t alignment);
+arena_t	*choose_arena(arena_t *arena);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
+/*
+ * Map of pthread_self() --> arenas[???], used for selecting an arena to use
+ * for allocations.
+ */
+malloc_tsd_externs(arenas, arena_t *)
+malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup)
+
+/*
+ * Compute usable size that would result from allocating an object with the
+ * specified size.
+ */
+JEMALLOC_INLINE size_t
+s2u(size_t size)
+{
+
+	if (size <= SMALL_MAXCLASS)
+		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
+	if (size <= arena_maxclass)
+		return (PAGE_CEILING(size));
+	return (CHUNK_CEILING(size));
+}
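+/*
+ * For instance, with 4 KiB pages a request above the largest small size
+ * class, say s2u(5000), rounds to PAGE_CEILING(5000) == 8192; requests above
+ * arena_maxclass round to a chunk multiple instead.
+ */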
+
+/*
+ * Compute usable size that would result from allocating an object with the
+ * specified size and alignment.
+ */
+JEMALLOC_INLINE size_t
+sa2u(size_t size, size_t alignment)
+{
+	size_t usize;
+
+	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
+
+	/*
+	 * Round size up to the nearest multiple of alignment.
+	 *
+	 * This done, we can take advantage of the fact that for each small
+	 * size class, every object is aligned at the smallest power of two
+	 * that is non-zero in the base two representation of the size.  For
+	 * example:
+	 *
+	 *   Size |   Base 2 | Minimum alignment
+	 *   -----+----------+------------------
+	 *     96 |  1100000 |  32
+	 *    144 | 10100000 |  32
+	 *    192 | 11000000 |  64
+	 */
+	usize = ALIGNMENT_CEILING(size, alignment);
+	/*
+	 * (usize < size) protects against the combination of maximal
+	 * alignment and size greater than maximal alignment.
+	 */
+	if (usize < size) {
+		/* size_t overflow. */
+		return (0);
+	}
+
+	if (usize <= arena_maxclass && alignment <= PAGE) {
+		if (usize <= SMALL_MAXCLASS)
+			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
+		return (PAGE_CEILING(usize));
+	} else {
+		size_t run_size;
+
+		/*
+		 * We can't achieve subpage alignment, so round up alignment
+		 * permanently; it makes later calculations simpler.
+		 */
+		alignment = PAGE_CEILING(alignment);
+		usize = PAGE_CEILING(size);
+		/*
+		 * (usize < size) protects against very large sizes within
+		 * PAGE of SIZE_T_MAX.
+		 *
+		 * (usize + alignment < usize) protects against the
+		 * combination of maximal alignment and usize large enough
+		 * to cause overflow.  This is similar to the first overflow
+		 * check above, but it needs to be repeated due to the new
+		 * usize value, which may now be *equal* to maximal
+		 * alignment, whereas before we only detected overflow if the
+		 * original size was *greater* than maximal alignment.
+		 */
+		if (usize < size || usize + alignment < usize) {
+			/* size_t overflow. */
+			return (0);
+		}
+
+		/*
+		 * Calculate the size of the over-size run that arena_palloc()
+		 * would need to allocate in order to guarantee the alignment.
+		 * If the run wouldn't fit within a chunk, round up to a huge
+		 * allocation size.
+		 */
+		run_size = usize + alignment - PAGE;
+		if (run_size <= arena_maxclass)
+			return (PAGE_CEILING(usize));
+		return (CHUNK_CEILING(usize));
+	}
+}
+
+/* Choose an arena based on a per-thread value. */
+JEMALLOC_INLINE arena_t *
+choose_arena(arena_t *arena)
+{
+	arena_t *ret;
+
+	if (arena != NULL)
+		return (arena);
+
+	if ((ret = *arenas_tsd_get()) == NULL) {
+		ret = choose_arena_hard();
+		assert(ret != NULL);
+	}
+
+	return (ret);
+}
+#endif
+
+#include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/tcache.h"
+#include "jemalloc/internal/arena.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/quarantine.h"
+
+#ifndef JEMALLOC_ENABLE_INLINE
+void	*imalloc(size_t size);
+void	*icalloc(size_t size);
+void	*ipalloc(size_t usize, size_t alignment, bool zero);
+size_t	isalloc(const void *ptr, bool demote);
+size_t	ivsalloc(const void *ptr, bool demote);
+size_t	u2rz(size_t usize);
+size_t	p2rz(const void *ptr);
+void	idalloc(void *ptr);
+void	iqalloc(void *ptr);
+void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
+    bool zero, bool no_move);
+malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
+JEMALLOC_INLINE void *
+imalloc(size_t size)
+{
+
+	assert(size != 0);
+
+	if (size <= arena_maxclass)
+		return (arena_malloc(NULL, size, false, true));
+	else
+		return (huge_malloc(size, false));
+}
+
+JEMALLOC_INLINE void *
+icalloc(size_t size)
+{
+
+	if (size <= arena_maxclass)
+		return (arena_malloc(NULL, size, true, true));
+	else
+		return (huge_malloc(size, true));
+}
+
+JEMALLOC_INLINE void *
+ipalloc(size_t usize, size_t alignment, bool zero)
+{
+	void *ret;
+
+	assert(usize != 0);
+	assert(usize == sa2u(usize, alignment));
+
+	if (usize <= arena_maxclass && alignment <= PAGE)
+		ret = arena_malloc(NULL, usize, zero, true);
+	else {
+		if (usize <= arena_maxclass) {
+			ret = arena_palloc(choose_arena(NULL), usize, alignment,
+			    zero);
+		} else if (alignment <= chunksize)
+			ret = huge_malloc(usize, zero);
+		else
+			ret = huge_palloc(usize, alignment, zero);
+	}
+
+	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
+	return (ret);
+}
+
+/*
+ * Typical usage:
+ *   void *ptr = [...]
+ *   size_t sz = isalloc(ptr, config_prof);
+ */
+JEMALLOC_INLINE size_t
+isalloc(const void *ptr, bool demote)
+{
+	size_t ret;
+	arena_chunk_t *chunk;
+
+	assert(ptr != NULL);
+	/* Demotion only makes sense if config_prof is true. */
+	assert(config_prof || demote == false);
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	if (chunk != ptr) {
+		/* Region. */
+		ret = arena_salloc(ptr, demote);
+	} else
+		ret = huge_salloc(ptr);
+
+	return (ret);
+}
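
The chunk != ptr test relies on chunks being aligned to the chunk size: masking an interior pointer down to the chunk boundary yields a different address, whereas a huge allocation starts exactly at a chunk boundary, so the masked base equals the pointer itself. A standalone sketch of the masking, assuming an illustrative 4 MiB chunk size (jemalloc's is set by opt_lg_chunk):

    #include <stdint.h>
    #include <stdbool.h>

    #define EXAMPLE_CHUNK ((uintptr_t)1 << 22)    /* Assumed 4 MiB chunk size. */

    /* Round an address down to its enclosing chunk boundary. */
    static void *
    chunk_addr2base(const void *ptr)
    {
        return ((void *)((uintptr_t)ptr & ~(EXAMPLE_CHUNK - 1)));
    }

    /* True if ptr is itself a chunk base, i.e. a huge allocation. */
    static bool
    is_chunk_base(const void *ptr)
    {
        return (chunk_addr2base(ptr) == ptr);
    }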
+
+JEMALLOC_INLINE size_t
+ivsalloc(const void *ptr, bool demote)
+{
+
+	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
+	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
+		return (0);
+
+	return (isalloc(ptr, demote));
+}
+
+JEMALLOC_INLINE size_t
+u2rz(size_t usize)
+{
+	size_t ret;
+
+	if (usize <= SMALL_MAXCLASS) {
+		size_t binind = SMALL_SIZE2BIN(usize);
+		ret = arena_bin_info[binind].redzone_size;
+	} else
+		ret = 0;
+
+	return (ret);
+}
+
+JEMALLOC_INLINE size_t
+p2rz(const void *ptr)
+{
+	size_t usize = isalloc(ptr, false);
+
+	return (u2rz(usize));
+}
+
+JEMALLOC_INLINE void
+idalloc(void *ptr)
+{
+	arena_chunk_t *chunk;
+
+	assert(ptr != NULL);
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	if (chunk != ptr)
+		arena_dalloc(chunk->arena, chunk, ptr, true);
+	else
+		huge_dalloc(ptr, true);
+}
+
+JEMALLOC_INLINE void
+iqalloc(void *ptr)
+{
+
+	if (config_fill && opt_quarantine)
+		quarantine(ptr);
+	else
+		idalloc(ptr);
+}
+
+JEMALLOC_INLINE void *
+iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
+    bool no_move)
+{
+	void *ret;
+	size_t oldsize;
+
+	assert(ptr != NULL);
+	assert(size != 0);
+
+	oldsize = isalloc(ptr, config_prof);
+
+	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
+	    != 0) {
+		size_t usize, copysize;
+
+		/*
+		 * Existing object alignment is inadequate; allocate new space
+		 * and copy.
+		 */
+		if (no_move)
+			return (NULL);
+		usize = sa2u(size + extra, alignment);
+		if (usize == 0)
+			return (NULL);
+		ret = ipalloc(usize, alignment, zero);
+		if (ret == NULL) {
+			if (extra == 0)
+				return (NULL);
+			/* Try again, without extra this time. */
+			usize = sa2u(size, alignment);
+			if (usize == 0)
+				return (NULL);
+			ret = ipalloc(usize, alignment, zero);
+			if (ret == NULL)
+				return (NULL);
+		}
+		/*
+		 * Copy at most size bytes (not size+extra), since the caller
+		 * has no expectation that the extra bytes will be reliably
+		 * preserved.
+		 */
+		copysize = (size < oldsize) ? size : oldsize;
+		memcpy(ret, ptr, copysize);
+		iqalloc(ptr);
+		return (ret);
+	}
+
+	if (no_move) {
+		if (size <= arena_maxclass) {
+			return (arena_ralloc_no_move(ptr, oldsize, size,
+			    extra, zero));
+		} else {
+			return (huge_ralloc_no_move(ptr, oldsize, size,
+			    extra));
+		}
+	} else {
+		if (size + extra <= arena_maxclass) {
+			return (arena_ralloc(ptr, oldsize, size, extra,
+			    alignment, zero, true));
+		} else {
+			return (huge_ralloc(ptr, oldsize, size, extra,
+			    alignment, zero));
+		}
+	}
+}
+
+malloc_tsd_externs(thread_allocated, thread_allocated_t)
+malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
+    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
+#endif
+
+#include "jemalloc/internal/prof.h"
+
+#undef JEMALLOC_H_INLINES
+/******************************************************************************/
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/mb.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/mb.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,115 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+void	mb_write(void);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_))
+#ifdef __i386__
+/*
+ * According to the Intel Architecture Software Developer's Manual, current
+ * processors execute instructions in order from the perspective of other
+ * processors in a multiprocessor system, but 1) Intel reserves the right to
+ * change that, and 2) the compiler's optimizer could re-order instructions if
+ * there weren't some form of barrier.  Therefore, even if running on an
+ * architecture that does not need memory barriers (everything through at least
+ * i686), an "optimizer barrier" is necessary.
+ */
+JEMALLOC_INLINE void
+mb_write(void)
+{
+
+#  if 0
+	/* This is a true memory barrier. */
+	asm volatile ("pusha;"
+	    "xor  %%eax,%%eax;"
+	    "cpuid;"
+	    "popa;"
+	    : /* Outputs. */
+	    : /* Inputs. */
+	    : "memory" /* Clobbers. */
+	    );
+#else
+	/*
+	 * This is hopefully enough to keep the compiler from reordering
+	 * instructions around this one.
+	 */
+	asm volatile ("nop;"
+	    : /* Outputs. */
+	    : /* Inputs. */
+	    : "memory" /* Clobbers. */
+	    );
+#endif
+}
+#elif (defined(__amd64__) || defined(__x86_64__))
+JEMALLOC_INLINE void
+mb_write(void)
+{
+
+	asm volatile ("sfence"
+	    : /* Outputs. */
+	    : /* Inputs. */
+	    : "memory" /* Clobbers. */
+	    );
+}
+#elif defined(__powerpc__)
+JEMALLOC_INLINE void
+mb_write(void)
+{
+
+	asm volatile ("eieio"
+	    : /* Outputs. */
+	    : /* Inputs. */
+	    : "memory" /* Clobbers. */
+	    );
+}
+#elif defined(__sparc64__)
+JEMALLOC_INLINE void
+mb_write(void)
+{
+
+	asm volatile ("membar #StoreStore"
+	    : /* Outputs. */
+	    : /* Inputs. */
+	    : "memory" /* Clobbers. */
+	    );
+}
+#elif defined(__tile__)
+JEMALLOC_INLINE void
+mb_write(void)
+{
+
+	__sync_synchronize();
+}
+#else
+/*
+ * This is much slower than a simple memory barrier, but the semantics of mutex
+ * unlock make this work.
+ */
+JEMALLOC_INLINE void
+mb_write(void)
+{
+	malloc_mutex_t mtx;
+
+	malloc_mutex_init(&mtx);
+	malloc_mutex_lock(&mtx);
+	malloc_mutex_unlock(&mtx);
+}
+#endif
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
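
The i386 comment above distinguishes a compiler-only "optimizer barrier" from a true hardware fence. Both can be written as one-line GCC/Clang inline assembly; a standalone sketch for x86-64, mirroring the sfence variant above (illustrative names, not part of the imported file):

    /*
     * Compiler barrier: forbids the optimizer from moving memory accesses
     * across this point, but emits no instruction.
     */
    static inline void
    compiler_barrier(void)
    {
        __asm__ volatile ("" : : : "memory");
    }

    /* Store fence: prior stores become visible to other CPUs before later stores. */
    static inline void
    store_fence(void)
    {
        __asm__ volatile ("sfence" : : : "memory");
    }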
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/mutex.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/mutex.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,88 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct malloc_mutex_s malloc_mutex_t;
+
+#ifdef JEMALLOC_OSSPIN
+#define	MALLOC_MUTEX_INITIALIZER {0}
+#elif (defined(JEMALLOC_MUTEX_INIT_CB))
+#define	MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
+#else
+#  if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) &&				\
+       defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
+#    define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
+#    define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
+#  else
+#    define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
+#    define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER}
+#  endif
+#endif
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct malloc_mutex_s {
+#ifdef JEMALLOC_OSSPIN
+	OSSpinLock		lock;
+#elif (defined(JEMALLOC_MUTEX_INIT_CB))
+	pthread_mutex_t		lock;
+	malloc_mutex_t		*postponed_next;
+#else
+	pthread_mutex_t		lock;
+#endif
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+#ifdef JEMALLOC_LAZY_LOCK
+extern bool isthreaded;
+#endif
+
+bool	malloc_mutex_init(malloc_mutex_t *mutex);
+void	malloc_mutex_prefork(malloc_mutex_t *mutex);
+void	malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
+void	malloc_mutex_postfork_child(malloc_mutex_t *mutex);
+bool	mutex_boot(void);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+void	malloc_mutex_lock(malloc_mutex_t *mutex);
+void	malloc_mutex_unlock(malloc_mutex_t *mutex);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
+JEMALLOC_INLINE void
+malloc_mutex_lock(malloc_mutex_t *mutex)
+{
+
+	if (isthreaded) {
+#ifdef JEMALLOC_OSSPIN
+		OSSpinLockLock(&mutex->lock);
+#else
+		pthread_mutex_lock(&mutex->lock);
+#endif
+	}
+}
+
+JEMALLOC_INLINE void
+malloc_mutex_unlock(malloc_mutex_t *mutex)
+{
+
+	if (isthreaded) {
+#ifdef JEMALLOC_OSSPIN
+		OSSpinLockUnlock(&mutex->lock);
+#else
+		pthread_mutex_unlock(&mutex->lock);
+#endif
+	}
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
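
The isthreaded test above lets a still-single-threaded process skip mutex operations entirely; the flag is flipped once the first additional thread is created (JEMALLOC_LAZY_LOCK wraps pthread_create(); note the pthread_create entry in private_namespace.h below). A minimal sketch of the same pattern outside jemalloc (illustrative names; the flag must be set before a second thread can run):

    #include <pthread.h>
    #include <stdbool.h>

    static bool threads_started = false;    /* Set once the process becomes threaded. */
    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    static void
    maybe_lock(void)
    {
        if (threads_started)
            pthread_mutex_lock(&big_lock);
    }

    static void
    maybe_unlock(void)
    {
        if (threads_started)
            pthread_mutex_unlock(&big_lock);
    }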
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,274 @@
+#define	arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
+#define	arena_bin_index JEMALLOC_N(arena_bin_index)
+#define	arena_boot JEMALLOC_N(arena_boot)
+#define	arena_dalloc JEMALLOC_N(arena_dalloc)
+#define	arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
+#define	arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
+#define	arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
+#define	arena_malloc JEMALLOC_N(arena_malloc)
+#define	arena_malloc_large JEMALLOC_N(arena_malloc_large)
+#define	arena_malloc_small JEMALLOC_N(arena_malloc_small)
+#define	arena_new JEMALLOC_N(arena_new)
+#define	arena_palloc JEMALLOC_N(arena_palloc)
+#define	arena_postfork_child JEMALLOC_N(arena_postfork_child)
+#define	arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
+#define	arena_prefork JEMALLOC_N(arena_prefork)
+#define	arena_prof_accum JEMALLOC_N(arena_prof_accum)
+#define	arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get)
+#define	arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set)
+#define	arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
+#define	arena_purge_all JEMALLOC_N(arena_purge_all)
+#define	arena_ralloc JEMALLOC_N(arena_ralloc)
+#define	arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
+#define	arena_run_regind JEMALLOC_N(arena_run_regind)
+#define	arena_salloc JEMALLOC_N(arena_salloc)
+#define	arena_stats_merge JEMALLOC_N(arena_stats_merge)
+#define	arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
+#define	arenas_bin_i_index JEMALLOC_N(arenas_bin_i_index)
+#define	arenas_cleanup JEMALLOC_N(arenas_cleanup)
+#define	arenas_extend JEMALLOC_N(arenas_extend)
+#define	arenas_lrun_i_index JEMALLOC_N(arenas_lrun_i_index)
+#define	arenas_tls JEMALLOC_N(arenas_tls)
+#define	arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
+#define	arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
+#define	arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
+#define	arenas_tsd_set JEMALLOC_N(arenas_tsd_set)
+#define	atomic_add_u JEMALLOC_N(atomic_add_u)
+#define	atomic_add_uint32 JEMALLOC_N(atomic_add_uint32)
+#define	atomic_add_uint64 JEMALLOC_N(atomic_add_uint64)
+#define	atomic_add_z JEMALLOC_N(atomic_add_z)
+#define	atomic_sub_u JEMALLOC_N(atomic_sub_u)
+#define	atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32)
+#define	atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64)
+#define	atomic_sub_z JEMALLOC_N(atomic_sub_z)
+#define	base_alloc JEMALLOC_N(base_alloc)
+#define	base_boot JEMALLOC_N(base_boot)
+#define	base_calloc JEMALLOC_N(base_calloc)
+#define	base_node_alloc JEMALLOC_N(base_node_alloc)
+#define	base_node_dealloc JEMALLOC_N(base_node_dealloc)
+#define	base_postfork_child JEMALLOC_N(base_postfork_child)
+#define	base_postfork_parent JEMALLOC_N(base_postfork_parent)
+#define	base_prefork JEMALLOC_N(base_prefork)
+#define	bitmap_full JEMALLOC_N(bitmap_full)
+#define	bitmap_get JEMALLOC_N(bitmap_get)
+#define	bitmap_info_init JEMALLOC_N(bitmap_info_init)
+#define	bitmap_info_ngroups JEMALLOC_N(bitmap_info_ngroups)
+#define	bitmap_init JEMALLOC_N(bitmap_init)
+#define	bitmap_set JEMALLOC_N(bitmap_set)
+#define	bitmap_sfu JEMALLOC_N(bitmap_sfu)
+#define	bitmap_size JEMALLOC_N(bitmap_size)
+#define	bitmap_unset JEMALLOC_N(bitmap_unset)
+#define	bt_init JEMALLOC_N(bt_init)
+#define	buferror JEMALLOC_N(buferror)
+#define	choose_arena JEMALLOC_N(choose_arena)
+#define	choose_arena_hard JEMALLOC_N(choose_arena_hard)
+#define	chunk_alloc JEMALLOC_N(chunk_alloc)
+#define	chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
+#define	chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
+#define	chunk_boot0 JEMALLOC_N(chunk_boot0)
+#define	chunk_boot1 JEMALLOC_N(chunk_boot1)
+#define	chunk_dealloc JEMALLOC_N(chunk_dealloc)
+#define	chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap)
+#define	chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
+#define	chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child)
+#define	chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
+#define	chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
+#define	chunk_in_dss JEMALLOC_N(chunk_in_dss)
+#define	chunk_mmap_boot JEMALLOC_N(chunk_mmap_boot)
+#define	ckh_bucket_search JEMALLOC_N(ckh_bucket_search)
+#define	ckh_count JEMALLOC_N(ckh_count)
+#define	ckh_delete JEMALLOC_N(ckh_delete)
+#define	ckh_evict_reloc_insert JEMALLOC_N(ckh_evict_reloc_insert)
+#define	ckh_insert JEMALLOC_N(ckh_insert)
+#define	ckh_isearch JEMALLOC_N(ckh_isearch)
+#define	ckh_iter JEMALLOC_N(ckh_iter)
+#define	ckh_new JEMALLOC_N(ckh_new)
+#define	ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash)
+#define	ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp)
+#define	ckh_rebuild JEMALLOC_N(ckh_rebuild)
+#define	ckh_remove JEMALLOC_N(ckh_remove)
+#define	ckh_search JEMALLOC_N(ckh_search)
+#define	ckh_string_hash JEMALLOC_N(ckh_string_hash)
+#define	ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
+#define	ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert)
+#define	ckh_try_insert JEMALLOC_N(ckh_try_insert)
+#define	ctl_boot JEMALLOC_N(ctl_boot)
+#define	ctl_bymib JEMALLOC_N(ctl_bymib)
+#define	ctl_byname JEMALLOC_N(ctl_byname)
+#define	ctl_nametomib JEMALLOC_N(ctl_nametomib)
+#define	extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
+#define	extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
+#define	extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
+#define	extent_tree_ad_iter_recurse JEMALLOC_N(extent_tree_ad_iter_recurse)
+#define	extent_tree_ad_iter_start JEMALLOC_N(extent_tree_ad_iter_start)
+#define	extent_tree_ad_last JEMALLOC_N(extent_tree_ad_last)
+#define	extent_tree_ad_new JEMALLOC_N(extent_tree_ad_new)
+#define	extent_tree_ad_next JEMALLOC_N(extent_tree_ad_next)
+#define	extent_tree_ad_nsearch JEMALLOC_N(extent_tree_ad_nsearch)
+#define	extent_tree_ad_prev JEMALLOC_N(extent_tree_ad_prev)
+#define	extent_tree_ad_psearch JEMALLOC_N(extent_tree_ad_psearch)
+#define	extent_tree_ad_remove JEMALLOC_N(extent_tree_ad_remove)
+#define	extent_tree_ad_reverse_iter JEMALLOC_N(extent_tree_ad_reverse_iter)
+#define	extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse)
+#define	extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start)
+#define	extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search)
+#define	extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first)
+#define	extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert)
+#define	extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter)
+#define	extent_tree_szad_iter_recurse JEMALLOC_N(extent_tree_szad_iter_recurse)
+#define	extent_tree_szad_iter_start JEMALLOC_N(extent_tree_szad_iter_start)
+#define	extent_tree_szad_last JEMALLOC_N(extent_tree_szad_last)
+#define	extent_tree_szad_new JEMALLOC_N(extent_tree_szad_new)
+#define	extent_tree_szad_next JEMALLOC_N(extent_tree_szad_next)
+#define	extent_tree_szad_nsearch JEMALLOC_N(extent_tree_szad_nsearch)
+#define	extent_tree_szad_prev JEMALLOC_N(extent_tree_szad_prev)
+#define	extent_tree_szad_psearch JEMALLOC_N(extent_tree_szad_psearch)
+#define	extent_tree_szad_remove JEMALLOC_N(extent_tree_szad_remove)
+#define	extent_tree_szad_reverse_iter JEMALLOC_N(extent_tree_szad_reverse_iter)
+#define	extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse)
+#define	extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
+#define	extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
+#define	hash JEMALLOC_N(hash)
+#define	huge_boot JEMALLOC_N(huge_boot)
+#define	huge_dalloc JEMALLOC_N(huge_dalloc)
+#define	huge_malloc JEMALLOC_N(huge_malloc)
+#define	huge_palloc JEMALLOC_N(huge_palloc)
+#define	huge_postfork_child JEMALLOC_N(huge_postfork_child)
+#define	huge_postfork_parent JEMALLOC_N(huge_postfork_parent)
+#define	huge_prefork JEMALLOC_N(huge_prefork)
+#define	huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get)
+#define	huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set)
+#define	huge_ralloc JEMALLOC_N(huge_ralloc)
+#define	huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move)
+#define	huge_salloc JEMALLOC_N(huge_salloc)
+#define	iallocm JEMALLOC_N(iallocm)
+#define	icalloc JEMALLOC_N(icalloc)
+#define	idalloc JEMALLOC_N(idalloc)
+#define	imalloc JEMALLOC_N(imalloc)
+#define	ipalloc JEMALLOC_N(ipalloc)
+#define	iqalloc JEMALLOC_N(iqalloc)
+#define	iralloc JEMALLOC_N(iralloc)
+#define	isalloc JEMALLOC_N(isalloc)
+#define	ivsalloc JEMALLOC_N(ivsalloc)
+#define	jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
+#define	jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
+#define	jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
+#define	malloc_cprintf JEMALLOC_N(malloc_cprintf)
+#define	malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
+#define	malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock)
+#define	malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
+#define	malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent)
+#define	malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork)
+#define	malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock)
+#define	malloc_printf JEMALLOC_N(malloc_printf)
+#define	malloc_snprintf JEMALLOC_N(malloc_snprintf)
+#define	malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
+#define	malloc_tsd_boot JEMALLOC_N(malloc_tsd_boot)
+#define	malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
+#define	malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
+#define	malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
+#define	malloc_tsd_no_cleanup JEMALLOC_N(malloc_tsd_no_cleanup)
+#define	malloc_vcprintf JEMALLOC_N(malloc_vcprintf)
+#define	malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
+#define	malloc_write JEMALLOC_N(malloc_write)
+#define	mb_write JEMALLOC_N(mb_write)
+#define	mmap_unaligned_tsd_boot JEMALLOC_N(mmap_unaligned_tsd_boot)
+#define	mmap_unaligned_tsd_cleanup_wrapper JEMALLOC_N(mmap_unaligned_tsd_cleanup_wrapper)
+#define	mmap_unaligned_tsd_get JEMALLOC_N(mmap_unaligned_tsd_get)
+#define	mmap_unaligned_tsd_set JEMALLOC_N(mmap_unaligned_tsd_set)
+#define	mutex_boot JEMALLOC_N(mutex_boot)
+#define	opt_abort JEMALLOC_N(opt_abort)
+#define	opt_junk JEMALLOC_N(opt_junk)
+#define	opt_lg_chunk JEMALLOC_N(opt_lg_chunk)
+#define	opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult)
+#define	opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
+#define	opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample)
+#define	opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max)
+#define	opt_narenas JEMALLOC_N(opt_narenas)
+#define	opt_prof JEMALLOC_N(opt_prof)
+#define	opt_prof_accum JEMALLOC_N(opt_prof_accum)
+#define	opt_prof_active JEMALLOC_N(opt_prof_active)
+#define	opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
+#define	opt_prof_leak JEMALLOC_N(opt_prof_leak)
+#define	opt_stats_print JEMALLOC_N(opt_stats_print)
+#define	opt_tcache JEMALLOC_N(opt_tcache)
+#define	opt_utrace JEMALLOC_N(opt_utrace)
+#define	opt_xmalloc JEMALLOC_N(opt_xmalloc)
+#define	opt_zero JEMALLOC_N(opt_zero)
+#define	p2rz JEMALLOC_N(p2rz)
+#define	pow2_ceil JEMALLOC_N(pow2_ceil)
+#define	prof_backtrace JEMALLOC_N(prof_backtrace)
+#define	prof_boot0 JEMALLOC_N(prof_boot0)
+#define	prof_boot1 JEMALLOC_N(prof_boot1)
+#define	prof_boot2 JEMALLOC_N(prof_boot2)
+#define	prof_ctx_get JEMALLOC_N(prof_ctx_get)
+#define	prof_ctx_set JEMALLOC_N(prof_ctx_set)
+#define	prof_free JEMALLOC_N(prof_free)
+#define	prof_gdump JEMALLOC_N(prof_gdump)
+#define	prof_idump JEMALLOC_N(prof_idump)
+#define	prof_lookup JEMALLOC_N(prof_lookup)
+#define	prof_malloc JEMALLOC_N(prof_malloc)
+#define	prof_mdump JEMALLOC_N(prof_mdump)
+#define	prof_realloc JEMALLOC_N(prof_realloc)
+#define	prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
+#define	prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
+#define	prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
+#define	prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot)
+#define	prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
+#define	prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
+#define	prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set)
+#define	pthread_create JEMALLOC_N(pthread_create)
+#define	quarantine JEMALLOC_N(quarantine)
+#define	quarantine_boot JEMALLOC_N(quarantine_boot)
+#define	quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot)
+#define	quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper)
+#define	quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get)
+#define	quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set)
+#define	register_zone JEMALLOC_N(register_zone)
+#define	rtree_get JEMALLOC_N(rtree_get)
+#define	rtree_get_locked JEMALLOC_N(rtree_get_locked)
+#define	rtree_new JEMALLOC_N(rtree_new)
+#define	rtree_set JEMALLOC_N(rtree_set)
+#define	s2u JEMALLOC_N(s2u)
+#define	sa2u JEMALLOC_N(sa2u)
+#define	stats_arenas_i_bins_j_index JEMALLOC_N(stats_arenas_i_bins_j_index)
+#define	stats_arenas_i_index JEMALLOC_N(stats_arenas_i_index)
+#define	stats_arenas_i_lruns_j_index JEMALLOC_N(stats_arenas_i_lruns_j_index)
+#define	stats_cactive JEMALLOC_N(stats_cactive)
+#define	stats_cactive_add JEMALLOC_N(stats_cactive_add)
+#define	stats_cactive_get JEMALLOC_N(stats_cactive_get)
+#define	stats_cactive_sub JEMALLOC_N(stats_cactive_sub)
+#define	stats_print JEMALLOC_N(stats_print)
+#define	tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy)
+#define	tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
+#define	tcache_alloc_small JEMALLOC_N(tcache_alloc_small)
+#define	tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
+#define	tcache_arena_associate JEMALLOC_N(tcache_arena_associate)
+#define	tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate)
+#define	tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
+#define	tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
+#define	tcache_boot0 JEMALLOC_N(tcache_boot0)
+#define	tcache_boot1 JEMALLOC_N(tcache_boot1)
+#define	tcache_create JEMALLOC_N(tcache_create)
+#define	tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large)
+#define	tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small)
+#define	tcache_destroy JEMALLOC_N(tcache_destroy)
+#define	tcache_enabled_get JEMALLOC_N(tcache_enabled_get)
+#define	tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
+#define	tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot)
+#define	tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
+#define	tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
+#define	tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set)
+#define	tcache_event JEMALLOC_N(tcache_event)
+#define	tcache_flush JEMALLOC_N(tcache_flush)
+#define	tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
+#define	tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup)
+#define	tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot)
+#define	tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
+#define	tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
+#define	tcache_tsd_set JEMALLOC_N(tcache_tsd_set)
+#define	thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot)
+#define	thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
+#define	thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)
+#define	thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set)
+#define	u2rz JEMALLOC_N(u2rz)
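
Every internal symbol is routed through JEMALLOC_N() so that builds which embed jemalloc (such as FreeBSD's libc) can give the internals a reserved prefix that neither collides with nor can be interposed by application symbols. The macro itself is defined elsewhere in the build; a hypothetical expansion, purely to show the mechanism (the actual prefix may differ):

    /* Hypothetical definition; the real prefix is chosen by the build. */
    #define JEMALLOC_N(n) __jemalloc_internal_##n

    /* With that definition, for example: */
    #define arena_malloc JEMALLOC_N(arena_malloc)   /* arena_malloc -> __jemalloc_internal_arena_malloc */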
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/prng.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/prng.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,60 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+/*
+ * Simple linear congruential pseudo-random number generator:
+ *
+ *   x_{n+1} = (a*x_n + c) % m
+ *
+ * where the following constants ensure maximal period:
+ *
+ *   a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
+ *   c == Odd number (relatively prime to 2^n).
+ *   m == 2^32
+ *
+ * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
+ *
+ * This choice of m has the disadvantage that the quality of the bits is
+ * proportional to bit position.  For example, the lowest bit has a cycle of 2,
+ * the next has a cycle of 4, etc.  For this reason, we prefer to use the upper
+ * bits.
+ *
+ * Macro parameters:
+ *   uint32_t r          : Result.
+ *   unsigned lg_range   : (0..32], number of least significant bits to return.
+ *   uint32_t state      : Seed value.
+ *   const uint32_t a, c : See above discussion.
+ */
+#define prng32(r, lg_range, state, a, c) do {				\
+	assert(lg_range > 0);						\
+	assert(lg_range <= 32);						\
+									\
+	r = (state * (a)) + (c);					\
+	state = r;							\
+	r >>= (32 - lg_range);						\
+} while (false)
+
+/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
+#define prng64(r, lg_range, state, a, c) do {				\
+	assert(lg_range > 0);						\
+	assert(lg_range <= 64);						\
+									\
+	r = (state * (a)) + (c);					\
+	state = r;							\
+	r >>= (64 - lg_range);						\
+} while (false)
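
The caller supplies the (a, c) pair; prof.h below passes Knuth's 64-bit constants to prng64(). A standalone 32-bit sketch of the same generator, using the Numerical Recipes constants as an illustrative pair that satisfies the constraints listed above:

    #include <stdint.h>
    #include <stdio.h>

    #define LCG_A UINT32_C(1664525)        /* (a - 1) is a multiple of 4. */
    #define LCG_C UINT32_C(1013904223)     /* c is odd. */

    /* Return lg_range bits of pseudo-randomness, taken from the top of the new state. */
    static uint32_t
    lcg_bits(uint32_t *state, unsigned lg_range)
    {
        uint32_t r;

        r = (*state * LCG_A) + LCG_C;
        *state = r;
        return (r >> (32 - lg_range));
    }

    int
    main(void)
    {
        uint32_t state = 42;    /* Seed. */
        unsigned i;

        for (i = 0; i < 4; i++)
            printf("%u\n", lcg_bits(&state, 8));    /* Values in [0, 255]. */
        return (0);
    }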
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/prof.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/prof.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,535 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct prof_bt_s prof_bt_t;
+typedef struct prof_cnt_s prof_cnt_t;
+typedef struct prof_thr_cnt_s prof_thr_cnt_t;
+typedef struct prof_ctx_s prof_ctx_t;
+typedef struct prof_tdata_s prof_tdata_t;
+
+/* Option defaults. */
+#define	PROF_PREFIX_DEFAULT		"jeprof"
+#define	LG_PROF_SAMPLE_DEFAULT		0
+#define	LG_PROF_INTERVAL_DEFAULT	-1
+
+/*
+ * Hard limit on stack backtrace depth.  The version of prof_backtrace() that
+ * is based on __builtin_return_address() necessarily has a hard-coded number
+ * of backtrace frame handlers, and should be kept in sync with this setting.
+ */
+#define	PROF_BT_MAX			128
+
+/* Maximum number of backtraces to store in each per thread LRU cache. */
+#define	PROF_TCMAX			1024
+
+/* Initial hash table size. */
+#define	PROF_CKH_MINITEMS		64
+
+/* Size of memory buffer to use when writing dump files. */
+#define	PROF_DUMP_BUFSIZE		65536
+
+/* Size of stack-allocated buffer used by prof_printf(). */
+#define	PROF_PRINTF_BUFSIZE		128
+
+/*
+ * Number of mutexes shared among all ctx's.  No space is allocated for these
+ * unless profiling is enabled, so it's okay to over-provision.
+ */
+#define	PROF_NCTX_LOCKS			1024
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct prof_bt_s {
+	/* Backtrace, stored as len program counters. */
+	void		**vec;
+	unsigned	len;
+};
+
+#ifdef JEMALLOC_PROF_LIBGCC
+/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
+typedef struct {
+	prof_bt_t	*bt;
+	unsigned	nignore;
+	unsigned	max;
+} prof_unwind_data_t;
+#endif
+
+struct prof_cnt_s {
+	/*
+	 * Profiling counters.  An allocation/deallocation pair can operate on
+	 * different prof_thr_cnt_t objects that are linked into the same
+	 * prof_ctx_t cnts_ql, so it is possible for the cur* counters to go
+	 * negative.  In principle it is possible for the *bytes counters to
+	 * overflow/underflow, but a general solution would require something
+	 * like 128-bit counters; this implementation doesn't bother to solve
+	 * that problem.
+	 */
+	int64_t		curobjs;
+	int64_t		curbytes;
+	uint64_t	accumobjs;
+	uint64_t	accumbytes;
+};
+
+struct prof_thr_cnt_s {
+	/* Linkage into prof_ctx_t's cnts_ql. */
+	ql_elm(prof_thr_cnt_t)	cnts_link;
+
+	/* Linkage into thread's LRU. */
+	ql_elm(prof_thr_cnt_t)	lru_link;
+
+	/*
+	 * Associated context.  If a thread frees an object that it did not
+	 * allocate, it is possible that the context is not cached in the
+	 * thread's hash table, in which case it must be able to look up the
+	 * context, insert a new prof_thr_cnt_t into the thread's hash table,
+	 * and link it into the prof_ctx_t's cnts_ql.
+	 */
+	prof_ctx_t		*ctx;
+
+	/*
+	 * Threads use memory barriers to update the counters.  Since there is
+	 * only ever one writer, the only challenge is for the reader to get a
+	 * consistent read of the counters.
+	 *
+	 * The writer uses this series of operations:
+	 *
+	 * 1) Increment epoch to an odd number.
+	 * 2) Update counters.
+	 * 3) Increment epoch to an even number.
+	 *
+	 * The reader must assure 1) that the epoch is even while it reads the
+	 * counters, and 2) that the epoch doesn't change between the time it
+	 * starts and finishes reading the counters.
+	 */
+	unsigned		epoch;
+
+	/* Profiling counters. */
+	prof_cnt_t		cnts;
+};
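
The epoch scheme described above is a sequence lock: the writer makes the epoch odd, updates, then makes it even; a reader retries until it observes the same even epoch before and after copying the counters. A standalone single-writer sketch of that protocol (simplified: a compiler barrier stands in for mb_write(), and real code on weakly ordered CPUs would also need the hardware fences from mb.h and atomic or volatile accesses):

    #include <stdint.h>

    typedef struct {
        unsigned    epoch;        /* Odd while an update is in progress. */
        int64_t     curobjs;
        int64_t     curbytes;
    } counters_t;

    #define barrier() __asm__ volatile ("" : : : "memory")

    /* Single writer. */
    static void
    counters_add(counters_t *c, int64_t bytes)
    {
        c->epoch++;        /* Now odd: readers will retry. */
        barrier();
        c->curobjs++;
        c->curbytes += bytes;
        barrier();
        c->epoch++;        /* Even again: the snapshot is consistent. */
    }

    /* Any reader. */
    static void
    counters_read(const counters_t *c, counters_t *snap)
    {
        unsigned e0, e1;

        do {
            e0 = c->epoch;
            barrier();
            snap->curobjs = c->curobjs;
            snap->curbytes = c->curbytes;
            barrier();
            e1 = c->epoch;
        } while (e0 != e1 || (e0 & 1) != 0);
    }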
+
+struct prof_ctx_s {
+	/* Associated backtrace. */
+	prof_bt_t		*bt;
+
+	/* Protects cnt_merged and cnts_ql. */
+	malloc_mutex_t		*lock;
+
+	/* Temporary storage for summation during dump. */
+	prof_cnt_t		cnt_summed;
+
+	/* When threads exit, they merge their stats into cnt_merged. */
+	prof_cnt_t		cnt_merged;
+
+	/*
+	 * List of profile counters, one for each thread that has allocated in
+	 * this context.
+	 */
+	ql_head(prof_thr_cnt_t)	cnts_ql;
+};
+
+struct prof_tdata_s {
+	/*
+	 * Hash of (prof_bt_t *)-->(prof_thr_cnt_t *).  Each thread keeps a
+	 * cache of backtraces, with associated thread-specific prof_thr_cnt_t
+	 * objects.  Other threads may read the prof_thr_cnt_t contents, but no
+	 * others will ever write them.
+	 *
+	 * Upon thread exit, the thread must merge all the prof_thr_cnt_t
+	 * counter data into the associated prof_ctx_t objects, and unlink/free
+	 * the prof_thr_cnt_t objects.
+	 */
+	ckh_t			bt2cnt;
+
+	/* LRU for contents of bt2cnt. */
+	ql_head(prof_thr_cnt_t)	lru_ql;
+
+	/* Backtrace vector, used for calls to prof_backtrace(). */
+	void			**vec;
+
+	/* Sampling state. */
+	uint64_t		prng_state;
+	uint64_t		threshold;
+	uint64_t		accum;
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+extern bool	opt_prof;
+/*
+ * Even if opt_prof is true, sampling can be temporarily disabled by setting
+ * opt_prof_active to false.  No locking is used when updating opt_prof_active,
+ * so there are no guarantees regarding how long it will take for all threads
+ * to notice state changes.
+ */
+extern bool	opt_prof_active;
+extern size_t	opt_lg_prof_sample;   /* Mean bytes between samples. */
+extern ssize_t	opt_lg_prof_interval; /* lg(prof_interval). */
+extern bool	opt_prof_gdump;       /* High-water memory dumping. */
+extern bool	opt_prof_leak;        /* Dump leak summary at exit. */
+extern bool	opt_prof_accum;       /* Report cumulative bytes. */
+extern char	opt_prof_prefix[PATH_MAX + 1];
+
+/*
+ * Profile dump interval, measured in bytes allocated.  Each arena triggers a
+ * profile dump when it reaches this threshold.  The effect is that the
+ * interval between profile dumps averages prof_interval, though the actual
+ * interval between dumps will tend to be sporadic, and the interval will be a
+ * maximum of approximately (prof_interval * narenas).
+ */
+extern uint64_t	prof_interval;
+
+/*
+ * If true, promote small sampled objects to large objects, since small run
+ * headers do not have embedded profile context pointers.
+ */
+extern bool	prof_promote;
+
+void	bt_init(prof_bt_t *bt, void **vec);
+void	prof_backtrace(prof_bt_t *bt, unsigned nignore);
+prof_thr_cnt_t	*prof_lookup(prof_bt_t *bt);
+void	prof_idump(void);
+bool	prof_mdump(const char *filename);
+void	prof_gdump(void);
+prof_tdata_t	*prof_tdata_init(void);
+void	prof_tdata_cleanup(void *arg);
+void	prof_boot0(void);
+void	prof_boot1(void);
+bool	prof_boot2(void);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#define	PROF_ALLOC_PREP(nignore, size, ret) do {			\
+	prof_tdata_t *prof_tdata;					\
+	prof_bt_t bt;							\
+									\
+	assert(size == s2u(size));					\
+									\
+	prof_tdata = *prof_tdata_tsd_get();				\
+	if (prof_tdata == NULL) {					\
+		prof_tdata = prof_tdata_init();				\
+		if (prof_tdata == NULL) {				\
+			ret = NULL;					\
+			break;						\
+		}							\
+	}								\
+									\
+	if (opt_prof_active == false) {					\
+		/* Sampling is currently inactive, so avoid sampling. */\
+		ret = (prof_thr_cnt_t *)(uintptr_t)1U;			\
+	} else if (opt_lg_prof_sample == 0) {				\
+		/* Don't bother with sampling logic, since sampling   */\
+		/* interval is 1.                                     */\
+		bt_init(&bt, prof_tdata->vec);				\
+		prof_backtrace(&bt, nignore);				\
+		ret = prof_lookup(&bt);					\
+	} else {							\
+		if (prof_tdata->threshold == 0) {			\
+			/* Initialize.  Seed the prng differently for */\
+			/* each thread.                               */\
+			prof_tdata->prng_state =			\
+			    (uint64_t)(uintptr_t)&size;			\
+			prof_sample_threshold_update(prof_tdata);	\
+		}							\
+									\
+		/* Determine whether to capture a backtrace based on  */\
+		/* whether size is enough for prof_accum to reach     */\
+		/* prof_tdata->threshold.  However, delay updating    */\
+		/* these variables until prof_{m,re}alloc(), because  */\
+		/* we don't know for sure that the allocation will    */\
+		/* succeed.                                           */\
+		/*                                                    */\
+		/* Use subtraction rather than addition to avoid      */\
+		/* potential integer overflow.                        */\
+		if (size >= prof_tdata->threshold -			\
+		    prof_tdata->accum) {				\
+			bt_init(&bt, prof_tdata->vec);			\
+			prof_backtrace(&bt, nignore);			\
+			ret = prof_lookup(&bt);				\
+		} else							\
+			ret = (prof_thr_cnt_t *)(uintptr_t)1U;		\
+	}								\
+} while (0)
+
+#ifndef JEMALLOC_ENABLE_INLINE
+malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *)
+
+void	prof_sample_threshold_update(prof_tdata_t *prof_tdata);
+prof_ctx_t	*prof_ctx_get(const void *ptr);
+void	prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
+bool	prof_sample_accum_update(size_t size);
+void	prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt);
+void	prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
+    size_t old_size, prof_ctx_t *old_ctx);
+void	prof_free(const void *ptr, size_t size);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
+/* Thread-specific backtrace cache, used to reduce bt2ctx contention. */
+malloc_tsd_externs(prof_tdata, prof_tdata_t *)
+malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL,
+    prof_tdata_cleanup)
+
+JEMALLOC_INLINE void
+prof_sample_threshold_update(prof_tdata_t *prof_tdata)
+{
+	uint64_t r;
+	double u;
+
+	cassert(config_prof);
+
+	/*
+	 * Compute sample threshold as a geometrically distributed random
+	 * variable with mean (2^opt_lg_prof_sample).
+	 *
+	 *                         __        __
+	 *                         |  log(u)  |                     1
+	 * prof_tdata->threshold = | -------- |, where p = -------------------
+	 *                         | log(1-p) |             opt_lg_prof_sample
+	 *                                                 2
+	 *
+	 * For more information on the math, see:
+	 *
+	 *   Non-Uniform Random Variate Generation
+	 *   Luc Devroye
+	 *   Springer-Verlag, New York, 1986
+	 *   pp 500
+	 *   (http://cg.scs.carleton.ca/~luc/rnbookindex.html)
+	 */
+	prng64(r, 53, prof_tdata->prng_state,
+	    UINT64_C(6364136223846793005), UINT64_C(1442695040888963407));
+	u = (double)r * (1.0/9007199254740992.0L);
+	prof_tdata->threshold = (uint64_t)(log(u) /
+	    log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
+	    + (uint64_t)1U;
+}
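
The closed form above is inverse-transform sampling of a geometric distribution: if each allocated byte independently triggers a sample with probability p = 2^-opt_lg_prof_sample, the number of bytes until the next sample satisfies (restating the comment's formula in LaTeX):

    \[
    \Pr[X > k] = (1 - p)^k
    \;\Longrightarrow\;
    X = \left\lceil \frac{\ln u}{\ln(1 - p)} \right\rceil,
    \qquad u \sim \mathrm{Uniform}(0, 1],
    \]

and the 1.0/9007199254740992.0 factor is 2^-53, mapping the 53 bits produced by prng64() onto a double's mantissa range.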
+
+JEMALLOC_INLINE prof_ctx_t *
+prof_ctx_get(const void *ptr)
+{
+	prof_ctx_t *ret;
+	arena_chunk_t *chunk;
+
+	cassert(config_prof);
+	assert(ptr != NULL);
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	if (chunk != ptr) {
+		/* Region. */
+		ret = arena_prof_ctx_get(ptr);
+	} else
+		ret = huge_prof_ctx_get(ptr);
+
+	return (ret);
+}
+
+JEMALLOC_INLINE void
+prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
+{
+	arena_chunk_t *chunk;
+
+	cassert(config_prof);
+	assert(ptr != NULL);
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	if (chunk != ptr) {
+		/* Region. */
+		arena_prof_ctx_set(ptr, ctx);
+	} else
+		huge_prof_ctx_set(ptr, ctx);
+}
+
+JEMALLOC_INLINE bool
+prof_sample_accum_update(size_t size)
+{
+	prof_tdata_t *prof_tdata;
+
+	cassert(config_prof);
+	/* Sampling logic is unnecessary if the interval is 1. */
+	assert(opt_lg_prof_sample != 0);
+
+	prof_tdata = *prof_tdata_tsd_get();
+	assert(prof_tdata != NULL);
+
+	/* Take care to avoid integer overflow. */
+	if (size >= prof_tdata->threshold - prof_tdata->accum) {
+		prof_tdata->accum -= (prof_tdata->threshold - size);
+		/* Compute new sample threshold. */
+		prof_sample_threshold_update(prof_tdata);
+		while (prof_tdata->accum >= prof_tdata->threshold) {
+			prof_tdata->accum -= prof_tdata->threshold;
+			prof_sample_threshold_update(prof_tdata);
+		}
+		return (false);
+	} else {
+		prof_tdata->accum += size;
+		return (true);
+	}
+}
+
+JEMALLOC_INLINE void
+prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
+{
+
+	cassert(config_prof);
+	assert(ptr != NULL);
+	assert(size == isalloc(ptr, true));
+
+	if (opt_lg_prof_sample != 0) {
+		if (prof_sample_accum_update(size)) {
+			/*
+			 * Don't sample.  For malloc()-like allocation, it is
+			 * always possible to tell in advance how large an
+			 * object's usable size will be, so there should never
+			 * be a difference between the size passed to
+			 * PROF_ALLOC_PREP() and prof_malloc().
+			 */
+			assert((uintptr_t)cnt == (uintptr_t)1U);
+		}
+	}
+
+	if ((uintptr_t)cnt > (uintptr_t)1U) {
+		prof_ctx_set(ptr, cnt->ctx);
+
+		cnt->epoch++;
+		/*********/
+		mb_write();
+		/*********/
+		cnt->cnts.curobjs++;
+		cnt->cnts.curbytes += size;
+		if (opt_prof_accum) {
+			cnt->cnts.accumobjs++;
+			cnt->cnts.accumbytes += size;
+		}
+		/*********/
+		mb_write();
+		/*********/
+		cnt->epoch++;
+		/*********/
+		mb_write();
+		/*********/
+	} else
+		prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
+}
+
+JEMALLOC_INLINE void
+prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
+    size_t old_size, prof_ctx_t *old_ctx)
+{
+	prof_thr_cnt_t *told_cnt;
+
+	cassert(config_prof);
+	assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);
+
+	if (ptr != NULL) {
+		assert(size == isalloc(ptr, true));
+		if (opt_lg_prof_sample != 0) {
+			if (prof_sample_accum_update(size)) {
+				/*
+				 * Don't sample.  The size passed to
+				 * PROF_ALLOC_PREP() was larger than what
+				 * actually got allocated, so a backtrace was
+				 * captured for this allocation, even though
+				 * its actual size was insufficient to cross
+				 * the sample threshold.
+				 */
+				cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
+			}
+		}
+	}
+
+	if ((uintptr_t)old_ctx > (uintptr_t)1U) {
+		told_cnt = prof_lookup(old_ctx->bt);
+		if (told_cnt == NULL) {
+			/*
+			 * It's too late to propagate OOM for this realloc(),
+			 * so operate directly on old_ctx->cnt_merged.
+			 */
+			malloc_mutex_lock(old_ctx->lock);
+			old_ctx->cnt_merged.curobjs--;
+			old_ctx->cnt_merged.curbytes -= old_size;
+			malloc_mutex_unlock(old_ctx->lock);
+			told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
+		}
+	} else
+		told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
+
+	if ((uintptr_t)told_cnt > (uintptr_t)1U)
+		told_cnt->epoch++;
+	if ((uintptr_t)cnt > (uintptr_t)1U) {
+		prof_ctx_set(ptr, cnt->ctx);
+		cnt->epoch++;
+	} else
+		prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
+	/*********/
+	mb_write();
+	/*********/
+	if ((uintptr_t)told_cnt > (uintptr_t)1U) {
+		told_cnt->cnts.curobjs--;
+		told_cnt->cnts.curbytes -= old_size;
+	}
+	if ((uintptr_t)cnt > (uintptr_t)1U) {
+		cnt->cnts.curobjs++;
+		cnt->cnts.curbytes += size;
+		if (opt_prof_accum) {
+			cnt->cnts.accumobjs++;
+			cnt->cnts.accumbytes += size;
+		}
+	}
+	/*********/
+	mb_write();
+	/*********/
+	if ((uintptr_t)told_cnt > (uintptr_t)1U)
+		told_cnt->epoch++;
+	if ((uintptr_t)cnt > (uintptr_t)1U)
+		cnt->epoch++;
+	/*********/
+	mb_write(); /* Not strictly necessary. */
+}
+
+JEMALLOC_INLINE void
+prof_free(const void *ptr, size_t size)
+{
+	prof_ctx_t *ctx = prof_ctx_get(ptr);
+
+	cassert(config_prof);
+
+	if ((uintptr_t)ctx > (uintptr_t)1) {
+		assert(size == isalloc(ptr, true));
+		prof_thr_cnt_t *tcnt = prof_lookup(ctx->bt);
+
+		if (tcnt != NULL) {
+			tcnt->epoch++;
+			/*********/
+			mb_write();
+			/*********/
+			tcnt->cnts.curobjs--;
+			tcnt->cnts.curbytes -= size;
+			/*********/
+			mb_write();
+			/*********/
+			tcnt->epoch++;
+			/*********/
+			mb_write();
+			/*********/
+		} else {
+			/*
+			 * OOM during free() cannot be propagated, so operate
+			 * directly on ctx->cnt_merged.
+			 */
+			malloc_mutex_lock(ctx->lock);
+			ctx->cnt_merged.curobjs--;
+			ctx->cnt_merged.curbytes -= size;
+			malloc_mutex_unlock(ctx->lock);
+		}
+	}
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/ql.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/ql.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,83 @@
+/*
+ * List definitions.
+ */
+#define ql_head(a_type)							\
+struct {								\
+	a_type *qlh_first;						\
+}
+
+#define ql_head_initializer(a_head) {NULL}
+
+#define ql_elm(a_type)	qr(a_type)
+
+/* List functions. */
+#define ql_new(a_head) do {						\
+	(a_head)->qlh_first = NULL;					\
+} while (0)
+
+#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
+
+#define ql_first(a_head) ((a_head)->qlh_first)
+
+#define ql_last(a_head, a_field)					\
+	((ql_first(a_head) != NULL)					\
+	    ? qr_prev(ql_first(a_head), a_field) : NULL)
+
+#define ql_next(a_head, a_elm, a_field)					\
+	((ql_last(a_head, a_field) != (a_elm))				\
+	    ? qr_next((a_elm), a_field)	: NULL)
+
+#define ql_prev(a_head, a_elm, a_field)					\
+	((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field)	\
+				       : NULL)
+
+#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do {		\
+	qr_before_insert((a_qlelm), (a_elm), a_field);			\
+	if (ql_first(a_head) == (a_qlelm)) {				\
+		ql_first(a_head) = (a_elm);				\
+	}								\
+} while (0)
+
+#define ql_after_insert(a_qlelm, a_elm, a_field)			\
+	qr_after_insert((a_qlelm), (a_elm), a_field)
+
+#define ql_head_insert(a_head, a_elm, a_field) do {			\
+	if (ql_first(a_head) != NULL) {					\
+		qr_before_insert(ql_first(a_head), (a_elm), a_field);	\
+	}								\
+	ql_first(a_head) = (a_elm);					\
+} while (0)
+
+#define ql_tail_insert(a_head, a_elm, a_field) do {			\
+	if (ql_first(a_head) != NULL) {					\
+		qr_before_insert(ql_first(a_head), (a_elm), a_field);	\
+	}								\
+	ql_first(a_head) = qr_next((a_elm), a_field);			\
+} while (0)
+
+#define ql_remove(a_head, a_elm, a_field) do {				\
+	if (ql_first(a_head) == (a_elm)) {				\
+		ql_first(a_head) = qr_next(ql_first(a_head), a_field);	\
+	}								\
+	if (ql_first(a_head) != (a_elm)) {				\
+		qr_remove((a_elm), a_field);				\
+	} else {							\
+		ql_first(a_head) = NULL;				\
+	}								\
+} while (0)
+
+#define ql_head_remove(a_head, a_type, a_field) do {			\
+	a_type *t = ql_first(a_head);					\
+	ql_remove((a_head), t, a_field);				\
+} while (0)
+
+#define ql_tail_remove(a_head, a_type, a_field) do {			\
+	a_type *t = ql_last(a_head, a_field);				\
+	ql_remove((a_head), t, a_field);				\
+} while (0)
+
+#define ql_foreach(a_var, a_head, a_field)				\
+	qr_foreach((a_var), ql_first(a_head), a_field)
+
+#define ql_reverse_foreach(a_var, a_head, a_field)			\
+	qr_reverse_foreach((a_var), ql_first(a_head), a_field)
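
ql.h layers a NULL-terminated list interface over the circular rings defined in qr.h (next file); the linkage is intrusive, so each element embeds its own link fields, exactly as prof_thr_cnt_t does above. A small usage sketch with a made-up element type (assumes qr.h and ql.h are on the include path):

    #include <stdio.h>

    #include "qr.h"
    #include "ql.h"

    typedef struct widget_s widget_t;
    struct widget_s {
        int                 id;
        ql_elm(widget_t)    link;    /* Intrusive linkage. */
    };

    int
    main(void)
    {
        ql_head(widget_t) head = ql_head_initializer(head);
        widget_t a = {.id = 1}, b = {.id = 2};
        widget_t *w;

        ql_elm_new(&a, link);
        ql_elm_new(&b, link);
        ql_tail_insert(&head, &a, link);
        ql_tail_insert(&head, &b, link);

        ql_foreach(w, &head, link)
            printf("widget %d\n", w->id);    /* Prints 1, then 2. */
        return (0);
    }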
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/qr.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/qr.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,67 @@
+/* Ring definitions. */
+#define qr(a_type)							\
+struct {								\
+	a_type	*qre_next;						\
+	a_type	*qre_prev;						\
+}
+
+/* Ring functions. */
+#define qr_new(a_qr, a_field) do {					\
+	(a_qr)->a_field.qre_next = (a_qr);				\
+	(a_qr)->a_field.qre_prev = (a_qr);				\
+} while (0)
+
+#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
+
+#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
+
+#define qr_before_insert(a_qrelm, a_qr, a_field) do {			\
+	(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev;		\
+	(a_qr)->a_field.qre_next = (a_qrelm);				\
+	(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr);		\
+	(a_qrelm)->a_field.qre_prev = (a_qr);				\
+} while (0)
+
+#define qr_after_insert(a_qrelm, a_qr, a_field) do {			\
+	(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next;		\
+	(a_qr)->a_field.qre_prev = (a_qrelm);				\
+	(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr);		\
+	(a_qrelm)->a_field.qre_next = (a_qr);				\
+} while (0)
+
+#define qr_meld(a_qr_a, a_qr_b, a_field) do {				\
+	void *t;							\
+	(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b);	\
+	(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a);	\
+	t = (a_qr_a)->a_field.qre_prev;					\
+	(a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev;	\
+	(a_qr_b)->a_field.qre_prev = t;					\
+} while (0)
+
+/*
+ * qr_meld() and qr_split() are functionally equivalent, so there's no need to
+ * have two copies of the code.
+ */
+#define qr_split(a_qr_a, a_qr_b, a_field)				\
+	qr_meld((a_qr_a), (a_qr_b), a_field)
+
+#define qr_remove(a_qr, a_field) do {					\
+	(a_qr)->a_field.qre_prev->a_field.qre_next			\
+	    = (a_qr)->a_field.qre_next;					\
+	(a_qr)->a_field.qre_next->a_field.qre_prev			\
+	    = (a_qr)->a_field.qre_prev;					\
+	(a_qr)->a_field.qre_next = (a_qr);				\
+	(a_qr)->a_field.qre_prev = (a_qr);				\
+} while (0)
+
+#define qr_foreach(var, a_qr, a_field)					\
+	for ((var) = (a_qr);						\
+	    (var) != NULL;						\
+	    (var) = (((var)->a_field.qre_next != (a_qr))		\
+	    ? (var)->a_field.qre_next : NULL))
+
+#define qr_reverse_foreach(var, a_qr, a_field)				\
+	for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL;	\
+	    (var) != NULL;						\
+	    (var) = (((var) != (a_qr))					\
+	    ? (var)->a_field.qre_prev : NULL))
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/quarantine.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/quarantine.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,24 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+/* Default per thread quarantine size if valgrind is enabled. */
+#define	JEMALLOC_VALGRIND_QUARANTINE_DEFAULT	(ZU(1) << 24)
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void	quarantine(void *ptr);
+bool	quarantine_boot(void);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/rb.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/rb.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,973 @@
+/*-
+ *******************************************************************************
+ *
+ * cpp macro implementation of left-leaning 2-3 red-black trees.  Parent
+ * pointers are not used, and color bits are stored in the least significant
+ * bit of right-child pointers (if RB_COMPACT is defined), thus making node
+ * linkage as compact as is possible for red-black trees.
+ *
+ * Usage:
+ *
+ *   #include <stdint.h>
+ *   #include <stdbool.h>
+ *   #define NDEBUG // (Optional, see assert(3).)
+ *   #include <assert.h>
+ *   #define RB_COMPACT // (Optional, embed color bits in right-child pointers.)
+ *   #include <rb.h>
+ *   ...
+ *
+ *******************************************************************************
+ */
+
+#ifndef RB_H_
+#define	RB_H_
+
+#if 0
+__FBSDID("$FreeBSD: head/contrib/jemalloc/include/jemalloc/internal/rb.h 234370 2012-04-17 07:22:14Z jasone $");
+#endif
+
+#ifdef RB_COMPACT
+/* Node structure. */
+#define	rb_node(a_type)							\
+struct {								\
+    a_type *rbn_left;							\
+    a_type *rbn_right_red;						\
+}
+#else
+#define	rb_node(a_type)							\
+struct {								\
+    a_type *rbn_left;							\
+    a_type *rbn_right;							\
+    bool rbn_red;							\
+}
+#endif
+
+/* Root structure. */
+#define	rb_tree(a_type)							\
+struct {								\
+    a_type *rbt_root;							\
+    a_type rbt_nil;							\
+}
+
+/* Left accessors. */
+#define	rbtn_left_get(a_type, a_field, a_node)				\
+    ((a_node)->a_field.rbn_left)
+#define	rbtn_left_set(a_type, a_field, a_node, a_left) do {		\
+    (a_node)->a_field.rbn_left = a_left;				\
+} while (0)
+
+#ifdef RB_COMPACT
+/* Right accessors. */
+#define	rbtn_right_get(a_type, a_field, a_node)				\
+    ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red)		\
+      & ((ssize_t)-2)))
+#define	rbtn_right_set(a_type, a_field, a_node, a_right) do {		\
+    (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right)	\
+      | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1)));	\
+} while (0)
+
+/* Color accessors. */
+#define	rbtn_red_get(a_type, a_field, a_node)				\
+    ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red)		\
+      & ((size_t)1)))
+#define	rbtn_color_set(a_type, a_field, a_node, a_red) do {		\
+    (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t)		\
+      (a_node)->a_field.rbn_right_red) & ((ssize_t)-2))			\
+      | ((ssize_t)a_red));						\
+} while (0)
+#define	rbtn_red_set(a_type, a_field, a_node) do {			\
+    (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t)		\
+      (a_node)->a_field.rbn_right_red) | ((size_t)1));			\
+} while (0)
+#define	rbtn_black_set(a_type, a_field, a_node) do {			\
+    (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t)		\
+      (a_node)->a_field.rbn_right_red) & ((ssize_t)-2));		\
+} while (0)
+#else
+/* Right accessors. */
+#define	rbtn_right_get(a_type, a_field, a_node)				\
+    ((a_node)->a_field.rbn_right)
+#define	rbtn_right_set(a_type, a_field, a_node, a_right) do {		\
+    (a_node)->a_field.rbn_right = a_right;				\
+} while (0)
+
+/* Color accessors. */
+#define	rbtn_red_get(a_type, a_field, a_node)				\
+    ((a_node)->a_field.rbn_red)
+#define	rbtn_color_set(a_type, a_field, a_node, a_red) do {		\
+    (a_node)->a_field.rbn_red = (a_red);				\
+} while (0)
+#define	rbtn_red_set(a_type, a_field, a_node) do {			\
+    (a_node)->a_field.rbn_red = true;					\
+} while (0)
+#define	rbtn_black_set(a_type, a_field, a_node) do {			\
+    (a_node)->a_field.rbn_red = false;					\
+} while (0)
+#endif
+
+/* Node initializer. */
+#define	rbt_node_new(a_type, a_field, a_rbt, a_node) do {		\
+    rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil);	\
+    rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil);	\
+    rbtn_red_set(a_type, a_field, (a_node));				\
+} while (0)
+
+/* Tree initializer. */
+#define	rb_new(a_type, a_field, a_rbt) do {				\
+    (a_rbt)->rbt_root = &(a_rbt)->rbt_nil;				\
+    rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil);		\
+    rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil);			\
+} while (0)
+
+/* Internal utility macros. */
+#define	rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do {		\
+    (r_node) = (a_root);						\
+    if ((r_node) != &(a_rbt)->rbt_nil) {				\
+	for (;								\
+	  rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil;\
+	  (r_node) = rbtn_left_get(a_type, a_field, (r_node))) {	\
+	}								\
+    }									\
+} while (0)
+
+#define	rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do {		\
+    (r_node) = (a_root);						\
+    if ((r_node) != &(a_rbt)->rbt_nil) {				\
+	for (; rbtn_right_get(a_type, a_field, (r_node)) !=		\
+	  &(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field,	\
+	  (r_node))) {							\
+	}								\
+    }									\
+} while (0)
+
+#define	rbtn_rotate_left(a_type, a_field, a_node, r_node) do {		\
+    (r_node) = rbtn_right_get(a_type, a_field, (a_node));		\
+    rbtn_right_set(a_type, a_field, (a_node),				\
+      rbtn_left_get(a_type, a_field, (r_node)));			\
+    rbtn_left_set(a_type, a_field, (r_node), (a_node));			\
+} while (0)
+
+#define	rbtn_rotate_right(a_type, a_field, a_node, r_node) do {		\
+    (r_node) = rbtn_left_get(a_type, a_field, (a_node));		\
+    rbtn_left_set(a_type, a_field, (a_node),				\
+      rbtn_right_get(a_type, a_field, (r_node)));			\
+    rbtn_right_set(a_type, a_field, (r_node), (a_node));		\
+} while (0)
+
+/*
+ * The rb_proto() macro generates function prototypes that correspond to the
+ * functions generated by an equivalently parameterized call to rb_gen().
+ */
+
+#define	rb_proto(a_attr, a_prefix, a_rbt_type, a_type)			\
+a_attr void								\
+a_prefix##new(a_rbt_type *rbtree);					\
+a_attr a_type *								\
+a_prefix##first(a_rbt_type *rbtree);					\
+a_attr a_type *								\
+a_prefix##last(a_rbt_type *rbtree);					\
+a_attr a_type *								\
+a_prefix##next(a_rbt_type *rbtree, a_type *node);			\
+a_attr a_type *								\
+a_prefix##prev(a_rbt_type *rbtree, a_type *node);			\
+a_attr a_type *								\
+a_prefix##search(a_rbt_type *rbtree, a_type *key);			\
+a_attr a_type *								\
+a_prefix##nsearch(a_rbt_type *rbtree, a_type *key);			\
+a_attr a_type *								\
+a_prefix##psearch(a_rbt_type *rbtree, a_type *key);			\
+a_attr void								\
+a_prefix##insert(a_rbt_type *rbtree, a_type *node);			\
+a_attr void								\
+a_prefix##remove(a_rbt_type *rbtree, a_type *node);			\
+a_attr a_type *								\
+a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)(	\
+  a_rbt_type *, a_type *, void *), void *arg);				\
+a_attr a_type *								\
+a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start,		\
+  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg);
+
+/*
+ * The rb_gen() macro generates a type-specific red-black tree implementation,
+ * based on the above cpp macros.
+ *
+ * Arguments:
+ *
+ *   a_attr    : Function attribute for generated functions (ex: static).
+ *   a_prefix  : Prefix for generated functions (ex: ex_).
+ *   a_rbt_type: Type for red-black tree data structure (ex: ex_t).
+ *   a_type    : Type for red-black tree node data structure (ex: ex_node_t).
+ *   a_field   : Name of red-black tree node linkage (ex: ex_link).
+ *   a_cmp     : Node comparison function name, with the following prototype:
+ *                 int (a_cmp *)(a_type *a_node, a_type *a_other);
+ *                                       ^^^^^^
+ *                                    or a_key
+ *               Interpretation of comparison function return values:
+ *                 -1 : a_node <  a_other
+ *                  0 : a_node == a_other
+ *                  1 : a_node >  a_other
+ *               In all cases, the a_node or a_key macro argument is the first
+ *               argument to the comparison function, which makes it possible
+ *               to write comparison functions that treat the first argument
+ *               specially.
+ *
+ * Assuming the following setup:
+ *
+ *   typedef struct ex_node_s ex_node_t;
+ *   struct ex_node_s {
+ *       rb_node(ex_node_t) ex_link;
+ *   };
+ *   typedef rb_tree(ex_node_t) ex_t;
+ *   rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)
+ *
+ * The following API is generated:
+ *
+ *   static void
+ *   ex_new(ex_t *tree);
+ *       Description: Initialize a red-black tree structure.
+ *       Args:
+ *         tree: Pointer to an uninitialized red-black tree object.
+ *
+ *   static ex_node_t *
+ *   ex_first(ex_t *tree);
+ *   static ex_node_t *
+ *   ex_last(ex_t *tree);
+ *       Description: Get the first/last node in tree.
+ *       Args:
+ *         tree: Pointer to an initialized red-black tree object.
+ *       Ret: First/last node in tree, or NULL if tree is empty.
+ *
+ *   static ex_node_t *
+ *   ex_next(ex_t *tree, ex_node_t *node);
+ *   static ex_node_t *
+ *   ex_prev(ex_t *tree, ex_node_t *node);
+ *       Description: Get node's successor/predecessor.
+ *       Args:
+ *         tree: Pointer to an initialized red-black tree object.
+ *         node: A node in tree.
+ *       Ret: node's successor/predecessor in tree, or NULL if node is
+ *            last/first.
+ *
+ *   static ex_node_t *
+ *   ex_search(ex_t *tree, ex_node_t *key);
+ *       Description: Search for node that matches key.
+ *       Args:
+ *         tree: Pointer to an initialized red-black tree object.
+ *         key : Search key.
+ *       Ret: Node in tree that matches key, or NULL if no match.
+ *
+ *   static ex_node_t *
+ *   ex_nsearch(ex_t *tree, ex_node_t *key);
+ *   static ex_node_t *
+ *   ex_psearch(ex_t *tree, ex_node_t *key);
+ *       Description: Search for node that matches key.  If no match is found,
+ *                    return what would be key's successor/predecessor, were
+ *                    key in tree.
+ *       Args:
+ *         tree: Pointer to an initialized red-black tree object.
+ *         key : Search key.
+ *       Ret: Node in tree that matches key, or if no match, hypothetical node's
+ *            successor/predecessor (NULL if no successor/predecessor).
+ *
+ *   static void
+ *   ex_insert(ex_t *tree, ex_node_t *node);
+ *       Description: Insert node into tree.
+ *       Args:
+ *         tree: Pointer to an initialized red-black tree object.
+ *         node: Node to be inserted into tree.
+ *
+ *   static void
+ *   ex_remove(ex_t *tree, ex_node_t *node);
+ *       Description: Remove node from tree.
+ *       Args:
+ *         tree: Pointer to an initialized red-black tree object.
+ *         node: Node in tree to be removed.
+ *
+ *   static ex_node_t *
+ *   ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *,
+ *     ex_node_t *, void *), void *arg);
+ *   static ex_node_t *
+ *   ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *,
+ *     ex_node_t *, void *), void *arg);
+ *       Description: Iterate forward/backward over tree, starting at node.  If
+ *                    tree is modified, iteration must be immediately
+ *                    terminated by the callback function that causes the
+ *                    modification.
+ *       Args:
+ *         tree : Pointer to an initialized red-black tree object.
+ *         start: Node at which to start iteration, or NULL to start at
+ *                first/last node.
+ *         cb   : Callback function, which is called for each node during
+ *                iteration.  Under normal circumstances the callback function
+ *                should return NULL, which causes iteration to continue.  If a
+ *                callback function returns non-NULL, iteration is immediately
+ *                terminated and the non-NULL return value is returned by the
+ *                iterator.  This is useful for re-starting iteration after
+ *                modifying tree.
+ *         arg  : Opaque pointer passed to cb().
+ *       Ret: NULL if iteration completed, or the non-NULL callback return value
+ *            that caused termination of the iteration.
+ */
+#define	rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp)	\
+a_attr void								\
+a_prefix##new(a_rbt_type *rbtree) {					\
+    rb_new(a_type, a_field, rbtree);					\
+}									\
+a_attr a_type *								\
+a_prefix##first(a_rbt_type *rbtree) {					\
+    a_type *ret;							\
+    rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret);		\
+    if (ret == &rbtree->rbt_nil) {					\
+	ret = NULL;							\
+    }									\
+    return (ret);							\
+}									\
+a_attr a_type *								\
+a_prefix##last(a_rbt_type *rbtree) {					\
+    a_type *ret;							\
+    rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret);		\
+    if (ret == &rbtree->rbt_nil) {					\
+	ret = NULL;							\
+    }									\
+    return (ret);							\
+}									\
+a_attr a_type *								\
+a_prefix##next(a_rbt_type *rbtree, a_type *node) {			\
+    a_type *ret;							\
+    if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) {	\
+	rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type,	\
+	  a_field, node), ret);						\
+    } else {								\
+	a_type *tnode = rbtree->rbt_root;				\
+	assert(tnode != &rbtree->rbt_nil);				\
+	ret = &rbtree->rbt_nil;						\
+	while (true) {							\
+	    int cmp = (a_cmp)(node, tnode);				\
+	    if (cmp < 0) {						\
+		ret = tnode;						\
+		tnode = rbtn_left_get(a_type, a_field, tnode);		\
+	    } else if (cmp > 0) {					\
+		tnode = rbtn_right_get(a_type, a_field, tnode);		\
+	    } else {							\
+		break;							\
+	    }								\
+	    assert(tnode != &rbtree->rbt_nil);				\
+	}								\
+    }									\
+    if (ret == &rbtree->rbt_nil) {					\
+	ret = (NULL);							\
+    }									\
+    return (ret);							\
+}									\
+a_attr a_type *								\
+a_prefix##prev(a_rbt_type *rbtree, a_type *node) {			\
+    a_type *ret;							\
+    if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) {	\
+	rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type,	\
+	  a_field, node), ret);						\
+    } else {								\
+	a_type *tnode = rbtree->rbt_root;				\
+	assert(tnode != &rbtree->rbt_nil);				\
+	ret = &rbtree->rbt_nil;						\
+	while (true) {							\
+	    int cmp = (a_cmp)(node, tnode);				\
+	    if (cmp < 0) {						\
+		tnode = rbtn_left_get(a_type, a_field, tnode);		\
+	    } else if (cmp > 0) {					\
+		ret = tnode;						\
+		tnode = rbtn_right_get(a_type, a_field, tnode);		\
+	    } else {							\
+		break;							\
+	    }								\
+	    assert(tnode != &rbtree->rbt_nil);				\
+	}								\
+    }									\
+    if (ret == &rbtree->rbt_nil) {					\
+	ret = (NULL);							\
+    }									\
+    return (ret);							\
+}									\
+a_attr a_type *								\
+a_prefix##search(a_rbt_type *rbtree, a_type *key) {			\
+    a_type *ret;							\
+    int cmp;								\
+    ret = rbtree->rbt_root;						\
+    while (ret != &rbtree->rbt_nil					\
+      && (cmp = (a_cmp)(key, ret)) != 0) {				\
+	if (cmp < 0) {							\
+	    ret = rbtn_left_get(a_type, a_field, ret);			\
+	} else {							\
+	    ret = rbtn_right_get(a_type, a_field, ret);			\
+	}								\
+    }									\
+    if (ret == &rbtree->rbt_nil) {					\
+	ret = (NULL);							\
+    }									\
+    return (ret);							\
+}									\
+a_attr a_type *								\
+a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) {			\
+    a_type *ret;							\
+    a_type *tnode = rbtree->rbt_root;					\
+    ret = &rbtree->rbt_nil;						\
+    while (tnode != &rbtree->rbt_nil) {					\
+	int cmp = (a_cmp)(key, tnode);					\
+	if (cmp < 0) {							\
+	    ret = tnode;						\
+	    tnode = rbtn_left_get(a_type, a_field, tnode);		\
+	} else if (cmp > 0) {						\
+	    tnode = rbtn_right_get(a_type, a_field, tnode);		\
+	} else {							\
+	    ret = tnode;						\
+	    break;							\
+	}								\
+    }									\
+    if (ret == &rbtree->rbt_nil) {					\
+	ret = (NULL);							\
+    }									\
+    return (ret);							\
+}									\
+a_attr a_type *								\
+a_prefix##psearch(a_rbt_type *rbtree, a_type *key) {			\
+    a_type *ret;							\
+    a_type *tnode = rbtree->rbt_root;					\
+    ret = &rbtree->rbt_nil;						\
+    while (tnode != &rbtree->rbt_nil) {					\
+	int cmp = (a_cmp)(key, tnode);					\
+	if (cmp < 0) {							\
+	    tnode = rbtn_left_get(a_type, a_field, tnode);		\
+	} else if (cmp > 0) {						\
+	    ret = tnode;						\
+	    tnode = rbtn_right_get(a_type, a_field, tnode);		\
+	} else {							\
+	    ret = tnode;						\
+	    break;							\
+	}								\
+    }									\
+    if (ret == &rbtree->rbt_nil) {					\
+	ret = (NULL);							\
+    }									\
+    return (ret);							\
+}									\
+a_attr void								\
+a_prefix##insert(a_rbt_type *rbtree, a_type *node) {			\
+    struct {								\
+	a_type *node;							\
+	int cmp;							\
+    } path[sizeof(void *) << 4], *pathp;				\
+    rbt_node_new(a_type, a_field, rbtree, node);			\
+    /* Wind. */								\
+    path->node = rbtree->rbt_root;					\
+    for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) {	\
+	int cmp = pathp->cmp = a_cmp(node, pathp->node);		\
+	assert(cmp != 0);						\
+	if (cmp < 0) {							\
+	    pathp[1].node = rbtn_left_get(a_type, a_field,		\
+	      pathp->node);						\
+	} else {							\
+	    pathp[1].node = rbtn_right_get(a_type, a_field,		\
+	      pathp->node);						\
+	}								\
+    }									\
+    pathp->node = node;							\
+    /* Unwind. */							\
+    for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) {	\
+	a_type *cnode = pathp->node;					\
+	if (pathp->cmp < 0) {						\
+	    a_type *left = pathp[1].node;				\
+	    rbtn_left_set(a_type, a_field, cnode, left);		\
+	    if (rbtn_red_get(a_type, a_field, left)) {			\
+		a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
+		if (rbtn_red_get(a_type, a_field, leftleft)) {		\
+		    /* Fix up 4-node. */				\
+		    a_type *tnode;					\
+		    rbtn_black_set(a_type, a_field, leftleft);		\
+		    rbtn_rotate_right(a_type, a_field, cnode, tnode);	\
+		    cnode = tnode;					\
+		}							\
+	    } else {							\
+		return;							\
+	    }								\
+	} else {							\
+	    a_type *right = pathp[1].node;				\
+	    rbtn_right_set(a_type, a_field, cnode, right);		\
+	    if (rbtn_red_get(a_type, a_field, right)) {			\
+		a_type *left = rbtn_left_get(a_type, a_field, cnode);	\
+		if (rbtn_red_get(a_type, a_field, left)) {		\
+		    /* Split 4-node. */					\
+		    rbtn_black_set(a_type, a_field, left);		\
+		    rbtn_black_set(a_type, a_field, right);		\
+		    rbtn_red_set(a_type, a_field, cnode);		\
+		} else {						\
+		    /* Lean left. */					\
+		    a_type *tnode;					\
+		    bool tred = rbtn_red_get(a_type, a_field, cnode);	\
+		    rbtn_rotate_left(a_type, a_field, cnode, tnode);	\
+		    rbtn_color_set(a_type, a_field, tnode, tred);	\
+		    rbtn_red_set(a_type, a_field, cnode);		\
+		    cnode = tnode;					\
+		}							\
+	    } else {							\
+		return;							\
+	    }								\
+	}								\
+	pathp->node = cnode;						\
+    }									\
+    /* Set root, and make it black. */					\
+    rbtree->rbt_root = path->node;					\
+    rbtn_black_set(a_type, a_field, rbtree->rbt_root);			\
+}									\
+a_attr void								\
+a_prefix##remove(a_rbt_type *rbtree, a_type *node) {			\
+    struct {								\
+	a_type *node;							\
+	int cmp;							\
+    } *pathp, *nodep, path[sizeof(void *) << 4];			\
+    /* Wind. */								\
+    nodep = NULL; /* Silence compiler warning. */			\
+    path->node = rbtree->rbt_root;					\
+    for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) {	\
+	int cmp = pathp->cmp = a_cmp(node, pathp->node);		\
+	if (cmp < 0) {							\
+	    pathp[1].node = rbtn_left_get(a_type, a_field,		\
+	      pathp->node);						\
+	} else {							\
+	    pathp[1].node = rbtn_right_get(a_type, a_field,		\
+	      pathp->node);						\
+	    if (cmp == 0) {						\
+	        /* Find node's successor, in preparation for swap. */	\
+		pathp->cmp = 1;						\
+		nodep = pathp;						\
+		for (pathp++; pathp->node != &rbtree->rbt_nil;		\
+		  pathp++) {						\
+		    pathp->cmp = -1;					\
+		    pathp[1].node = rbtn_left_get(a_type, a_field,	\
+		      pathp->node);					\
+		}							\
+		break;							\
+	    }								\
+	}								\
+    }									\
+    assert(nodep->node == node);					\
+    pathp--;								\
+    if (pathp->node != node) {						\
+	/* Swap node with its successor. */				\
+	bool tred = rbtn_red_get(a_type, a_field, pathp->node);		\
+	rbtn_color_set(a_type, a_field, pathp->node,			\
+	  rbtn_red_get(a_type, a_field, node));				\
+	rbtn_left_set(a_type, a_field, pathp->node,			\
+	  rbtn_left_get(a_type, a_field, node));			\
+	/* If node's successor is its right child, the following code */\
+	/* will do the wrong thing for the right child pointer.       */\
+	/* However, it doesn't matter, because the pointer will be    */\
+	/* properly set when the successor is pruned.                 */\
+	rbtn_right_set(a_type, a_field, pathp->node,			\
+	  rbtn_right_get(a_type, a_field, node));			\
+	rbtn_color_set(a_type, a_field, node, tred);			\
+	/* The pruned leaf node's child pointers are never accessed   */\
+	/* again, so don't bother setting them to nil.                */\
+	nodep->node = pathp->node;					\
+	pathp->node = node;						\
+	if (nodep == path) {						\
+	    rbtree->rbt_root = nodep->node;				\
+	} else {							\
+	    if (nodep[-1].cmp < 0) {					\
+		rbtn_left_set(a_type, a_field, nodep[-1].node,		\
+		  nodep->node);						\
+	    } else {							\
+		rbtn_right_set(a_type, a_field, nodep[-1].node,		\
+		  nodep->node);						\
+	    }								\
+	}								\
+    } else {								\
+	a_type *left = rbtn_left_get(a_type, a_field, node);		\
+	if (left != &rbtree->rbt_nil) {					\
+	    /* node has no successor, but it has a left child.        */\
+	    /* Splice node out, without losing the left child.        */\
+	    assert(rbtn_red_get(a_type, a_field, node) == false);	\
+	    assert(rbtn_red_get(a_type, a_field, left));		\
+	    rbtn_black_set(a_type, a_field, left);			\
+	    if (pathp == path) {					\
+		rbtree->rbt_root = left;				\
+	    } else {							\
+		if (pathp[-1].cmp < 0) {				\
+		    rbtn_left_set(a_type, a_field, pathp[-1].node,	\
+		      left);						\
+		} else {						\
+		    rbtn_right_set(a_type, a_field, pathp[-1].node,	\
+		      left);						\
+		}							\
+	    }								\
+	    return;							\
+	} else if (pathp == path) {					\
+	    /* The tree only contained one node. */			\
+	    rbtree->rbt_root = &rbtree->rbt_nil;			\
+	    return;							\
+	}								\
+    }									\
+    if (rbtn_red_get(a_type, a_field, pathp->node)) {			\
+	/* Prune red node, which requires no fixup. */			\
+	assert(pathp[-1].cmp < 0);					\
+	rbtn_left_set(a_type, a_field, pathp[-1].node,			\
+	  &rbtree->rbt_nil);						\
+	return;								\
+    }									\
+    /* The node to be pruned is black, so unwind until balance is     */\
+    /* restored.                                                      */\
+    pathp->node = &rbtree->rbt_nil;					\
+    for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) {	\
+	assert(pathp->cmp != 0);					\
+	if (pathp->cmp < 0) {						\
+	    rbtn_left_set(a_type, a_field, pathp->node,			\
+	      pathp[1].node);						\
+	    assert(rbtn_red_get(a_type, a_field, pathp[1].node)		\
+	      == false);						\
+	    if (rbtn_red_get(a_type, a_field, pathp->node)) {		\
+		a_type *right = rbtn_right_get(a_type, a_field,		\
+		  pathp->node);						\
+		a_type *rightleft = rbtn_left_get(a_type, a_field,	\
+		  right);						\
+		a_type *tnode;						\
+		if (rbtn_red_get(a_type, a_field, rightleft)) {		\
+		    /* In the following diagrams, ||, //, and \\      */\
+		    /* indicate the path to the removed node.         */\
+		    /*                                                */\
+		    /*      ||                                        */\
+		    /*    pathp(r)                                    */\
+		    /*  //        \                                   */\
+		    /* (b)        (b)                                 */\
+		    /*           /                                    */\
+		    /*          (r)                                   */\
+		    /*                                                */\
+		    rbtn_black_set(a_type, a_field, pathp->node);	\
+		    rbtn_rotate_right(a_type, a_field, right, tnode);	\
+		    rbtn_right_set(a_type, a_field, pathp->node, tnode);\
+		    rbtn_rotate_left(a_type, a_field, pathp->node,	\
+		      tnode);						\
+		} else {						\
+		    /*      ||                                        */\
+		    /*    pathp(r)                                    */\
+		    /*  //        \                                   */\
+		    /* (b)        (b)                                 */\
+		    /*           /                                    */\
+		    /*          (b)                                   */\
+		    /*                                                */\
+		    rbtn_rotate_left(a_type, a_field, pathp->node,	\
+		      tnode);						\
+		}							\
+		/* Balance restored, but rotation modified subtree    */\
+		/* root.                                              */\
+		assert((uintptr_t)pathp > (uintptr_t)path);		\
+		if (pathp[-1].cmp < 0) {				\
+		    rbtn_left_set(a_type, a_field, pathp[-1].node,	\
+		      tnode);						\
+		} else {						\
+		    rbtn_right_set(a_type, a_field, pathp[-1].node,	\
+		      tnode);						\
+		}							\
+		return;							\
+	    } else {							\
+		a_type *right = rbtn_right_get(a_type, a_field,		\
+		  pathp->node);						\
+		a_type *rightleft = rbtn_left_get(a_type, a_field,	\
+		  right);						\
+		if (rbtn_red_get(a_type, a_field, rightleft)) {		\
+		    /*      ||                                        */\
+		    /*    pathp(b)                                    */\
+		    /*  //        \                                   */\
+		    /* (b)        (b)                                 */\
+		    /*           /                                    */\
+		    /*          (r)                                   */\
+		    a_type *tnode;					\
+		    rbtn_black_set(a_type, a_field, rightleft);		\
+		    rbtn_rotate_right(a_type, a_field, right, tnode);	\
+		    rbtn_right_set(a_type, a_field, pathp->node, tnode);\
+		    rbtn_rotate_left(a_type, a_field, pathp->node,	\
+		      tnode);						\
+		    /* Balance restored, but rotation modified        */\
+		    /* subtree root, which may actually be the tree   */\
+		    /* root.                                          */\
+		    if (pathp == path) {				\
+			/* Set root. */					\
+			rbtree->rbt_root = tnode;			\
+		    } else {						\
+			if (pathp[-1].cmp < 0) {			\
+			    rbtn_left_set(a_type, a_field,		\
+			      pathp[-1].node, tnode);			\
+			} else {					\
+			    rbtn_right_set(a_type, a_field,		\
+			      pathp[-1].node, tnode);			\
+			}						\
+		    }							\
+		    return;						\
+		} else {						\
+		    /*      ||                                        */\
+		    /*    pathp(b)                                    */\
+		    /*  //        \                                   */\
+		    /* (b)        (b)                                 */\
+		    /*           /                                    */\
+		    /*          (b)                                   */\
+		    a_type *tnode;					\
+		    rbtn_red_set(a_type, a_field, pathp->node);		\
+		    rbtn_rotate_left(a_type, a_field, pathp->node,	\
+		      tnode);						\
+		    pathp->node = tnode;				\
+		}							\
+	    }								\
+	} else {							\
+	    a_type *left;						\
+	    rbtn_right_set(a_type, a_field, pathp->node,		\
+	      pathp[1].node);						\
+	    left = rbtn_left_get(a_type, a_field, pathp->node);		\
+	    if (rbtn_red_get(a_type, a_field, left)) {			\
+		a_type *tnode;						\
+		a_type *leftright = rbtn_right_get(a_type, a_field,	\
+		  left);						\
+		a_type *leftrightleft = rbtn_left_get(a_type, a_field,	\
+		  leftright);						\
+		if (rbtn_red_get(a_type, a_field, leftrightleft)) {	\
+		    /*      ||                                        */\
+		    /*    pathp(b)                                    */\
+		    /*   /        \\                                  */\
+		    /* (r)        (b)                                 */\
+		    /*   \                                            */\
+		    /*   (b)                                          */\
+		    /*   /                                            */\
+		    /* (r)                                            */\
+		    a_type *unode;					\
+		    rbtn_black_set(a_type, a_field, leftrightleft);	\
+		    rbtn_rotate_right(a_type, a_field, pathp->node,	\
+		      unode);						\
+		    rbtn_rotate_right(a_type, a_field, pathp->node,	\
+		      tnode);						\
+		    rbtn_right_set(a_type, a_field, unode, tnode);	\
+		    rbtn_rotate_left(a_type, a_field, unode, tnode);	\
+		} else {						\
+		    /*      ||                                        */\
+		    /*    pathp(b)                                    */\
+		    /*   /        \\                                  */\
+		    /* (r)        (b)                                 */\
+		    /*   \                                            */\
+		    /*   (b)                                          */\
+		    /*   /                                            */\
+		    /* (b)                                            */\
+		    assert(leftright != &rbtree->rbt_nil);		\
+		    rbtn_red_set(a_type, a_field, leftright);		\
+		    rbtn_rotate_right(a_type, a_field, pathp->node,	\
+		      tnode);						\
+		    rbtn_black_set(a_type, a_field, tnode);		\
+		}							\
+		/* Balance restored, but rotation modified subtree    */\
+		/* root, which may actually be the tree root.         */\
+		if (pathp == path) {					\
+		    /* Set root. */					\
+		    rbtree->rbt_root = tnode;				\
+		} else {						\
+		    if (pathp[-1].cmp < 0) {				\
+			rbtn_left_set(a_type, a_field, pathp[-1].node,	\
+			  tnode);					\
+		    } else {						\
+			rbtn_right_set(a_type, a_field, pathp[-1].node,	\
+			  tnode);					\
+		    }							\
+		}							\
+		return;							\
+	    } else if (rbtn_red_get(a_type, a_field, pathp->node)) {	\
+		a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
+		if (rbtn_red_get(a_type, a_field, leftleft)) {		\
+		    /*        ||                                      */\
+		    /*      pathp(r)                                  */\
+		    /*     /        \\                                */\
+		    /*   (b)        (b)                               */\
+		    /*   /                                            */\
+		    /* (r)                                            */\
+		    a_type *tnode;					\
+		    rbtn_black_set(a_type, a_field, pathp->node);	\
+		    rbtn_red_set(a_type, a_field, left);		\
+		    rbtn_black_set(a_type, a_field, leftleft);		\
+		    rbtn_rotate_right(a_type, a_field, pathp->node,	\
+		      tnode);						\
+		    /* Balance restored, but rotation modified        */\
+		    /* subtree root.                                  */\
+		    assert((uintptr_t)pathp > (uintptr_t)path);		\
+		    if (pathp[-1].cmp < 0) {				\
+			rbtn_left_set(a_type, a_field, pathp[-1].node,	\
+			  tnode);					\
+		    } else {						\
+			rbtn_right_set(a_type, a_field, pathp[-1].node,	\
+			  tnode);					\
+		    }							\
+		    return;						\
+		} else {						\
+		    /*        ||                                      */\
+		    /*      pathp(r)                                  */\
+		    /*     /        \\                                */\
+		    /*   (b)        (b)                               */\
+		    /*   /                                            */\
+		    /* (b)                                            */\
+		    rbtn_red_set(a_type, a_field, left);		\
+		    rbtn_black_set(a_type, a_field, pathp->node);	\
+		    /* Balance restored. */				\
+		    return;						\
+		}							\
+	    } else {							\
+		a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
+		if (rbtn_red_get(a_type, a_field, leftleft)) {		\
+		    /*               ||                               */\
+		    /*             pathp(b)                           */\
+		    /*            /        \\                         */\
+		    /*          (b)        (b)                        */\
+		    /*          /                                     */\
+		    /*        (r)                                     */\
+		    a_type *tnode;					\
+		    rbtn_black_set(a_type, a_field, leftleft);		\
+		    rbtn_rotate_right(a_type, a_field, pathp->node,	\
+		      tnode);						\
+		    /* Balance restored, but rotation modified        */\
+		    /* subtree root, which may actually be the tree   */\
+		    /* root.                                          */\
+		    if (pathp == path) {				\
+			/* Set root. */					\
+			rbtree->rbt_root = tnode;			\
+		    } else {						\
+			if (pathp[-1].cmp < 0) {			\
+			    rbtn_left_set(a_type, a_field,		\
+			      pathp[-1].node, tnode);			\
+			} else {					\
+			    rbtn_right_set(a_type, a_field,		\
+			      pathp[-1].node, tnode);			\
+			}						\
+		    }							\
+		    return;						\
+		} else {						\
+		    /*               ||                               */\
+		    /*             pathp(b)                           */\
+		    /*            /        \\                         */\
+		    /*          (b)        (b)                        */\
+		    /*          /                                     */\
+		    /*        (b)                                     */\
+		    rbtn_red_set(a_type, a_field, left);		\
+		}							\
+	    }								\
+	}								\
+    }									\
+    /* Set root. */							\
+    rbtree->rbt_root = path->node;					\
+    assert(rbtn_red_get(a_type, a_field, rbtree->rbt_root) == false);	\
+}									\
+a_attr a_type *								\
+a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node,		\
+  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) {		\
+    if (node == &rbtree->rbt_nil) {					\
+	return (&rbtree->rbt_nil);					\
+    } else {								\
+	a_type *ret;							\
+	if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type,	\
+	  a_field, node), cb, arg)) != &rbtree->rbt_nil			\
+	  || (ret = cb(rbtree, node, arg)) != NULL) {			\
+	    return (ret);						\
+	}								\
+	return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type,	\
+	  a_field, node), cb, arg));					\
+    }									\
+}									\
+a_attr a_type *								\
+a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node,	\
+  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) {		\
+    int cmp = a_cmp(start, node);					\
+    if (cmp < 0) {							\
+	a_type *ret;							\
+	if ((ret = a_prefix##iter_start(rbtree, start,			\
+	  rbtn_left_get(a_type, a_field, node), cb, arg)) !=		\
+	  &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) {	\
+	    return (ret);						\
+	}								\
+	return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type,	\
+	  a_field, node), cb, arg));					\
+    } else if (cmp > 0) {						\
+	return (a_prefix##iter_start(rbtree, start,			\
+	  rbtn_right_get(a_type, a_field, node), cb, arg));		\
+    } else {								\
+	a_type *ret;							\
+	if ((ret = cb(rbtree, node, arg)) != NULL) {			\
+	    return (ret);						\
+	}								\
+	return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type,	\
+	  a_field, node), cb, arg));					\
+    }									\
+}									\
+a_attr a_type *								\
+a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)(	\
+  a_rbt_type *, a_type *, void *), void *arg) {				\
+    a_type *ret;							\
+    if (start != NULL) {						\
+	ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root,	\
+	  cb, arg);							\
+    } else {								\
+	ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
+    }									\
+    if (ret == &rbtree->rbt_nil) {					\
+	ret = NULL;							\
+    }									\
+    return (ret);							\
+}									\
+a_attr a_type *								\
+a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node,	\
+  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) {		\
+    if (node == &rbtree->rbt_nil) {					\
+	return (&rbtree->rbt_nil);					\
+    } else {								\
+	a_type *ret;							\
+	if ((ret = a_prefix##reverse_iter_recurse(rbtree,		\
+	  rbtn_right_get(a_type, a_field, node), cb, arg)) !=		\
+	  &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) {	\
+	    return (ret);						\
+	}								\
+	return (a_prefix##reverse_iter_recurse(rbtree,			\
+	  rbtn_left_get(a_type, a_field, node), cb, arg));		\
+    }									\
+}									\
+a_attr a_type *								\
+a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start,		\
+  a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *),		\
+  void *arg) {								\
+    int cmp = a_cmp(start, node);					\
+    if (cmp > 0) {							\
+	a_type *ret;							\
+	if ((ret = a_prefix##reverse_iter_start(rbtree, start,		\
+	  rbtn_right_get(a_type, a_field, node), cb, arg)) !=		\
+	  &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) {	\
+	    return (ret);						\
+	}								\
+	return (a_prefix##reverse_iter_recurse(rbtree,			\
+	  rbtn_left_get(a_type, a_field, node), cb, arg));		\
+    } else if (cmp < 0) {						\
+	return (a_prefix##reverse_iter_start(rbtree, start,		\
+	  rbtn_left_get(a_type, a_field, node), cb, arg));		\
+    } else {								\
+	a_type *ret;							\
+	if ((ret = cb(rbtree, node, arg)) != NULL) {			\
+	    return (ret);						\
+	}								\
+	return (a_prefix##reverse_iter_recurse(rbtree,			\
+	  rbtn_left_get(a_type, a_field, node), cb, arg));		\
+    }									\
+}									\
+a_attr a_type *								\
+a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start,		\
+  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) {		\
+    a_type *ret;							\
+    if (start != NULL) {						\
+	ret = a_prefix##reverse_iter_start(rbtree, start,		\
+	  rbtree->rbt_root, cb, arg);					\
+    } else {								\
+	ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root,	\
+	  cb, arg);							\
+    }									\
+    if (ret == &rbtree->rbt_nil) {					\
+	ret = NULL;							\
+    }									\
+    return (ret);							\
+}
+
+#endif /* RB_H_ */
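
The API documented above is easiest to see end-to-end with a concrete comparator. The following is a minimal usage sketch, not part of the patch: the struct layout, the ex_visit() callback, and the example() driver are purely illustrative, and it assumes this rb.h plus the usual libc headers are reachable.

#include <assert.h>
#include <stddef.h>
#include "rb.h"			/* illustrative include path */

typedef struct ex_node_s ex_node_t;
struct ex_node_s {
	int	key;			/* payload consumed by the comparator */
	rb_node(ex_node_t) ex_link;	/* embedded red-black linkage */
};
typedef rb_tree(ex_node_t) ex_t;

/* Comparator: negative/zero/positive; the first argument may be a search key. */
static int
ex_cmp(ex_node_t *a, ex_node_t *b)
{
	return ((a->key > b->key) - (a->key < b->key));
}

rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)

/* ex_iter() callback: returning NULL keeps the iteration going. */
static ex_node_t *
ex_visit(ex_t *tree, ex_node_t *node, void *arg)
{
	(void)tree;
	*(int *)arg += node->key;
	return (NULL);
}

int
example(void)
{
	ex_t tree;
	ex_node_t a = {1}, b = {2};	/* linkage is initialized by ex_insert() */
	int sum = 0;

	ex_new(&tree);
	ex_insert(&tree, &a);
	ex_insert(&tree, &b);
	assert(ex_first(&tree) == &a);
	(void)ex_iter(&tree, NULL, ex_visit, &sum);	/* sum == 3 */
	ex_remove(&tree, &b);
	ex_remove(&tree, &a);
	return (sum);
}
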
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/rtree.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/rtree.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,161 @@
+/*
+ * This radix tree implementation is tailored to the singular purpose of
+ * tracking which chunks are currently owned by jemalloc.  This functionality
+ * is mandatory for OS X, where jemalloc must be able to respond to object
+ * ownership queries.
+ *
+ *******************************************************************************
+ */
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct rtree_s rtree_t;
+
+/*
+ * Size of each radix tree node (must be a power of 2).  This impacts tree
+ * depth.
+ */
+#if (LG_SIZEOF_PTR == 2)
+#  define RTREE_NODESIZE (1U << 14)
+#else
+#  define RTREE_NODESIZE CACHELINE
+#endif
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct rtree_s {
+	malloc_mutex_t	mutex;
+	void		**root;
+	unsigned	height;
+	unsigned	level2bits[1]; /* Dynamically sized. */
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+rtree_t	*rtree_new(unsigned bits);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+#ifndef JEMALLOC_DEBUG
+void	*rtree_get_locked(rtree_t *rtree, uintptr_t key);
+#endif
+void	*rtree_get(rtree_t *rtree, uintptr_t key);
+bool	rtree_set(rtree_t *rtree, uintptr_t key, void *val);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
+#define	RTREE_GET_GENERATE(f)						\
+/* The least significant bits of the key are ignored. */		\
+JEMALLOC_INLINE void *							\
+f(rtree_t *rtree, uintptr_t key)					\
+{									\
+	void *ret;							\
+	uintptr_t subkey;						\
+	unsigned i, lshift, height, bits;				\
+	void **node, **child;						\
+									\
+	RTREE_LOCK(&rtree->mutex);					\
+	for (i = lshift = 0, height = rtree->height, node = rtree->root;\
+	    i < height - 1;						\
+	    i++, lshift += bits, node = child) {			\
+		bits = rtree->level2bits[i];				\
+		subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
+		    3)) - bits);					\
+		child = (void**)node[subkey];				\
+		if (child == NULL) {					\
+			RTREE_UNLOCK(&rtree->mutex);			\
+			return (NULL);					\
+		}							\
+	}								\
+									\
+	/*								\
+	 * node is a leaf, so it contains values rather than node	\
+	 * pointers.							\
+	 */								\
+	bits = rtree->level2bits[i];					\
+	subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -	\
+	    bits);							\
+	ret = node[subkey];						\
+	RTREE_UNLOCK(&rtree->mutex);					\
+									\
+	RTREE_GET_VALIDATE						\
+	return (ret);							\
+}
+
+#ifdef JEMALLOC_DEBUG
+#  define RTREE_LOCK(l)		malloc_mutex_lock(l)
+#  define RTREE_UNLOCK(l)	malloc_mutex_unlock(l)
+#  define RTREE_GET_VALIDATE
+RTREE_GET_GENERATE(rtree_get_locked)
+#  undef RTREE_LOCK
+#  undef RTREE_UNLOCK
+#  undef RTREE_GET_VALIDATE
+#endif
+
+#define	RTREE_LOCK(l)
+#define	RTREE_UNLOCK(l)
+#ifdef JEMALLOC_DEBUG
+   /*
+    * Suppose that it were possible for a jemalloc-allocated chunk to be
+    * munmap()ped, followed by a different allocator in another thread re-using
+    * overlapping virtual memory, all without invalidating the cached rtree
+    * value.  The result would be a false positive (the rtree would claim that
+    * jemalloc owns memory that it had actually discarded).  This scenario
+    * seems impossible, but the following assertion is a prudent sanity check.
+    */
+#  define RTREE_GET_VALIDATE						\
+	assert(rtree_get_locked(rtree, key) == ret);
+#else
+#  define RTREE_GET_VALIDATE
+#endif
+RTREE_GET_GENERATE(rtree_get)
+#undef RTREE_LOCK
+#undef RTREE_UNLOCK
+#undef RTREE_GET_VALIDATE
+
+JEMALLOC_INLINE bool
+rtree_set(rtree_t *rtree, uintptr_t key, void *val)
+{
+	uintptr_t subkey;
+	unsigned i, lshift, height, bits;
+	void **node, **child;
+
+	malloc_mutex_lock(&rtree->mutex);
+	for (i = lshift = 0, height = rtree->height, node = rtree->root;
+	    i < height - 1;
+	    i++, lshift += bits, node = child) {
+		bits = rtree->level2bits[i];
+		subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
+		    bits);
+		child = (void**)node[subkey];
+		if (child == NULL) {
+			child = (void**)base_alloc(sizeof(void *) <<
+			    rtree->level2bits[i+1]);
+			if (child == NULL) {
+				malloc_mutex_unlock(&rtree->mutex);
+				return (true);
+			}
+			memset(child, 0, sizeof(void *) <<
+			    rtree->level2bits[i+1]);
+			node[subkey] = child;
+		}
+	}
+
+	/* node is a leaf, so it contains values rather than node pointers. */
+	bits = rtree->level2bits[i];
+	subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
+	node[subkey] = val;
+	malloc_mutex_unlock(&rtree->mutex);
+
+	return (false);
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
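
For orientation, here is a hedged sketch of how this rtree is meant to be driven for chunk-ownership tracking. The EX_LG_CHUNK constant and the ex_* wrapper names are made up for illustration, and the snippet assumes jemalloc's internal headers (rtree_t, rtree_new(), rtree_set(), rtree_get(), bool, uintptr_t) are already in scope.

#define	EX_LG_CHUNK	22	/* hypothetical chunk size: 1 << 22 bytes */

static rtree_t *ex_chunks_rtree;

/* Key on every pointer bit above the chunk offset; returns true on error. */
static bool
ex_rtree_boot(void)
{
	ex_chunks_rtree = rtree_new((sizeof(void *) << 3) - EX_LG_CHUNK);
	return (ex_chunks_rtree == NULL);
}

/* Record that a chunk-aligned region is owned; true means allocation failure. */
static bool
ex_chunk_register(void *chunk)
{
	return (rtree_set(ex_chunks_rtree, (uintptr_t)chunk, chunk));
}

/* Ownership query: the low EX_LG_CHUNK key bits are ignored by the lookup. */
static bool
ex_chunk_owned(const void *ptr)
{
	return (rtree_get(ex_chunks_rtree, (uintptr_t)ptr) != NULL);
}
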
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/size_classes.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/size_classes.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,721 @@
+/* This file was automatically generated by size_classes.sh. */
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 12)
+#define	SIZE_CLASSES_DEFINED
+/*  SIZE_CLASS(bin,	delta,	sz) */
+#define	SIZE_CLASSES							\
+    SIZE_CLASS(0,	8,	8)					\
+    SIZE_CLASS(1,	8,	16)					\
+    SIZE_CLASS(2,	8,	24)					\
+    SIZE_CLASS(3,	8,	32)					\
+    SIZE_CLASS(4,	8,	40)					\
+    SIZE_CLASS(5,	8,	48)					\
+    SIZE_CLASS(6,	8,	56)					\
+    SIZE_CLASS(7,	8,	64)					\
+    SIZE_CLASS(8,	16,	80)					\
+    SIZE_CLASS(9,	16,	96)					\
+    SIZE_CLASS(10,	16,	112)					\
+    SIZE_CLASS(11,	16,	128)					\
+    SIZE_CLASS(12,	32,	160)					\
+    SIZE_CLASS(13,	32,	192)					\
+    SIZE_CLASS(14,	32,	224)					\
+    SIZE_CLASS(15,	32,	256)					\
+    SIZE_CLASS(16,	64,	320)					\
+    SIZE_CLASS(17,	64,	384)					\
+    SIZE_CLASS(18,	64,	448)					\
+    SIZE_CLASS(19,	64,	512)					\
+    SIZE_CLASS(20,	128,	640)					\
+    SIZE_CLASS(21,	128,	768)					\
+    SIZE_CLASS(22,	128,	896)					\
+    SIZE_CLASS(23,	128,	1024)					\
+    SIZE_CLASS(24,	256,	1280)					\
+    SIZE_CLASS(25,	256,	1536)					\
+    SIZE_CLASS(26,	256,	1792)					\
+    SIZE_CLASS(27,	256,	2048)					\
+    SIZE_CLASS(28,	512,	2560)					\
+    SIZE_CLASS(29,	512,	3072)					\
+    SIZE_CLASS(30,	512,	3584)					\
+
+#define	NBINS		31
+#define	SMALL_MAXCLASS	3584
+#endif
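
Each configuration above expands SIZE_CLASS(bin, delta, sz) once per bin, where delta is the spacing from the previous class and sz is the class size; NBINS and SMALL_MAXCLASS summarize the table. As a hedged illustration of how such an X-macro table is typically consumed (the ex_* names are invented for this sketch, which assumes the surrounding jemalloc headers provide size_t and the macros):

/* Expand the X-macro table into an array of small size classes. */
static const size_t	ex_small_sizes[NBINS] = {
#define	SIZE_CLASS(bin, delta, sz)	sz,
	SIZE_CLASSES
#undef SIZE_CLASS
};

/* Round a small request up to its size class; 0 means "not small". */
static size_t
ex_small_s2u(size_t size)
{
	unsigned i;

	for (i = 0; i < NBINS; i++) {
		if (size <= ex_small_sizes[i])
			return (ex_small_sizes[i]);
	}
	return (0);	/* size > SMALL_MAXCLASS */
}
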
+
+#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 13)
+#define	SIZE_CLASSES_DEFINED
+/*  SIZE_CLASS(bin,	delta,	sz) */
+#define	SIZE_CLASSES							\
+    SIZE_CLASS(0,	8,	8)					\
+    SIZE_CLASS(1,	8,	16)					\
+    SIZE_CLASS(2,	8,	24)					\
+    SIZE_CLASS(3,	8,	32)					\
+    SIZE_CLASS(4,	8,	40)					\
+    SIZE_CLASS(5,	8,	48)					\
+    SIZE_CLASS(6,	8,	56)					\
+    SIZE_CLASS(7,	8,	64)					\
+    SIZE_CLASS(8,	16,	80)					\
+    SIZE_CLASS(9,	16,	96)					\
+    SIZE_CLASS(10,	16,	112)					\
+    SIZE_CLASS(11,	16,	128)					\
+    SIZE_CLASS(12,	32,	160)					\
+    SIZE_CLASS(13,	32,	192)					\
+    SIZE_CLASS(14,	32,	224)					\
+    SIZE_CLASS(15,	32,	256)					\
+    SIZE_CLASS(16,	64,	320)					\
+    SIZE_CLASS(17,	64,	384)					\
+    SIZE_CLASS(18,	64,	448)					\
+    SIZE_CLASS(19,	64,	512)					\
+    SIZE_CLASS(20,	128,	640)					\
+    SIZE_CLASS(21,	128,	768)					\
+    SIZE_CLASS(22,	128,	896)					\
+    SIZE_CLASS(23,	128,	1024)					\
+    SIZE_CLASS(24,	256,	1280)					\
+    SIZE_CLASS(25,	256,	1536)					\
+    SIZE_CLASS(26,	256,	1792)					\
+    SIZE_CLASS(27,	256,	2048)					\
+    SIZE_CLASS(28,	512,	2560)					\
+    SIZE_CLASS(29,	512,	3072)					\
+    SIZE_CLASS(30,	512,	3584)					\
+    SIZE_CLASS(31,	512,	4096)					\
+    SIZE_CLASS(32,	1024,	5120)					\
+    SIZE_CLASS(33,	1024,	6144)					\
+    SIZE_CLASS(34,	1024,	7168)					\
+
+#define	NBINS		35
+#define	SMALL_MAXCLASS	7168
+#endif
+
+#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 14)
+#define	SIZE_CLASSES_DEFINED
+/*  SIZE_CLASS(bin,	delta,	sz) */
+#define	SIZE_CLASSES							\
+    SIZE_CLASS(0,	8,	8)					\
+    SIZE_CLASS(1,	8,	16)					\
+    SIZE_CLASS(2,	8,	24)					\
+    SIZE_CLASS(3,	8,	32)					\
+    SIZE_CLASS(4,	8,	40)					\
+    SIZE_CLASS(5,	8,	48)					\
+    SIZE_CLASS(6,	8,	56)					\
+    SIZE_CLASS(7,	8,	64)					\
+    SIZE_CLASS(8,	16,	80)					\
+    SIZE_CLASS(9,	16,	96)					\
+    SIZE_CLASS(10,	16,	112)					\
+    SIZE_CLASS(11,	16,	128)					\
+    SIZE_CLASS(12,	32,	160)					\
+    SIZE_CLASS(13,	32,	192)					\
+    SIZE_CLASS(14,	32,	224)					\
+    SIZE_CLASS(15,	32,	256)					\
+    SIZE_CLASS(16,	64,	320)					\
+    SIZE_CLASS(17,	64,	384)					\
+    SIZE_CLASS(18,	64,	448)					\
+    SIZE_CLASS(19,	64,	512)					\
+    SIZE_CLASS(20,	128,	640)					\
+    SIZE_CLASS(21,	128,	768)					\
+    SIZE_CLASS(22,	128,	896)					\
+    SIZE_CLASS(23,	128,	1024)					\
+    SIZE_CLASS(24,	256,	1280)					\
+    SIZE_CLASS(25,	256,	1536)					\
+    SIZE_CLASS(26,	256,	1792)					\
+    SIZE_CLASS(27,	256,	2048)					\
+    SIZE_CLASS(28,	512,	2560)					\
+    SIZE_CLASS(29,	512,	3072)					\
+    SIZE_CLASS(30,	512,	3584)					\
+    SIZE_CLASS(31,	512,	4096)					\
+    SIZE_CLASS(32,	1024,	5120)					\
+    SIZE_CLASS(33,	1024,	6144)					\
+    SIZE_CLASS(34,	1024,	7168)					\
+    SIZE_CLASS(35,	1024,	8192)					\
+    SIZE_CLASS(36,	2048,	10240)					\
+    SIZE_CLASS(37,	2048,	12288)					\
+    SIZE_CLASS(38,	2048,	14336)					\
+
+#define	NBINS		39
+#define	SMALL_MAXCLASS	14336
+#endif
+
+#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 15)
+#define	SIZE_CLASSES_DEFINED
+/*  SIZE_CLASS(bin,	delta,	sz) */
+#define	SIZE_CLASSES							\
+    SIZE_CLASS(0,	8,	8)					\
+    SIZE_CLASS(1,	8,	16)					\
+    SIZE_CLASS(2,	8,	24)					\
+    SIZE_CLASS(3,	8,	32)					\
+    SIZE_CLASS(4,	8,	40)					\
+    SIZE_CLASS(5,	8,	48)					\
+    SIZE_CLASS(6,	8,	56)					\
+    SIZE_CLASS(7,	8,	64)					\
+    SIZE_CLASS(8,	16,	80)					\
+    SIZE_CLASS(9,	16,	96)					\
+    SIZE_CLASS(10,	16,	112)					\
+    SIZE_CLASS(11,	16,	128)					\
+    SIZE_CLASS(12,	32,	160)					\
+    SIZE_CLASS(13,	32,	192)					\
+    SIZE_CLASS(14,	32,	224)					\
+    SIZE_CLASS(15,	32,	256)					\
+    SIZE_CLASS(16,	64,	320)					\
+    SIZE_CLASS(17,	64,	384)					\
+    SIZE_CLASS(18,	64,	448)					\
+    SIZE_CLASS(19,	64,	512)					\
+    SIZE_CLASS(20,	128,	640)					\
+    SIZE_CLASS(21,	128,	768)					\
+    SIZE_CLASS(22,	128,	896)					\
+    SIZE_CLASS(23,	128,	1024)					\
+    SIZE_CLASS(24,	256,	1280)					\
+    SIZE_CLASS(25,	256,	1536)					\
+    SIZE_CLASS(26,	256,	1792)					\
+    SIZE_CLASS(27,	256,	2048)					\
+    SIZE_CLASS(28,	512,	2560)					\
+    SIZE_CLASS(29,	512,	3072)					\
+    SIZE_CLASS(30,	512,	3584)					\
+    SIZE_CLASS(31,	512,	4096)					\
+    SIZE_CLASS(32,	1024,	5120)					\
+    SIZE_CLASS(33,	1024,	6144)					\
+    SIZE_CLASS(34,	1024,	7168)					\
+    SIZE_CLASS(35,	1024,	8192)					\
+    SIZE_CLASS(36,	2048,	10240)					\
+    SIZE_CLASS(37,	2048,	12288)					\
+    SIZE_CLASS(38,	2048,	14336)					\
+    SIZE_CLASS(39,	2048,	16384)					\
+    SIZE_CLASS(40,	4096,	20480)					\
+    SIZE_CLASS(41,	4096,	24576)					\
+    SIZE_CLASS(42,	4096,	28672)					\
+
+#define	NBINS		43
+#define	SMALL_MAXCLASS	28672
+#endif
+
+#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 16)
+#define	SIZE_CLASSES_DEFINED
+/*  SIZE_CLASS(bin,	delta,	sz) */
+#define	SIZE_CLASSES							\
+    SIZE_CLASS(0,	8,	8)					\
+    SIZE_CLASS(1,	8,	16)					\
+    SIZE_CLASS(2,	8,	24)					\
+    SIZE_CLASS(3,	8,	32)					\
+    SIZE_CLASS(4,	8,	40)					\
+    SIZE_CLASS(5,	8,	48)					\
+    SIZE_CLASS(6,	8,	56)					\
+    SIZE_CLASS(7,	8,	64)					\
+    SIZE_CLASS(8,	16,	80)					\
+    SIZE_CLASS(9,	16,	96)					\
+    SIZE_CLASS(10,	16,	112)					\
+    SIZE_CLASS(11,	16,	128)					\
+    SIZE_CLASS(12,	32,	160)					\
+    SIZE_CLASS(13,	32,	192)					\
+    SIZE_CLASS(14,	32,	224)					\
+    SIZE_CLASS(15,	32,	256)					\
+    SIZE_CLASS(16,	64,	320)					\
+    SIZE_CLASS(17,	64,	384)					\
+    SIZE_CLASS(18,	64,	448)					\
+    SIZE_CLASS(19,	64,	512)					\
+    SIZE_CLASS(20,	128,	640)					\
+    SIZE_CLASS(21,	128,	768)					\
+    SIZE_CLASS(22,	128,	896)					\
+    SIZE_CLASS(23,	128,	1024)					\
+    SIZE_CLASS(24,	256,	1280)					\
+    SIZE_CLASS(25,	256,	1536)					\
+    SIZE_CLASS(26,	256,	1792)					\
+    SIZE_CLASS(27,	256,	2048)					\
+    SIZE_CLASS(28,	512,	2560)					\
+    SIZE_CLASS(29,	512,	3072)					\
+    SIZE_CLASS(30,	512,	3584)					\
+    SIZE_CLASS(31,	512,	4096)					\
+    SIZE_CLASS(32,	1024,	5120)					\
+    SIZE_CLASS(33,	1024,	6144)					\
+    SIZE_CLASS(34,	1024,	7168)					\
+    SIZE_CLASS(35,	1024,	8192)					\
+    SIZE_CLASS(36,	2048,	10240)					\
+    SIZE_CLASS(37,	2048,	12288)					\
+    SIZE_CLASS(38,	2048,	14336)					\
+    SIZE_CLASS(39,	2048,	16384)					\
+    SIZE_CLASS(40,	4096,	20480)					\
+    SIZE_CLASS(41,	4096,	24576)					\
+    SIZE_CLASS(42,	4096,	28672)					\
+    SIZE_CLASS(43,	4096,	32768)					\
+    SIZE_CLASS(44,	8192,	40960)					\
+    SIZE_CLASS(45,	8192,	49152)					\
+    SIZE_CLASS(46,	8192,	57344)					\
+
+#define	NBINS		47
+#define	SMALL_MAXCLASS	57344
+#endif
+
+#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 12)
+#define	SIZE_CLASSES_DEFINED
+/*  SIZE_CLASS(bin,	delta,	sz) */
+#define	SIZE_CLASSES							\
+    SIZE_CLASS(0,	8,	8)					\
+    SIZE_CLASS(1,	8,	16)					\
+    SIZE_CLASS(2,	16,	32)					\
+    SIZE_CLASS(3,	16,	48)					\
+    SIZE_CLASS(4,	16,	64)					\
+    SIZE_CLASS(5,	16,	80)					\
+    SIZE_CLASS(6,	16,	96)					\
+    SIZE_CLASS(7,	16,	112)					\
+    SIZE_CLASS(8,	16,	128)					\
+    SIZE_CLASS(9,	32,	160)					\
+    SIZE_CLASS(10,	32,	192)					\
+    SIZE_CLASS(11,	32,	224)					\
+    SIZE_CLASS(12,	32,	256)					\
+    SIZE_CLASS(13,	64,	320)					\
+    SIZE_CLASS(14,	64,	384)					\
+    SIZE_CLASS(15,	64,	448)					\
+    SIZE_CLASS(16,	64,	512)					\
+    SIZE_CLASS(17,	128,	640)					\
+    SIZE_CLASS(18,	128,	768)					\
+    SIZE_CLASS(19,	128,	896)					\
+    SIZE_CLASS(20,	128,	1024)					\
+    SIZE_CLASS(21,	256,	1280)					\
+    SIZE_CLASS(22,	256,	1536)					\
+    SIZE_CLASS(23,	256,	1792)					\
+    SIZE_CLASS(24,	256,	2048)					\
+    SIZE_CLASS(25,	512,	2560)					\
+    SIZE_CLASS(26,	512,	3072)					\
+    SIZE_CLASS(27,	512,	3584)					\
+
+#define	NBINS		28
+#define	SMALL_MAXCLASS	3584
+#endif
+
+#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 13)
+#define	SIZE_CLASSES_DEFINED
+/*  SIZE_CLASS(bin,	delta,	sz) */
+#define	SIZE_CLASSES							\
+    SIZE_CLASS(0,	8,	8)					\
+    SIZE_CLASS(1,	8,	16)					\
+    SIZE_CLASS(2,	16,	32)					\
+    SIZE_CLASS(3,	16,	48)					\
+    SIZE_CLASS(4,	16,	64)					\
+    SIZE_CLASS(5,	16,	80)					\
+    SIZE_CLASS(6,	16,	96)					\
+    SIZE_CLASS(7,	16,	112)					\
+    SIZE_CLASS(8,	16,	128)					\
+    SIZE_CLASS(9,	32,	160)					\
+    SIZE_CLASS(10,	32,	192)					\
+    SIZE_CLASS(11,	32,	224)					\
+    SIZE_CLASS(12,	32,	256)					\
+    SIZE_CLASS(13,	64,	320)					\
+    SIZE_CLASS(14,	64,	384)					\
+    SIZE_CLASS(15,	64,	448)					\
+    SIZE_CLASS(16,	64,	512)					\
+    SIZE_CLASS(17,	128,	640)					\
+    SIZE_CLASS(18,	128,	768)					\
+    SIZE_CLASS(19,	128,	896)					\
+    SIZE_CLASS(20,	128,	1024)					\
+    SIZE_CLASS(21,	256,	1280)					\
+    SIZE_CLASS(22,	256,	1536)					\
+    SIZE_CLASS(23,	256,	1792)					\
+    SIZE_CLASS(24,	256,	2048)					\
+    SIZE_CLASS(25,	512,	2560)					\
+    SIZE_CLASS(26,	512,	3072)					\
+    SIZE_CLASS(27,	512,	3584)					\
+    SIZE_CLASS(28,	512,	4096)					\
+    SIZE_CLASS(29,	1024,	5120)					\
+    SIZE_CLASS(30,	1024,	6144)					\
+    SIZE_CLASS(31,	1024,	7168)					\
+
+#define	NBINS		32
+#define	SMALL_MAXCLASS	7168
+#endif
+
+#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 14)
+#define	SIZE_CLASSES_DEFINED
+/*  SIZE_CLASS(bin,	delta,	sz) */
+#define	SIZE_CLASSES							\
+    SIZE_CLASS(0,	8,	8)					\
+    SIZE_CLASS(1,	8,	16)					\
+    SIZE_CLASS(2,	16,	32)					\
+    SIZE_CLASS(3,	16,	48)					\
+    SIZE_CLASS(4,	16,	64)					\
+    SIZE_CLASS(5,	16,	80)					\
+    SIZE_CLASS(6,	16,	96)					\
+    SIZE_CLASS(7,	16,	112)					\
+    SIZE_CLASS(8,	16,	128)					\
+    SIZE_CLASS(9,	32,	160)					\
+    SIZE_CLASS(10,	32,	192)					\
+    SIZE_CLASS(11,	32,	224)					\
+    SIZE_CLASS(12,	32,	256)					\
+    SIZE_CLASS(13,	64,	320)					\
+    SIZE_CLASS(14,	64,	384)					\
+    SIZE_CLASS(15,	64,	448)					\
+    SIZE_CLASS(16,	64,	512)					\
+    SIZE_CLASS(17,	128,	640)					\
+    SIZE_CLASS(18,	128,	768)					\
+    SIZE_CLASS(19,	128,	896)					\
+    SIZE_CLASS(20,	128,	1024)					\
+    SIZE_CLASS(21,	256,	1280)					\
+    SIZE_CLASS(22,	256,	1536)					\
+    SIZE_CLASS(23,	256,	1792)					\
+    SIZE_CLASS(24,	256,	2048)					\
+    SIZE_CLASS(25,	512,	2560)					\
+    SIZE_CLASS(26,	512,	3072)					\
+    SIZE_CLASS(27,	512,	3584)					\
+    SIZE_CLASS(28,	512,	4096)					\
+    SIZE_CLASS(29,	1024,	5120)					\
+    SIZE_CLASS(30,	1024,	6144)					\
+    SIZE_CLASS(31,	1024,	7168)					\
+    SIZE_CLASS(32,	1024,	8192)					\
+    SIZE_CLASS(33,	2048,	10240)					\
+    SIZE_CLASS(34,	2048,	12288)					\
+    SIZE_CLASS(35,	2048,	14336)					\
+
+#define	NBINS		36
+#define	SMALL_MAXCLASS	14336
+#endif
+
+#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 15)
+#define	SIZE_CLASSES_DEFINED
+/*  SIZE_CLASS(bin,	delta,	sz) */
+#define	SIZE_CLASSES							\
+    SIZE_CLASS(0,	8,	8)					\
+    SIZE_CLASS(1,	8,	16)					\
+    SIZE_CLASS(2,	16,	32)					\
+    SIZE_CLASS(3,	16,	48)					\
+    SIZE_CLASS(4,	16,	64)					\
+    SIZE_CLASS(5,	16,	80)					\
+    SIZE_CLASS(6,	16,	96)					\
+    SIZE_CLASS(7,	16,	112)					\
+    SIZE_CLASS(8,	16,	128)					\
+    SIZE_CLASS(9,	32,	160)					\
+    SIZE_CLASS(10,	32,	192)					\
+    SIZE_CLASS(11,	32,	224)					\
+    SIZE_CLASS(12,	32,	256)					\
+    SIZE_CLASS(13,	64,	320)					\
+    SIZE_CLASS(14,	64,	384)					\
+    SIZE_CLASS(15,	64,	448)					\
+    SIZE_CLASS(16,	64,	512)					\
+    SIZE_CLASS(17,	128,	640)					\
+    SIZE_CLASS(18,	128,	768)					\
+    SIZE_CLASS(19,	128,	896)					\
+    SIZE_CLASS(20,	128,	1024)					\
+    SIZE_CLASS(21,	256,	1280)					\
+    SIZE_CLASS(22,	256,	1536)					\
+    SIZE_CLASS(23,	256,	1792)					\
+    SIZE_CLASS(24,	256,	2048)					\
+    SIZE_CLASS(25,	512,	2560)					\
+    SIZE_CLASS(26,	512,	3072)					\
+    SIZE_CLASS(27,	512,	3584)					\
+    SIZE_CLASS(28,	512,	4096)					\
+    SIZE_CLASS(29,	1024,	5120)					\
+    SIZE_CLASS(30,	1024,	6144)					\
+    SIZE_CLASS(31,	1024,	7168)					\
+    SIZE_CLASS(32,	1024,	8192)					\
+    SIZE_CLASS(33,	2048,	10240)					\
+    SIZE_CLASS(34,	2048,	12288)					\
+    SIZE_CLASS(35,	2048,	14336)					\
+    SIZE_CLASS(36,	2048,	16384)					\
+    SIZE_CLASS(37,	4096,	20480)					\
+    SIZE_CLASS(38,	4096,	24576)					\
+    SIZE_CLASS(39,	4096,	28672)					\
+
+#define	NBINS		40
+#define	SMALL_MAXCLASS	28672
+#endif
+
+#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 16)
+#define	SIZE_CLASSES_DEFINED
+/*  SIZE_CLASS(bin,	delta,	sz) */
+#define	SIZE_CLASSES							\
+    SIZE_CLASS(0,	8,	8)					\
+    SIZE_CLASS(1,	8,	16)					\
+    SIZE_CLASS(2,	16,	32)					\
+    SIZE_CLASS(3,	16,	48)					\
+    SIZE_CLASS(4,	16,	64)					\
+    SIZE_CLASS(5,	16,	80)					\
+    SIZE_CLASS(6,	16,	96)					\
+    SIZE_CLASS(7,	16,	112)					\
+    SIZE_CLASS(8,	16,	128)					\
+    SIZE_CLASS(9,	32,	160)					\
+    SIZE_CLASS(10,	32,	192)					\
+    SIZE_CLASS(11,	32,	224)					\
+    SIZE_CLASS(12,	32,	256)					\
+    SIZE_CLASS(13,	64,	320)					\
+    SIZE_CLASS(14,	64,	384)					\
+    SIZE_CLASS(15,	64,	448)					\
+    SIZE_CLASS(16,	64,	512)					\
+    SIZE_CLASS(17,	128,	640)					\
+    SIZE_CLASS(18,	128,	768)					\
+    SIZE_CLASS(19,	128,	896)					\
+    SIZE_CLASS(20,	128,	1024)					\
+    SIZE_CLASS(21,	256,	1280)					\
+    SIZE_CLASS(22,	256,	1536)					\
+    SIZE_CLASS(23,	256,	1792)					\
+    SIZE_CLASS(24,	256,	2048)					\
+    SIZE_CLASS(25,	512,	2560)					\
+    SIZE_CLASS(26,	512,	3072)					\
+    SIZE_CLASS(27,	512,	3584)					\
+    SIZE_CLASS(28,	512,	4096)					\
+    SIZE_CLASS(29,	1024,	5120)					\
+    SIZE_CLASS(30,	1024,	6144)					\
+    SIZE_CLASS(31,	1024,	7168)					\
+    SIZE_CLASS(32,	1024,	8192)					\
+    SIZE_CLASS(33,	2048,	10240)					\
+    SIZE_CLASS(34,	2048,	12288)					\
+    SIZE_CLASS(35,	2048,	14336)					\
+    SIZE_CLASS(36,	2048,	16384)					\
+    SIZE_CLASS(37,	4096,	20480)					\
+    SIZE_CLASS(38,	4096,	24576)					\
+    SIZE_CLASS(39,	4096,	28672)					\
+    SIZE_CLASS(40,	4096,	32768)					\
+    SIZE_CLASS(41,	8192,	40960)					\
+    SIZE_CLASS(42,	8192,	49152)					\
+    SIZE_CLASS(43,	8192,	57344)					\
+
+#define	NBINS		44
+#define	SMALL_MAXCLASS	57344
+#endif
+
+#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 12)
+#define	SIZE_CLASSES_DEFINED
+/*  SIZE_CLASS(bin,	delta,	sz) */
+#define	SIZE_CLASSES							\
+    SIZE_CLASS(0,	16,	16)					\
+    SIZE_CLASS(1,	16,	32)					\
+    SIZE_CLASS(2,	16,	48)					\
+    SIZE_CLASS(3,	16,	64)					\
+    SIZE_CLASS(4,	16,	80)					\
+    SIZE_CLASS(5,	16,	96)					\
+    SIZE_CLASS(6,	16,	112)					\
+    SIZE_CLASS(7,	16,	128)					\
+    SIZE_CLASS(8,	32,	160)					\
+    SIZE_CLASS(9,	32,	192)					\
+    SIZE_CLASS(10,	32,	224)					\
+    SIZE_CLASS(11,	32,	256)					\
+    SIZE_CLASS(12,	64,	320)					\
+    SIZE_CLASS(13,	64,	384)					\
+    SIZE_CLASS(14,	64,	448)					\
+    SIZE_CLASS(15,	64,	512)					\
+    SIZE_CLASS(16,	128,	640)					\
+    SIZE_CLASS(17,	128,	768)					\
+    SIZE_CLASS(18,	128,	896)					\
+    SIZE_CLASS(19,	128,	1024)					\
+    SIZE_CLASS(20,	256,	1280)					\
+    SIZE_CLASS(21,	256,	1536)					\
+    SIZE_CLASS(22,	256,	1792)					\
+    SIZE_CLASS(23,	256,	2048)					\
+    SIZE_CLASS(24,	512,	2560)					\
+    SIZE_CLASS(25,	512,	3072)					\
+    SIZE_CLASS(26,	512,	3584)					\
+
+#define	NBINS		27
+#define	SMALL_MAXCLASS	3584
+#endif
+
+#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 13)
+#define	SIZE_CLASSES_DEFINED
+/*  SIZE_CLASS(bin,	delta,	sz) */
+#define	SIZE_CLASSES							\
+    SIZE_CLASS(0,	16,	16)					\
+    SIZE_CLASS(1,	16,	32)					\
+    SIZE_CLASS(2,	16,	48)					\
+    SIZE_CLASS(3,	16,	64)					\
+    SIZE_CLASS(4,	16,	80)					\
+    SIZE_CLASS(5,	16,	96)					\
+    SIZE_CLASS(6,	16,	112)					\
+    SIZE_CLASS(7,	16,	128)					\
+    SIZE_CLASS(8,	32,	160)					\
+    SIZE_CLASS(9,	32,	192)					\
+    SIZE_CLASS(10,	32,	224)					\
+    SIZE_CLASS(11,	32,	256)					\
+    SIZE_CLASS(12,	64,	320)					\
+    SIZE_CLASS(13,	64,	384)					\
+    SIZE_CLASS(14,	64,	448)					\
+    SIZE_CLASS(15,	64,	512)					\
+    SIZE_CLASS(16,	128,	640)					\
+    SIZE_CLASS(17,	128,	768)					\
+    SIZE_CLASS(18,	128,	896)					\
+    SIZE_CLASS(19,	128,	1024)					\
+    SIZE_CLASS(20,	256,	1280)					\
+    SIZE_CLASS(21,	256,	1536)					\
+    SIZE_CLASS(22,	256,	1792)					\
+    SIZE_CLASS(23,	256,	2048)					\
+    SIZE_CLASS(24,	512,	2560)					\
+    SIZE_CLASS(25,	512,	3072)					\
+    SIZE_CLASS(26,	512,	3584)					\
+    SIZE_CLASS(27,	512,	4096)					\
+    SIZE_CLASS(28,	1024,	5120)					\
+    SIZE_CLASS(29,	1024,	6144)					\
+    SIZE_CLASS(30,	1024,	7168)					\
+
+#define	NBINS		31
+#define	SMALL_MAXCLASS	7168
+#endif
+
+#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 14)
+#define	SIZE_CLASSES_DEFINED
+/*  SIZE_CLASS(bin,	delta,	sz) */
+#define	SIZE_CLASSES							\
+    SIZE_CLASS(0,	16,	16)					\
+    SIZE_CLASS(1,	16,	32)					\
+    SIZE_CLASS(2,	16,	48)					\
+    SIZE_CLASS(3,	16,	64)					\
+    SIZE_CLASS(4,	16,	80)					\
+    SIZE_CLASS(5,	16,	96)					\
+    SIZE_CLASS(6,	16,	112)					\
+    SIZE_CLASS(7,	16,	128)					\
+    SIZE_CLASS(8,	32,	160)					\
+    SIZE_CLASS(9,	32,	192)					\
+    SIZE_CLASS(10,	32,	224)					\
+    SIZE_CLASS(11,	32,	256)					\
+    SIZE_CLASS(12,	64,	320)					\
+    SIZE_CLASS(13,	64,	384)					\
+    SIZE_CLASS(14,	64,	448)					\
+    SIZE_CLASS(15,	64,	512)					\
+    SIZE_CLASS(16,	128,	640)					\
+    SIZE_CLASS(17,	128,	768)					\
+    SIZE_CLASS(18,	128,	896)					\
+    SIZE_CLASS(19,	128,	1024)					\
+    SIZE_CLASS(20,	256,	1280)					\
+    SIZE_CLASS(21,	256,	1536)					\
+    SIZE_CLASS(22,	256,	1792)					\
+    SIZE_CLASS(23,	256,	2048)					\
+    SIZE_CLASS(24,	512,	2560)					\
+    SIZE_CLASS(25,	512,	3072)					\
+    SIZE_CLASS(26,	512,	3584)					\
+    SIZE_CLASS(27,	512,	4096)					\
+    SIZE_CLASS(28,	1024,	5120)					\
+    SIZE_CLASS(29,	1024,	6144)					\
+    SIZE_CLASS(30,	1024,	7168)					\
+    SIZE_CLASS(31,	1024,	8192)					\
+    SIZE_CLASS(32,	2048,	10240)					\
+    SIZE_CLASS(33,	2048,	12288)					\
+    SIZE_CLASS(34,	2048,	14336)					\
+
+#define	NBINS		35
+#define	SMALL_MAXCLASS	14336
+#endif
+
+#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 15)
+#define	SIZE_CLASSES_DEFINED
+/*  SIZE_CLASS(bin,	delta,	sz) */
+#define	SIZE_CLASSES							\
+    SIZE_CLASS(0,	16,	16)					\
+    SIZE_CLASS(1,	16,	32)					\
+    SIZE_CLASS(2,	16,	48)					\
+    SIZE_CLASS(3,	16,	64)					\
+    SIZE_CLASS(4,	16,	80)					\
+    SIZE_CLASS(5,	16,	96)					\
+    SIZE_CLASS(6,	16,	112)					\
+    SIZE_CLASS(7,	16,	128)					\
+    SIZE_CLASS(8,	32,	160)					\
+    SIZE_CLASS(9,	32,	192)					\
+    SIZE_CLASS(10,	32,	224)					\
+    SIZE_CLASS(11,	32,	256)					\
+    SIZE_CLASS(12,	64,	320)					\
+    SIZE_CLASS(13,	64,	384)					\
+    SIZE_CLASS(14,	64,	448)					\
+    SIZE_CLASS(15,	64,	512)					\
+    SIZE_CLASS(16,	128,	640)					\
+    SIZE_CLASS(17,	128,	768)					\
+    SIZE_CLASS(18,	128,	896)					\
+    SIZE_CLASS(19,	128,	1024)					\
+    SIZE_CLASS(20,	256,	1280)					\
+    SIZE_CLASS(21,	256,	1536)					\
+    SIZE_CLASS(22,	256,	1792)					\
+    SIZE_CLASS(23,	256,	2048)					\
+    SIZE_CLASS(24,	512,	2560)					\
+    SIZE_CLASS(25,	512,	3072)					\
+    SIZE_CLASS(26,	512,	3584)					\
+    SIZE_CLASS(27,	512,	4096)					\
+    SIZE_CLASS(28,	1024,	5120)					\
+    SIZE_CLASS(29,	1024,	6144)					\
+    SIZE_CLASS(30,	1024,	7168)					\
+    SIZE_CLASS(31,	1024,	8192)					\
+    SIZE_CLASS(32,	2048,	10240)					\
+    SIZE_CLASS(33,	2048,	12288)					\
+    SIZE_CLASS(34,	2048,	14336)					\
+    SIZE_CLASS(35,	2048,	16384)					\
+    SIZE_CLASS(36,	4096,	20480)					\
+    SIZE_CLASS(37,	4096,	24576)					\
+    SIZE_CLASS(38,	4096,	28672)					\
+
+#define	NBINS		39
+#define	SMALL_MAXCLASS	28672
+#endif
+
+#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 16)
+#define	SIZE_CLASSES_DEFINED
+/*  SIZE_CLASS(bin,	delta,	sz) */
+#define	SIZE_CLASSES							\
+    SIZE_CLASS(0,	16,	16)					\
+    SIZE_CLASS(1,	16,	32)					\
+    SIZE_CLASS(2,	16,	48)					\
+    SIZE_CLASS(3,	16,	64)					\
+    SIZE_CLASS(4,	16,	80)					\
+    SIZE_CLASS(5,	16,	96)					\
+    SIZE_CLASS(6,	16,	112)					\
+    SIZE_CLASS(7,	16,	128)					\
+    SIZE_CLASS(8,	32,	160)					\
+    SIZE_CLASS(9,	32,	192)					\
+    SIZE_CLASS(10,	32,	224)					\
+    SIZE_CLASS(11,	32,	256)					\
+    SIZE_CLASS(12,	64,	320)					\
+    SIZE_CLASS(13,	64,	384)					\
+    SIZE_CLASS(14,	64,	448)					\
+    SIZE_CLASS(15,	64,	512)					\
+    SIZE_CLASS(16,	128,	640)					\
+    SIZE_CLASS(17,	128,	768)					\
+    SIZE_CLASS(18,	128,	896)					\
+    SIZE_CLASS(19,	128,	1024)					\
+    SIZE_CLASS(20,	256,	1280)					\
+    SIZE_CLASS(21,	256,	1536)					\
+    SIZE_CLASS(22,	256,	1792)					\
+    SIZE_CLASS(23,	256,	2048)					\
+    SIZE_CLASS(24,	512,	2560)					\
+    SIZE_CLASS(25,	512,	3072)					\
+    SIZE_CLASS(26,	512,	3584)					\
+    SIZE_CLASS(27,	512,	4096)					\
+    SIZE_CLASS(28,	1024,	5120)					\
+    SIZE_CLASS(29,	1024,	6144)					\
+    SIZE_CLASS(30,	1024,	7168)					\
+    SIZE_CLASS(31,	1024,	8192)					\
+    SIZE_CLASS(32,	2048,	10240)					\
+    SIZE_CLASS(33,	2048,	12288)					\
+    SIZE_CLASS(34,	2048,	14336)					\
+    SIZE_CLASS(35,	2048,	16384)					\
+    SIZE_CLASS(36,	4096,	20480)					\
+    SIZE_CLASS(37,	4096,	24576)					\
+    SIZE_CLASS(38,	4096,	28672)					\
+    SIZE_CLASS(39,	4096,	32768)					\
+    SIZE_CLASS(40,	8192,	40960)					\
+    SIZE_CLASS(41,	8192,	49152)					\
+    SIZE_CLASS(42,	8192,	57344)					\
+
+#define	NBINS		43
+#define	SMALL_MAXCLASS	57344
+#endif
+
+#ifndef SIZE_CLASSES_DEFINED
+#  error "No size class definitions match configuration"
+#endif
+#undef SIZE_CLASSES_DEFINED
+/*
+ * The small_size2bin lookup table uses uint8_t to encode each bin index, so we
+ * cannot support more than 256 small size classes.  Further constrain NBINS to
+ * 255 to support prof_promote, since all small size classes, plus a "not
+ * small" size class, must be stored in 8 bits of arena_chunk_map_t's bits
+ * field.
+ */
+#if (NBINS > 255)
+#  error "Too many small size classes"
+#endif
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
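The tables above are x-macros: each configuration supplies SIZE_CLASS(bin, delta, sz) rows, and a consumer expands them by defining SIZE_CLASS itself (arena.c, later in this changeset, does exactly that to build small_size2bin). A minimal standalone sketch of the expansion pattern follows; the three-row table and the bin_maxsize name are invented for illustration and are not part of the import.

#include <stdio.h>

/* Tiny stand-in for one of the generated tables above. */
#define	SIZE_CLASSES							\
	SIZE_CLASS(0, 8, 8)						\
	SIZE_CLASS(1, 8, 16)						\
	SIZE_CLASS(2, 16, 32)

/* Expand the table into an array of per-bin maximum sizes. */
static const size_t bin_maxsize[] = {
#define	SIZE_CLASS(bin, delta, sz)	sz,
	SIZE_CLASSES
#undef SIZE_CLASS
};

int
main(void)
{
	size_t i;

	for (i = 0; i < sizeof(bin_maxsize) / sizeof(bin_maxsize[0]); i++)
		printf("bin %zu holds up to %zu bytes\n", i, bin_maxsize[i]);
	return (0);
}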
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/stats.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/stats.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,173 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct tcache_bin_stats_s tcache_bin_stats_t;
+typedef struct malloc_bin_stats_s malloc_bin_stats_t;
+typedef struct malloc_large_stats_s malloc_large_stats_t;
+typedef struct arena_stats_s arena_stats_t;
+typedef struct chunk_stats_s chunk_stats_t;
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct tcache_bin_stats_s {
+	/*
+	 * Number of allocation requests that corresponded to the size of this
+	 * bin.
+	 */
+	uint64_t	nrequests;
+};
+
+struct malloc_bin_stats_s {
+	/*
+	 * Current number of bytes allocated, including objects currently
+	 * cached by tcache.
+	 */
+	size_t		allocated;
+
+	/*
+	 * Total number of allocation/deallocation requests served directly by
+	 * the bin.  Note that tcache may allocate an object, then recycle it
+	 * many times, resulting in many increments to nrequests, but only one
+	 * each to nmalloc and ndalloc.
+	 */
+	uint64_t	nmalloc;
+	uint64_t	ndalloc;
+
+	/*
+	 * Number of allocation requests that correspond to the size of this
+	 * bin.  This includes requests served by tcache, though tcache only
+	 * periodically merges into this counter.
+	 */
+	uint64_t	nrequests;
+
+	/* Number of tcache fills from this bin. */
+	uint64_t	nfills;
+
+	/* Number of tcache flushes to this bin. */
+	uint64_t	nflushes;
+
+	/* Total number of runs created for this bin's size class. */
+	uint64_t	nruns;
+
+	/*
+	 * Total number of runs reused by extracting them from the runs tree for
+	 * this bin's size class.
+	 */
+	uint64_t	reruns;
+
+	/* Current number of runs in this bin. */
+	size_t		curruns;
+};
+
+struct malloc_large_stats_s {
+	/*
+	 * Total number of allocation/deallocation requests served directly by
+	 * the arena.  Note that tcache may allocate an object, then recycle it
+	 * many times, resulting in many increments to nrequests, but only one
+	 * each to nmalloc and ndalloc.
+	 */
+	uint64_t	nmalloc;
+	uint64_t	ndalloc;
+
+	/*
+	 * Number of allocation requests that correspond to this size class.
+	 * This includes requests served by tcache, though tcache only
+	 * periodically merges into this counter.
+	 */
+	uint64_t	nrequests;
+
+	/* Current number of runs of this size class. */
+	size_t		curruns;
+};
+
+struct arena_stats_s {
+	/* Number of bytes currently mapped. */
+	size_t		mapped;
+
+	/*
+	 * Total number of purge sweeps, total number of madvise calls made,
+	 * and total pages purged in order to keep dirty unused memory under
+	 * control.
+	 */
+	uint64_t	npurge;
+	uint64_t	nmadvise;
+	uint64_t	purged;
+
+	/* Per-size-category statistics. */
+	size_t		allocated_large;
+	uint64_t	nmalloc_large;
+	uint64_t	ndalloc_large;
+	uint64_t	nrequests_large;
+
+	/*
+	 * One element for each possible size class, including sizes that
+	 * overlap with bin size classes.  This is necessary because ipalloc()
+	 * sometimes has to use such large objects in order to ensure proper
+	 * alignment.
+	 */
+	malloc_large_stats_t	*lstats;
+};
+
+struct chunk_stats_s {
+	/* Number of chunks that were allocated. */
+	uint64_t	nchunks;
+
+	/* High-water mark for number of chunks allocated. */
+	size_t		highchunks;
+
+	/*
+	 * Current number of chunks allocated.  This value isn't needed for any
+	 * other purpose, but it is tracked in order to be able to set
+	 * highchunks.
+	 */
+	size_t		curchunks;
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+extern bool	opt_stats_print;
+
+extern size_t	stats_cactive;
+
+void	stats_print(void (*write)(void *, const char *), void *cbopaque,
+    const char *opts);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+size_t	stats_cactive_get(void);
+void	stats_cactive_add(size_t size);
+void	stats_cactive_sub(size_t size);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
+JEMALLOC_INLINE size_t
+stats_cactive_get(void)
+{
+
+	return (atomic_read_z(&stats_cactive));
+}
+
+JEMALLOC_INLINE void
+stats_cactive_add(size_t size)
+{
+
+	atomic_add_z(&stats_cactive, size);
+}
+
+JEMALLOC_INLINE void
+stats_cactive_sub(size_t size)
+{
+
+	atomic_sub_z(&stats_cactive, size);
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
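stats_cactive above is the one global, always-on statistic: arenas add to it as pages become active and subtract as they are purged, and readers sample it through stats_cactive_get() (atomic_add_z/atomic_sub_z/atomic_read_z come from the atomic.h header listed in the diffstat). The following is a simplified standalone analog using C11 atomics; the cactive_add/cactive_sub names and the call sites are illustrative only, not the jemalloc code.

#include <stdatomic.h>
#include <stdio.h>

/* Simplified analog of stats_cactive: a global byte count that any
 * thread may update or read without holding a lock. */
static _Atomic size_t cactive;

static void
cactive_add(size_t size)
{

	atomic_fetch_add_explicit(&cactive, size, memory_order_relaxed);
}

static void
cactive_sub(size_t size)
{

	atomic_fetch_sub_explicit(&cactive, size, memory_order_relaxed);
}

int
main(void)
{

	cactive_add(4096);	/* a page becomes active */
	cactive_sub(4096);	/* ...and is later purged */
	printf("active bytes: %zu\n",
	    atomic_load_explicit(&cactive, memory_order_relaxed));
	return (0);
}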
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/tcache.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/tcache.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,494 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct tcache_bin_info_s tcache_bin_info_t;
+typedef struct tcache_bin_s tcache_bin_t;
+typedef struct tcache_s tcache_t;
+
+/*
+ * tcache pointers close to NULL are used to encode state information that is
+ * used for two purposes: preventing thread caching on a per thread basis and
+ * cleaning up during thread shutdown.
+ */
+#define	TCACHE_STATE_DISABLED		((tcache_t *)(uintptr_t)1)
+#define	TCACHE_STATE_REINCARNATED	((tcache_t *)(uintptr_t)2)
+#define	TCACHE_STATE_PURGATORY		((tcache_t *)(uintptr_t)3)
+#define	TCACHE_STATE_MAX		TCACHE_STATE_PURGATORY
+
+/*
+ * Absolute maximum number of cache slots for each small bin in the thread
+ * cache.  This is an additional constraint beyond the implicit limit of twice
+ * the number of regions per run for this size class.
+ *
+ * This constant must be an even number.
+ */
+#define	TCACHE_NSLOTS_SMALL_MAX		200
+
+/* Number of cache slots for large size classes. */
+#define	TCACHE_NSLOTS_LARGE		20
+
+/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
+#define	LG_TCACHE_MAXCLASS_DEFAULT	15
+
+/*
+ * TCACHE_GC_SWEEP is the approximate number of allocation events between
+ * full GC sweeps.  Integer rounding may cause the actual number to be
+ * slightly higher, since GC is performed incrementally.
+ */
+#define	TCACHE_GC_SWEEP			8192
+
+/* Number of tcache allocation/deallocation events between incremental GCs. */
+#define	TCACHE_GC_INCR							\
+    ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+typedef enum {
+	tcache_enabled_false   = 0, /* Enable cast to/from bool. */
+	tcache_enabled_true    = 1,
+	tcache_enabled_default = 2
+} tcache_enabled_t;
+
+/*
+ * Read-only information associated with each element of tcache_t's tbins array
+ * is stored separately, mainly to reduce memory usage.
+ */
+struct tcache_bin_info_s {
+	unsigned	ncached_max;	/* Upper limit on ncached. */
+};
+
+struct tcache_bin_s {
+	tcache_bin_stats_t tstats;
+	int		low_water;	/* Min # cached since last GC. */
+	unsigned	lg_fill_div;	/* Fill (ncached_max >> lg_fill_div). */
+	unsigned	ncached;	/* # of cached objects. */
+	void		**avail;	/* Stack of available objects. */
+};
+
+struct tcache_s {
+	ql_elm(tcache_t) link;		/* Used for aggregating stats. */
+	uint64_t	prof_accumbytes;/* Cleared after arena_prof_accum() */
+	arena_t		*arena;		/* This thread's arena. */
+	unsigned	ev_cnt;		/* Event count since incremental GC. */
+	unsigned	next_gc_bin;	/* Next bin to GC. */
+	tcache_bin_t	tbins[1];	/* Dynamically sized. */
+	/*
+	 * The pointer stacks associated with tbins follow as a contiguous
+	 * array.  During tcache initialization, the avail pointer in each
+	 * element of tbins is initialized to point to the proper offset within
+	 * this array.
+	 */
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+extern bool	opt_tcache;
+extern ssize_t	opt_lg_tcache_max;
+
+extern tcache_bin_info_t	*tcache_bin_info;
+
+/*
+ * Number of tcache bins.  There are NBINS small-object bins, plus 0 or more
+ * large-object bins.
+ */
+extern size_t			nhbins;
+
+/* Maximum cached size class. */
+extern size_t			tcache_maxclass;
+
+void	tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
+    tcache_t *tcache);
+void	tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
+    tcache_t *tcache);
+void	tcache_arena_associate(tcache_t *tcache, arena_t *arena);
+void	tcache_arena_dissociate(tcache_t *tcache);
+tcache_t *tcache_create(arena_t *arena);
+void	*tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
+    size_t binind);
+void	tcache_destroy(tcache_t *tcache);
+void	tcache_thread_cleanup(void *arg);
+void	tcache_stats_merge(tcache_t *tcache, arena_t *arena);
+bool	tcache_boot0(void);
+bool	tcache_boot1(void);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache, tcache_t *)
+malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache_enabled, tcache_enabled_t)
+
+void	tcache_event(tcache_t *tcache);
+void	tcache_flush(void);
+bool	tcache_enabled_get(void);
+tcache_t *tcache_get(bool create);
+void	tcache_enabled_set(bool enabled);
+void	*tcache_alloc_easy(tcache_bin_t *tbin);
+void	*tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
+void	*tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
+void	tcache_dalloc_small(tcache_t *tcache, void *ptr);
+void	tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
+/* Map of thread-specific caches. */
+malloc_tsd_externs(tcache, tcache_t *)
+malloc_tsd_funcs(JEMALLOC_INLINE, tcache, tcache_t *, NULL,
+    tcache_thread_cleanup)
+/* Per thread flag that allows thread caches to be disabled. */
+malloc_tsd_externs(tcache_enabled, tcache_enabled_t)
+malloc_tsd_funcs(JEMALLOC_INLINE, tcache_enabled, tcache_enabled_t,
+    tcache_enabled_default, malloc_tsd_no_cleanup)
+
+JEMALLOC_INLINE void
+tcache_flush(void)
+{
+	tcache_t *tcache;
+
+	cassert(config_tcache);
+
+	tcache = *tcache_tsd_get();
+	if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
+		return;
+	tcache_destroy(tcache);
+	tcache = NULL;
+	tcache_tsd_set(&tcache);
+}
+
+JEMALLOC_INLINE bool
+tcache_enabled_get(void)
+{
+	tcache_enabled_t tcache_enabled;
+
+	cassert(config_tcache);
+
+	tcache_enabled = *tcache_enabled_tsd_get();
+	if (tcache_enabled == tcache_enabled_default) {
+		tcache_enabled = (tcache_enabled_t)opt_tcache;
+		tcache_enabled_tsd_set(&tcache_enabled);
+	}
+
+	return ((bool)tcache_enabled);
+}
+
+JEMALLOC_INLINE void
+tcache_enabled_set(bool enabled)
+{
+	tcache_enabled_t tcache_enabled;
+	tcache_t *tcache;
+
+	cassert(config_tcache);
+
+	tcache_enabled = (tcache_enabled_t)enabled;
+	tcache_enabled_tsd_set(&tcache_enabled);
+	tcache = *tcache_tsd_get();
+	if (enabled) {
+		if (tcache == TCACHE_STATE_DISABLED) {
+			tcache = NULL;
+			tcache_tsd_set(&tcache);
+		}
+	} else /* disabled */ {
+		if (tcache > TCACHE_STATE_MAX) {
+			tcache_destroy(tcache);
+			tcache = NULL;
+		}
+		if (tcache == NULL) {
+			tcache = TCACHE_STATE_DISABLED;
+			tcache_tsd_set(&tcache);
+		}
+	}
+}
+
+JEMALLOC_INLINE tcache_t *
+tcache_get(bool create)
+{
+	tcache_t *tcache;
+
+	if (config_tcache == false)
+		return (NULL);
+	if (config_lazy_lock && isthreaded == false)
+		return (NULL);
+
+	tcache = *tcache_tsd_get();
+	if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) {
+		if (tcache == TCACHE_STATE_DISABLED)
+			return (NULL);
+		if (tcache == NULL) {
+			if (create == false) {
+				/*
+				 * Creating a tcache here would cause
+				 * allocation as a side effect of free().
+				 * Ordinarily that would be okay since
+				 * tcache_create() failure is a soft failure
+				 * that doesn't propagate.  However, if TLS
+				 * data are freed via free() as in glibc,
+				 * subtle corruption could result from setting
+				 * a TLS variable after its backing memory is
+				 * freed.
+				 */
+				return (NULL);
+			}
+			if (tcache_enabled_get() == false) {
+				tcache_enabled_set(false); /* Memoize. */
+				return (NULL);
+			}
+			return (tcache_create(choose_arena(NULL)));
+		}
+		if (tcache == TCACHE_STATE_PURGATORY) {
+			/*
+			 * Make a note that an allocator function was called
+			 * after tcache_thread_cleanup() was called.
+			 */
+			tcache = TCACHE_STATE_REINCARNATED;
+			tcache_tsd_set(&tcache);
+			return (NULL);
+		}
+		if (tcache == TCACHE_STATE_REINCARNATED)
+			return (NULL);
+		not_reached();
+	}
+
+	return (tcache);
+}
+
+JEMALLOC_INLINE void
+tcache_event(tcache_t *tcache)
+{
+
+	if (TCACHE_GC_INCR == 0)
+		return;
+
+	tcache->ev_cnt++;
+	assert(tcache->ev_cnt <= TCACHE_GC_INCR);
+	if (tcache->ev_cnt == TCACHE_GC_INCR) {
+		size_t binind = tcache->next_gc_bin;
+		tcache_bin_t *tbin = &tcache->tbins[binind];
+		tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
+
+		if (tbin->low_water > 0) {
+			/*
+			 * Flush (ceiling) 3/4 of the objects below the low
+			 * water mark.
+			 */
+			if (binind < NBINS) {
+				tcache_bin_flush_small(tbin, binind,
+				    tbin->ncached - tbin->low_water +
+				    (tbin->low_water >> 2), tcache);
+			} else {
+				tcache_bin_flush_large(tbin, binind,
+				    tbin->ncached - tbin->low_water +
+				    (tbin->low_water >> 2), tcache);
+			}
+			/*
+			 * Reduce fill count by 2X.  Limit lg_fill_div such that
+			 * the fill count is always at least 1.
+			 */
+			if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1))
+			    >= 1)
+				tbin->lg_fill_div++;
+		} else if (tbin->low_water < 0) {
+			/*
+			 * Increase fill count by 2X.  Make sure lg_fill_div
+			 * stays greater than 0.
+			 */
+			if (tbin->lg_fill_div > 1)
+				tbin->lg_fill_div--;
+		}
+		tbin->low_water = tbin->ncached;
+
+		tcache->next_gc_bin++;
+		if (tcache->next_gc_bin == nhbins)
+			tcache->next_gc_bin = 0;
+		tcache->ev_cnt = 0;
+	}
+}
+
+JEMALLOC_INLINE void *
+tcache_alloc_easy(tcache_bin_t *tbin)
+{
+	void *ret;
+
+	if (tbin->ncached == 0) {
+		tbin->low_water = -1;
+		return (NULL);
+	}
+	tbin->ncached--;
+	if ((int)tbin->ncached < tbin->low_water)
+		tbin->low_water = tbin->ncached;
+	ret = tbin->avail[tbin->ncached];
+	return (ret);
+}
+
+JEMALLOC_INLINE void *
+tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
+{
+	void *ret;
+	size_t binind;
+	tcache_bin_t *tbin;
+
+	binind = SMALL_SIZE2BIN(size);
+	assert(binind < NBINS);
+	tbin = &tcache->tbins[binind];
+	ret = tcache_alloc_easy(tbin);
+	if (ret == NULL) {
+		ret = tcache_alloc_small_hard(tcache, tbin, binind);
+		if (ret == NULL)
+			return (NULL);
+	}
+	assert(arena_salloc(ret, false) == arena_bin_info[binind].reg_size);
+
+	if (zero == false) {
+		if (config_fill) {
+			if (opt_junk) {
+				arena_alloc_junk_small(ret,
+				    &arena_bin_info[binind], false);
+			} else if (opt_zero)
+				memset(ret, 0, size);
+		}
+	} else {
+		if (config_fill && opt_junk) {
+			arena_alloc_junk_small(ret, &arena_bin_info[binind],
+			    true);
+		}
+		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+		memset(ret, 0, size);
+	}
+
+	if (config_stats)
+		tbin->tstats.nrequests++;
+	if (config_prof)
+		tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
+	tcache_event(tcache);
+	return (ret);
+}
+
+JEMALLOC_INLINE void *
+tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
+{
+	void *ret;
+	size_t binind;
+	tcache_bin_t *tbin;
+
+	size = PAGE_CEILING(size);
+	assert(size <= tcache_maxclass);
+	binind = NBINS + (size >> LG_PAGE) - 1;
+	assert(binind < nhbins);
+	tbin = &tcache->tbins[binind];
+	ret = tcache_alloc_easy(tbin);
+	if (ret == NULL) {
+		/*
+		 * Only allocate one large object at a time, because it's quite
+		 * expensive to create one and not use it.
+		 */
+		ret = arena_malloc_large(tcache->arena, size, zero);
+		if (ret == NULL)
+			return (NULL);
+	} else {
+		if (config_prof) {
+			arena_chunk_t *chunk =
+			    (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
+			size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
+			    LG_PAGE);
+			chunk->map[pageind-map_bias].bits &=
+			    ~CHUNK_MAP_CLASS_MASK;
+		}
+		if (zero == false) {
+			if (config_fill) {
+				if (opt_junk)
+					memset(ret, 0xa5, size);
+				else if (opt_zero)
+					memset(ret, 0, size);
+			}
+		} else {
+			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+			memset(ret, 0, size);
+		}
+
+		if (config_stats)
+			tbin->tstats.nrequests++;
+		if (config_prof)
+			tcache->prof_accumbytes += size;
+	}
+
+	tcache_event(tcache);
+	return (ret);
+}
+
+JEMALLOC_INLINE void
+tcache_dalloc_small(tcache_t *tcache, void *ptr)
+{
+	arena_t *arena;
+	arena_chunk_t *chunk;
+	arena_run_t *run;
+	arena_bin_t *bin;
+	tcache_bin_t *tbin;
+	tcache_bin_info_t *tbin_info;
+	size_t pageind, binind;
+	arena_chunk_map_t *mapelm;
+
+	assert(arena_salloc(ptr, false) <= SMALL_MAXCLASS);
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	arena = chunk->arena;
+	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+	mapelm = &chunk->map[pageind-map_bias];
+	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
+	    (mapelm->bits >> LG_PAGE)) << LG_PAGE));
+	bin = run->bin;
+	binind = ((uintptr_t)bin - (uintptr_t)&arena->bins) /
+	    sizeof(arena_bin_t);
+	assert(binind < NBINS);
+
+	if (config_fill && opt_junk)
+		arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
+
+	tbin = &tcache->tbins[binind];
+	tbin_info = &tcache_bin_info[binind];
+	if (tbin->ncached == tbin_info->ncached_max) {
+		tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
+		    1), tcache);
+	}
+	assert(tbin->ncached < tbin_info->ncached_max);
+	tbin->avail[tbin->ncached] = ptr;
+	tbin->ncached++;
+
+	tcache_event(tcache);
+}
+
+JEMALLOC_INLINE void
+tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
+{
+	size_t binind;
+	tcache_bin_t *tbin;
+	tcache_bin_info_t *tbin_info;
+
+	assert((size & PAGE_MASK) == 0);
+	assert(arena_salloc(ptr, false) > SMALL_MAXCLASS);
+	assert(arena_salloc(ptr, false) <= tcache_maxclass);
+
+	binind = NBINS + (size >> LG_PAGE) - 1;
+
+	if (config_fill && opt_junk)
+		memset(ptr, 0x5a, size);
+
+	tbin = &tcache->tbins[binind];
+	tbin_info = &tcache_bin_info[binind];
+	if (tbin->ncached == tbin_info->ncached_max) {
+		tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
+		    1), tcache);
+	}
+	assert(tbin->ncached < tbin_info->ncached_max);
+	tbin->avail[tbin->ncached] = ptr;
+	tbin->ncached++;
+
+	tcache_event(tcache);
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
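tcache_event() above implements the incremental GC: every TCACHE_GC_INCR events it visits one bin, flushes roughly three quarters of the objects sitting below that bin's low water mark, and halves or doubles the refill amount by adjusting lg_fill_div. A standalone sketch of that arithmetic follows; the ncached, ncached_max and low_water values are made up for illustration, not taken from a real run.

#include <stdio.h>

int
main(void)
{
	unsigned ncached = 40, ncached_max = 64, lg_fill_div = 1, rem;
	int low_water = 16;

	/* Objects to keep: everything above the low water mark plus about a
	 * quarter of what sat below it (i.e. flush ~3/4 of the idle objects). */
	rem = ncached - low_water + (low_water >> 2);
	printf("keep %u of %u cached objects\n", rem, ncached);	/* 28 of 40 */

	/* Halve the refill amount, but never let it reach zero. */
	if ((ncached_max >> (lg_fill_div + 1)) >= 1)
		lg_fill_div++;
	printf("next fill: %u objects\n", ncached_max >> lg_fill_div);	/* 16 */
	return (0);
}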
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/tsd.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/tsd.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,309 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+/* Maximum number of malloc_tsd users with cleanup functions. */
+#define	MALLOC_TSD_CLEANUPS_MAX	8
+
+typedef struct malloc_tsd_cleanup_s malloc_tsd_cleanup_t;
+struct malloc_tsd_cleanup_s {
+	bool	(*f)(void *);
+	void	*arg;
+};
+
+/*
+ * TLS/TSD-agnostic macro-based implementation of thread-specific data.  There
+ * are four macros that support (at least) three use cases: file-private,
+ * library-private, and library-private inlined.  Following is an example
+ * library-private tsd variable:
+ *
+ * In example.h:
+ *   typedef struct {
+ *           int x;
+ *           int y;
+ *   } example_t;
+ *   #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0})
+ *   malloc_tsd_protos(, example, example_t *)
+ *   malloc_tsd_externs(example, example_t *)
+ * In example.c:
+ *   malloc_tsd_data(, example, example_t *, EX_INITIALIZER)
+ *   malloc_tsd_funcs(, example, example_t *, EX_INITIALIZER,
+ *       example_tsd_cleanup)
+ *
+ * The result is a set of generated functions, e.g.:
+ *
+ *   bool example_tsd_boot(void) {...}
+ *   example_t **example_tsd_get() {...}
+ *   void example_tsd_set(example_t **val) {...}
+ *
+ * Note that all of the functions deal in terms of (a_type *) rather than
+ * (a_type), so that it is possible to support non-pointer types (unlike
+ * pthreads TSD).  example_tsd_cleanup() is passed an (a_type *) pointer that is
+ * cast to (void *).  This means that the cleanup function needs to cast *and*
+ * dereference the function argument, e.g.:
+ *
+ *   void
+ *   example_tsd_cleanup(void *arg)
+ *   {
+ *           example_t *example = *(example_t **)arg;
+ *
+ *           [...]
+ *           if ([want the cleanup function to be called again]) {
+ *                   example_tsd_set(&example);
+ *           }
+ *   }
+ *
+ * If example_tsd_set() is called within example_tsd_cleanup(), it will be
+ * called again.  This is similar to how pthreads TSD destruction works, except
+ * that pthreads only calls the cleanup function again if the value was set to
+ * non-NULL.
+ */
+
+/* malloc_tsd_protos(). */
+#define	malloc_tsd_protos(a_attr, a_name, a_type)			\
+a_attr bool								\
+a_name##_tsd_boot(void);						\
+a_attr a_type *								\
+a_name##_tsd_get(void);							\
+a_attr void								\
+a_name##_tsd_set(a_type *val);
+
+/* malloc_tsd_externs(). */
+#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
+#define	malloc_tsd_externs(a_name, a_type)				\
+extern __thread a_type	a_name##_tls;					\
+extern __thread bool	a_name##_initialized;				\
+extern bool		a_name##_booted;
+#elif (defined(JEMALLOC_TLS))
+#define	malloc_tsd_externs(a_name, a_type)				\
+extern __thread a_type	a_name##_tls;					\
+extern pthread_key_t	a_name##_tsd;					\
+extern bool		a_name##_booted;
+#else
+#define	malloc_tsd_externs(a_name, a_type)				\
+extern pthread_key_t	a_name##_tsd;					\
+extern bool		a_name##_booted;
+#endif
+
+/* malloc_tsd_data(). */
+#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
+#define	malloc_tsd_data(a_attr, a_name, a_type, a_initializer)		\
+a_attr __thread a_type JEMALLOC_TLS_MODEL				\
+    a_name##_tls = a_initializer;					\
+a_attr __thread bool JEMALLOC_TLS_MODEL					\
+    a_name##_initialized = false;					\
+a_attr bool		a_name##_booted = false;
+#elif (defined(JEMALLOC_TLS))
+#define	malloc_tsd_data(a_attr, a_name, a_type, a_initializer)		\
+a_attr __thread a_type JEMALLOC_TLS_MODEL				\
+    a_name##_tls = a_initializer;					\
+a_attr pthread_key_t	a_name##_tsd;					\
+a_attr bool		a_name##_booted = false;
+#else
+#define	malloc_tsd_data(a_attr, a_name, a_type, a_initializer)		\
+a_attr pthread_key_t	a_name##_tsd;					\
+a_attr bool		a_name##_booted = false;
+#endif
+
+/* malloc_tsd_funcs(). */
+#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
+#define	malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer,		\
+    a_cleanup)								\
+/* Initialization/cleanup. */						\
+a_attr bool								\
+a_name##_tsd_cleanup_wrapper(void *arg)					\
+{									\
+	bool (*cleanup)(void *) = arg;					\
+									\
+	if (a_name##_initialized) {					\
+		a_name##_initialized = false;				\
+		cleanup(&a_name##_tls);					\
+	}								\
+	return (a_name##_initialized);					\
+}									\
+a_attr bool								\
+a_name##_tsd_boot(void)							\
+{									\
+									\
+	if (a_cleanup != malloc_tsd_no_cleanup) {			\
+		malloc_tsd_cleanup_register(				\
+		    &a_name##_tsd_cleanup_wrapper, a_cleanup);		\
+	}								\
+	a_name##_booted = true;						\
+	return (false);							\
+}									\
+/* Get/set. */								\
+a_attr a_type *								\
+a_name##_tsd_get(void)							\
+{									\
+									\
+	assert(a_name##_booted);					\
+	return (&a_name##_tls);						\
+}									\
+a_attr void								\
+a_name##_tsd_set(a_type *val)						\
+{									\
+									\
+	assert(a_name##_booted);					\
+	a_name##_tls = (*val);						\
+	if (a_cleanup != malloc_tsd_no_cleanup)				\
+		a_name##_initialized = true;				\
+}
+#elif (defined(JEMALLOC_TLS))
+#define	malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer,		\
+    a_cleanup)								\
+/* Initialization/cleanup. */						\
+a_attr bool								\
+a_name##_tsd_boot(void)							\
+{									\
+									\
+	if (a_cleanup != malloc_tsd_no_cleanup) {			\
+		if (pthread_key_create(&a_name##_tsd, a_cleanup) != 0)	\
+			return (true);					\
+	}								\
+	a_name##_booted = true;						\
+	return (false);							\
+}									\
+/* Get/set. */								\
+a_attr a_type *								\
+a_name##_tsd_get(void)							\
+{									\
+									\
+	assert(a_name##_booted);					\
+	return (&a_name##_tls);						\
+}									\
+a_attr void								\
+a_name##_tsd_set(a_type *val)						\
+{									\
+									\
+	assert(a_name##_booted);					\
+	a_name##_tls = (*val);						\
+	if (a_cleanup != malloc_tsd_no_cleanup) {			\
+		if (pthread_setspecific(a_name##_tsd,			\
+		    (void *)(&a_name##_tls))) {				\
+			malloc_write("<jemalloc>: Error"		\
+			    " setting TSD for "#a_name"\n");		\
+			if (opt_abort)					\
+				abort();				\
+		}							\
+	}								\
+}
+#else
+#define	malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer,		\
+    a_cleanup)								\
+/* Data structure. */							\
+typedef struct {							\
+	bool	isstatic;						\
+	bool	initialized;						\
+	a_type	val;							\
+} a_name##_tsd_wrapper_t;						\
+/* Initialization/cleanup. */						\
+a_attr void								\
+a_name##_tsd_cleanup_wrapper(void *arg)					\
+{									\
+	a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)arg;\
+									\
+	if (a_cleanup != malloc_tsd_no_cleanup &&			\
+	    wrapper->initialized) {					\
+		wrapper->initialized = false;				\
+		a_cleanup(&wrapper->val);				\
+		if (wrapper->initialized) {				\
+			/* Trigger another cleanup round. */		\
+			if (pthread_setspecific(a_name##_tsd,		\
+			    (void *)wrapper)) {				\
+				malloc_write("<jemalloc>: Error"	\
+				    " setting TSD for "#a_name"\n");	\
+				if (opt_abort)				\
+					abort();			\
+			}						\
+			return;						\
+		}							\
+	}								\
+	if (wrapper->isstatic == false)					\
+		malloc_tsd_dalloc(wrapper);				\
+}									\
+a_attr bool								\
+a_name##_tsd_boot(void)							\
+{									\
+									\
+	if (pthread_key_create(&a_name##_tsd,				\
+	    a_name##_tsd_cleanup_wrapper) != 0)				\
+		return (true);						\
+	a_name##_booted = true;						\
+	return (false);							\
+}									\
+/* Get/set. */								\
+a_attr a_name##_tsd_wrapper_t *						\
+a_name##_tsd_get_wrapper(void)						\
+{									\
+	a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)	\
+	    pthread_getspecific(a_name##_tsd);				\
+									\
+	if (wrapper == NULL) {						\
+		wrapper = (a_name##_tsd_wrapper_t *)			\
+		    malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t));	\
+		if (wrapper == NULL) {					\
+			static a_name##_tsd_wrapper_t			\
+			    a_name##_tsd_static_data =			\
+			    {true, false, a_initializer};		\
+			malloc_write("<jemalloc>: Error allocating"	\
+			    " TSD for "#a_name"\n");			\
+			if (opt_abort)					\
+				abort();				\
+			wrapper = &a_name##_tsd_static_data;		\
+		} else {						\
+			static a_type tsd_static_data = a_initializer;	\
+			wrapper->isstatic = false;			\
+			wrapper->val = tsd_static_data;			\
+		}							\
+		if (pthread_setspecific(a_name##_tsd,			\
+		    (void *)wrapper)) {					\
+			malloc_write("<jemalloc>: Error setting"	\
+			    " TSD for "#a_name"\n");			\
+			if (opt_abort)					\
+				abort();				\
+		}							\
+	}								\
+	return (wrapper);						\
+}									\
+a_attr a_type *								\
+a_name##_tsd_get(void)							\
+{									\
+	a_name##_tsd_wrapper_t *wrapper;				\
+									\
+	assert(a_name##_booted);					\
+	wrapper = a_name##_tsd_get_wrapper();				\
+	return (&wrapper->val);						\
+}									\
+a_attr void								\
+a_name##_tsd_set(a_type *val)						\
+{									\
+	a_name##_tsd_wrapper_t *wrapper;				\
+									\
+	assert(a_name##_booted);					\
+	wrapper = a_name##_tsd_get_wrapper();				\
+	wrapper->val = *(val);						\
+	if (a_cleanup != malloc_tsd_no_cleanup)				\
+		wrapper->initialized = true;				\
+}
+#endif
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void	*malloc_tsd_malloc(size_t size);
+void	malloc_tsd_dalloc(void *wrapper);
+void	malloc_tsd_no_cleanup(void *);
+void	malloc_tsd_cleanup_register(bool (*f)(void *), void *arg);
+void	malloc_tsd_boot(void);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
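The macros above generate, for each name, a _tsd_boot/_tsd_get/_tsd_set triple that copies values into per-thread storage, which is why the interface deals in (a_type *) and works for non-pointer types. A simplified standalone analog using raw pthread TSD follows; it omits the TLS fast path and the static-fallback-on-OOM handling of the real wrapper, and reuses only the example_t name from the header comment.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int x; int y; } example_t;

static pthread_key_t example_tsd;

static example_t *
example_tsd_get(void)
{
	example_t *val = pthread_getspecific(example_tsd);

	if (val == NULL) {
		if ((val = calloc(1, sizeof(*val))) == NULL)	/* {0, 0} */
			abort();	/* keep the sketch simple */
		pthread_setspecific(example_tsd, val);
	}
	return (val);
}

static void
example_tsd_set(example_t *val)
{

	*example_tsd_get() = *val;	/* copy by value, not by pointer */
}

int
main(void)
{
	example_t v = {1, 2};

	pthread_key_create(&example_tsd, free);	/* "boot" */
	example_tsd_set(&v);
	printf("x=%d y=%d\n", example_tsd_get()->x, example_tsd_get()->y);
	return (0);
}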
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/internal/util.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/internal/util.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,146 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+/* Size of stack-allocated buffer passed to buferror(). */
+#define	BUFERROR_BUF		64
+
+/*
+ * Size of stack-allocated buffer used by malloc_{,v,vc}printf().  This must be
+ * large enough for all possible uses within jemalloc.
+ */
+#define	MALLOC_PRINTF_BUFSIZE	4096
+
+/*
+ * Wrap a cpp argument that contains commas such that it isn't broken up into
+ * multiple arguments.
+ */
+#define JEMALLOC_CONCAT(...) __VA_ARGS__
+
+/*
+ * Silence compiler warnings due to uninitialized values.  This is used
+ * wherever the compiler fails to recognize that the variable is never used
+ * uninitialized.
+ */
+#ifdef JEMALLOC_CC_SILENCE
+#  define JEMALLOC_CC_SILENCE_INIT(v) = v
+#else
+#  define JEMALLOC_CC_SILENCE_INIT(v)
+#endif
+
+/*
+ * Define a custom assert() in order to reduce the chances of deadlock during
+ * assertion failure.
+ */
+#ifndef assert
+#define	assert(e) do {							\
+	if (config_debug && !(e)) {					\
+		malloc_printf(						\
+		    "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n",	\
+		    __FILE__, __LINE__, #e);				\
+		abort();						\
+	}								\
+} while (0)
+#endif
+
+/* Use to assert a particular configuration, e.g., cassert(config_debug). */
+#define	cassert(c) do {							\
+	if ((c) == false)						\
+		assert(false);						\
+} while (0)
+
+#ifndef not_reached
+#define	not_reached() do {						\
+	if (config_debug) {						\
+		malloc_printf(						\
+		    "<jemalloc>: %s:%d: Unreachable code reached\n",	\
+		    __FILE__, __LINE__);				\
+		abort();						\
+	}								\
+} while (0)
+#endif
+
+#ifndef not_implemented
+#define	not_implemented() do {						\
+	if (config_debug) {						\
+		malloc_printf("<jemalloc>: %s:%d: Not implemented\n",	\
+		    __FILE__, __LINE__);				\
+		abort();						\
+	}								\
+} while (0)
+#endif
+
+#define	assert_not_implemented(e) do {					\
+	if (config_debug && !(e))					\
+		not_implemented();					\
+} while (0)
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+extern void	(*je_malloc_message)(void *wcbopaque, const char *s);
+
+int	buferror(int errnum, char *buf, size_t buflen);
+uintmax_t	malloc_strtoumax(const char *nptr, char **endptr, int base);
+
+/*
+ * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
+ * point math.
+ */
+int	malloc_vsnprintf(char *str, size_t size, const char *format,
+    va_list ap);
+int	malloc_snprintf(char *str, size_t size, const char *format, ...)
+    JEMALLOC_ATTR(format(printf, 3, 4));
+void	malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
+    const char *format, va_list ap);
+void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
+    const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4));
+void	malloc_printf(const char *format, ...)
+    JEMALLOC_ATTR(format(printf, 1, 2));
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+size_t	pow2_ceil(size_t x);
+void	malloc_write(const char *s);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_))
+/* Compute the smallest power of 2 that is >= x. */
+JEMALLOC_INLINE size_t
+pow2_ceil(size_t x)
+{
+
+	x--;
+	x |= x >> 1;
+	x |= x >> 2;
+	x |= x >> 4;
+	x |= x >> 8;
+	x |= x >> 16;
+#if (LG_SIZEOF_PTR == 3)
+	x |= x >> 32;
+#endif
+	x++;
+	return (x);
+}
+
+/*
+ * Wrapper around malloc_message() that avoids the need for
+ * je_malloc_message(...) throughout the code.
+ */
+JEMALLOC_INLINE void
+malloc_write(const char *s)
+{
+
+	je_malloc_message(NULL, s);
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
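pow2_ceil() above rounds up to the next power of two by smearing the highest set bit into every lower bit position and then adding one. A worked standalone example of the 64-bit variant (assuming LG_SIZEOF_PTR == 3, as on amd64):

#include <stdio.h>

static size_t
pow2_ceil(size_t x)
{

	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	x |= x >> 32;	/* only needed for 64-bit size_t */
	x++;
	return (x);
}

int
main(void)
{

	/* 37 -> 36 (0b100100) -> 0b111111 after the ORs -> 64 after x++. */
	printf("%zu %zu %zu\n", pow2_ceil(1), pow2_ceil(37), pow2_ceil(4096));
	/* prints: 1 64 4096 */
	return (0);
}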
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/jemalloc.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/jemalloc.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,141 @@
+#ifndef JEMALLOC_H_
+#define	JEMALLOC_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <limits.h>
+#include <strings.h>
+
+#define	JEMALLOC_VERSION "1.0.0-258-g9ef7f5dc34ff02f50d401e41c8d9a4a928e7c2aa"
+#define	JEMALLOC_VERSION_MAJOR 1
+#define	JEMALLOC_VERSION_MINOR 0
+#define	JEMALLOC_VERSION_BUGFIX 0
+#define	JEMALLOC_VERSION_NREV 258
+#define	JEMALLOC_VERSION_GID "9ef7f5dc34ff02f50d401e41c8d9a4a928e7c2aa"
+
+#include "jemalloc_defs.h"
+#include "jemalloc_FreeBSD.h"
+
+#ifdef JEMALLOC_EXPERIMENTAL
+#define	ALLOCM_LG_ALIGN(la)	(la)
+#if LG_SIZEOF_PTR == 2
+#define	ALLOCM_ALIGN(a)	(ffs(a)-1)
+#else
+#define	ALLOCM_ALIGN(a)	((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
+#endif
+#define	ALLOCM_ZERO	((int)0x40)
+#define	ALLOCM_NO_MOVE	((int)0x80)
+
+#define	ALLOCM_SUCCESS		0
+#define	ALLOCM_ERR_OOM		1
+#define	ALLOCM_ERR_NOT_MOVED	2
+#endif
+
+/*
+ * The je_ prefix on the following public symbol declarations is an artifact of
+ * namespace management, and should be omitted in application code unless
+ * JEMALLOC_NO_DEMANGLE is defined (see below).
+ */
+extern const char	*je_malloc_conf;
+extern void		(*je_malloc_message)(void *, const char *);
+
+void	*je_malloc(size_t size) JEMALLOC_ATTR(malloc);
+void	*je_calloc(size_t num, size_t size) JEMALLOC_ATTR(malloc);
+int	je_posix_memalign(void **memptr, size_t alignment, size_t size)
+    JEMALLOC_ATTR(nonnull(1));
+void	*je_aligned_alloc(size_t alignment, size_t size) JEMALLOC_ATTR(malloc);
+void	*je_realloc(void *ptr, size_t size);
+void	je_free(void *ptr);
+
+size_t	je_malloc_usable_size(const void *ptr);
+void	je_malloc_stats_print(void (*write_cb)(void *, const char *),
+    void *je_cbopaque, const char *opts);
+int	je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
+    size_t newlen);
+int	je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp);
+int	je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen);
+
+#ifdef JEMALLOC_EXPERIMENTAL
+int	je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
+    JEMALLOC_ATTR(nonnull(1));
+int	je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra,
+    int flags) JEMALLOC_ATTR(nonnull(1));
+int	je_sallocm(const void *ptr, size_t *rsize, int flags)
+    JEMALLOC_ATTR(nonnull(1));
+int	je_dallocm(void *ptr, int flags) JEMALLOC_ATTR(nonnull(1));
+int	je_nallocm(size_t *rsize, size_t size, int flags);
+#endif
+
+/*
+ * By default application code must explicitly refer to mangled symbol names,
+ * so that it is possible to use jemalloc in conjunction with another allocator
+ * in the same application.  Define JEMALLOC_MANGLE in order to cause automatic
+ * name mangling that matches the API prefixing that happened as a result of
+ * --with-mangling and/or --with-jemalloc-prefix configuration settings.
+ */
+#ifdef JEMALLOC_MANGLE
+#ifndef JEMALLOC_NO_DEMANGLE
+#define	JEMALLOC_NO_DEMANGLE
+#endif
+#define	malloc_conf je_malloc_conf
+#define	malloc_message je_malloc_message
+#define	malloc je_malloc
+#define	calloc je_calloc
+#define	posix_memalign je_posix_memalign
+#define	aligned_alloc je_aligned_alloc
+#define	realloc je_realloc
+#define	free je_free
+#define	malloc_usable_size je_malloc_usable_size
+#define	malloc_stats_print je_malloc_stats_print
+#define	mallctl je_mallctl
+#define	mallctlnametomib je_mallctlnametomib
+#define	mallctlbymib je_mallctlbymib
+#define	memalign je_memalign
+#define	valloc je_valloc
+#ifdef JEMALLOC_EXPERIMENTAL
+#define	allocm je_allocm
+#define	rallocm je_rallocm
+#define	sallocm je_sallocm
+#define	dallocm je_dallocm
+#define	nallocm je_nallocm
+#endif
+#endif
+
+/*
+ * The je_* macros can be used as stable alternative names for the public
+ * jemalloc API if JEMALLOC_NO_DEMANGLE is defined.  This is primarily meant
+ * for use in jemalloc itself, but it can be used by application code to
+ * provide isolation from the name mangling specified via --with-mangling
+ * and/or --with-jemalloc-prefix.
+ */
+#ifndef JEMALLOC_NO_DEMANGLE
+#undef je_malloc_conf
+#undef je_malloc_message
+#undef je_malloc
+#undef je_calloc
+#undef je_posix_memalign
+#undef je_aligned_alloc
+#undef je_realloc
+#undef je_free
+#undef je_malloc_usable_size
+#undef je_malloc_stats_print
+#undef je_mallctl
+#undef je_mallctlnametomib
+#undef je_mallctlbymib
+#undef je_memalign
+#undef je_valloc
+#ifdef JEMALLOC_EXPERIMENTAL
+#undef je_allocm
+#undef je_rallocm
+#undef je_sallocm
+#undef je_dallocm
+#undef je_nallocm
+#endif
+#endif
+
+#ifdef __cplusplus
+};
+#endif
+#endif /* JEMALLOC_H_ */
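The declarations above are the public entry points applications link against; with the FreeBSD defaults in jemalloc_defs.h (further down in this changeset) the je_ prefix is stripped, so programs call malloc(), mallctl() and friends directly. A small usage sketch follows; the <malloc_np.h> header location and the "stats.allocated" statistic (which needs the JEMALLOC_STATS build shown below) are assumptions on my part, not something this diff states.

#include <malloc_np.h>	/* FreeBSD's home for mallctl() and friends */
#include <stdio.h>

int
main(void)
{
	size_t allocated, sz = sizeof(allocated);

	/* Query the allocator's view of total allocated bytes. */
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
		printf("allocated: %zu bytes\n", allocated);
	return (0);
}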
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/jemalloc_FreeBSD.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/jemalloc_FreeBSD.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,76 @@
+/*
+ * Override settings that were generated in jemalloc_defs.h as necessary.
+ */
+
+#undef JEMALLOC_OVERRIDE_VALLOC
+
+#ifndef MALLOC_PRODUCTION
+#define	JEMALLOC_DEBUG
+#endif
+
+/*
+ * The following are architecture-dependent, so conditionally define them for
+ * each supported architecture.
+ */
+#undef CPU_SPINWAIT
+#undef JEMALLOC_TLS_MODEL
+#undef STATIC_PAGE_SHIFT
+#undef LG_SIZEOF_PTR
+#undef LG_SIZEOF_INT
+#undef LG_SIZEOF_LONG
+#undef LG_SIZEOF_INTMAX_T
+
+#ifdef __i386__
+#  define LG_SIZEOF_PTR		2
+#  define CPU_SPINWAIT		__asm__ volatile("pause")
+#  define JEMALLOC_TLS_MODEL	__attribute__((tls_model("initial-exec")))
+#endif
+#ifdef __ia64__
+#  define LG_SIZEOF_PTR		3
+#endif
+#ifdef __sparc64__
+#  define LG_SIZEOF_PTR		3
+#  define JEMALLOC_TLS_MODEL	__attribute__((tls_model("initial-exec")))
+#endif
+#ifdef __amd64__
+#  define LG_SIZEOF_PTR		3
+#  define CPU_SPINWAIT		__asm__ volatile("pause")
+#  define JEMALLOC_TLS_MODEL	__attribute__((tls_model("initial-exec")))
+#endif
+#ifdef __arm__
+#  define LG_SIZEOF_PTR		2
+#endif
+#ifdef __mips__
+#  define LG_SIZEOF_PTR		2
+#endif
+#ifdef __powerpc64__
+#  define LG_SIZEOF_PTR		3
+#elif defined(__powerpc__)
+#  define LG_SIZEOF_PTR		2
+#endif
+
+#ifndef JEMALLOC_TLS_MODEL
+#  define JEMALLOC_TLS_MODEL	/* Default. */
+#endif
+#ifdef __clang__
+#  undef JEMALLOC_TLS_MODEL
+#  define JEMALLOC_TLS_MODEL	/* clang does not support tls_model yet. */
+#endif
+
+#define	STATIC_PAGE_SHIFT	PAGE_SHIFT
+#define	LG_SIZEOF_INT		2
+#define	LG_SIZEOF_LONG		LG_SIZEOF_PTR
+#define	LG_SIZEOF_INTMAX_T	3
+
+/* Disable lazy-lock machinery, mangle isthreaded, and adjust its type. */
+#undef JEMALLOC_LAZY_LOCK
+extern int __isthreaded;
+#define	isthreaded		((bool)__isthreaded)
+
+/* Mangle. */
+#define	open			_open
+#define	read			_read
+#define	write			_write
+#define	close			_close
+#define	pthread_mutex_lock	_pthread_mutex_lock
+#define	pthread_mutex_unlock	_pthread_mutex_unlock
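The LG_SIZEOF_* values above are log2 of the corresponding type sizes and must agree with the target ABI, or size computations throughout jemalloc break. A tiny compile-time sanity sketch; the values and the assert_* typedef names are illustrative, and the check assumes a 64-bit target such as amd64.

/* Hypothetical values matching the __amd64__ branch above. */
#define	LG_SIZEOF_PTR	3
#define	LG_SIZEOF_INT	2

/* C89-style static assertion: a negative array size is a compile error. */
typedef char assert_ptr_size[(sizeof(void *) == (1U << LG_SIZEOF_PTR)) ? 1 : -1];
typedef char assert_int_size[(sizeof(int) == (1U << LG_SIZEOF_INT)) ? 1 : -1];

int
main(void)
{

	return (0);
}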
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/include/jemalloc/jemalloc_defs.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/include/jemalloc/jemalloc_defs.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,239 @@
+/* include/jemalloc/jemalloc_defs.h.  Generated from jemalloc_defs.h.in by configure.  */
+/*
+ * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
+ * public APIs to be prefixed.  This makes it possible, with some care, to use
+ * multiple allocators simultaneously.
+ */
+/* #undef JEMALLOC_PREFIX */
+/* #undef JEMALLOC_CPREFIX */
+
+/*
+ * Name mangling for public symbols is controlled by --with-mangling and
+ * --with-jemalloc-prefix.  With default settings the je_ prefix is stripped by
+ * these macro definitions.
+ */
+#define je_malloc_conf malloc_conf
+#define je_malloc_message malloc_message
+#define je_malloc malloc
+#define je_calloc calloc
+#define je_posix_memalign posix_memalign
+#define je_aligned_alloc aligned_alloc
+#define je_realloc realloc
+#define je_free free
+#define je_malloc_usable_size malloc_usable_size
+#define je_malloc_stats_print malloc_stats_print
+#define je_mallctl mallctl
+#define je_mallctlnametomib mallctlnametomib
+#define je_mallctlbymib mallctlbymib
+/* #undef je_memalign */
+#define je_valloc valloc
+#define je_allocm allocm
+#define je_rallocm rallocm
+#define je_sallocm sallocm
+#define je_dallocm dallocm
+#define je_nallocm nallocm
+
+/*
+ * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
+ * For shared libraries, symbol visibility mechanisms prevent these symbols
+ * from being exported, but for static libraries, naming collisions are a real
+ * possibility.
+ */
+#define JEMALLOC_PRIVATE_NAMESPACE ""
+#define JEMALLOC_N(string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix) string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix
+
+/*
+ * Hyper-threaded CPUs may need a special instruction inside spin loops in
+ * order to yield to another virtual CPU.
+ */
+#define CPU_SPINWAIT __asm__ volatile("pause")
+
+/*
+ * Defined if OSAtomic*() functions are available, as provided by Darwin, and
+ * documented in the atomic(3) manual page.
+ */
+/* #undef JEMALLOC_OSATOMIC */
+
+/*
+ * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
+ * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
+ * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
+ * functions are defined in libgcc instead of being inlines).
+ */
+#define JE_FORCE_SYNC_COMPARE_AND_SWAP_4 
+
+/*
+ * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
+ * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
+ * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
+ * functions are defined in libgcc instead of being inlines).
+ */
+#define JE_FORCE_SYNC_COMPARE_AND_SWAP_8 
+
+/*
+ * Defined if OSSpin*() functions are available, as provided by Darwin, and
+ * documented in the spinlock(3) manual page.
+ */
+/* #undef JEMALLOC_OSSPIN */
+
+/*
+ * Defined if _malloc_thread_cleanup() exists.  At least in the case of
+ * FreeBSD, pthread_key_create() allocates, which, if used during malloc
+ * bootstrapping, will cause recursion into the pthreads library.  Therefore, if
+ * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
+ * malloc_tsd.
+ */
+#define JEMALLOC_MALLOC_THREAD_CLEANUP 
+
+/*
+ * Defined if threaded initialization is known to be safe on this platform.
+ * Among other things, it must be possible to initialize a mutex without
+ * triggering allocation in order for threaded allocation to be safe.
+ */
+/* #undef JEMALLOC_THREADED_INIT */
+
+/*
+ * Defined if the pthreads implementation defines
+ * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
+ * to avoid recursive allocation during mutex initialization.
+ */
+#define JEMALLOC_MUTEX_INIT_CB 1
+
+/* Defined if __attribute__((...)) syntax is supported. */
+#define JEMALLOC_HAVE_ATTR 
+#ifdef JEMALLOC_HAVE_ATTR
+#  define JEMALLOC_CATTR(s, a) __attribute__((s))
+#  define JEMALLOC_ATTR(s) JEMALLOC_CATTR(s,)
+#else
+#  define JEMALLOC_CATTR(s, a) a
+#  define JEMALLOC_ATTR(s) JEMALLOC_CATTR(s,)
+#endif
+
+/* Defined if sbrk() is supported. */
+#define JEMALLOC_HAVE_SBRK 
+
+/* Non-empty if the tls_model attribute is supported. */
+#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
+
+/* JEMALLOC_CC_SILENCE enables code that silences unhelpful compiler warnings. */
+#define JEMALLOC_CC_SILENCE 
+
+/*
+ * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
+ * inline functions.
+ */
+/* #undef JEMALLOC_DEBUG */
+
+/* JEMALLOC_STATS enables statistics calculation. */
+#define JEMALLOC_STATS 
+
+/* JEMALLOC_PROF enables allocation profiling. */
+/* #undef JEMALLOC_PROF */
+
+/* Use libunwind for profile backtracing if defined. */
+/* #undef JEMALLOC_PROF_LIBUNWIND */
+
+/* Use libgcc for profile backtracing if defined. */
+/* #undef JEMALLOC_PROF_LIBGCC */
+
+/* Use gcc intrinsics for profile backtracing if defined. */
+/* #undef JEMALLOC_PROF_GCC */
+
+/*
+ * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
+ * This makes it possible to allocate/deallocate objects without any locking
+ * when the cache is in the steady state.
+ */
+#define JEMALLOC_TCACHE 
+
+/*
+ * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
+ * segment (DSS).
+ */
+#define JEMALLOC_DSS 
+
+/* Support memory filling (junk/zero/quarantine/redzone). */
+#define JEMALLOC_FILL 
+
+/* Support the experimental API. */
+#define JEMALLOC_EXPERIMENTAL 
+
+/* Support utrace(2)-based tracing. */
+#define JEMALLOC_UTRACE 
+
+/* Support Valgrind. */
+/* #undef JEMALLOC_VALGRIND */
+
+/* Support optional abort() on OOM. */
+#define JEMALLOC_XMALLOC 
+
+/* Support lazy locking (avoid locking unless a second thread is launched). */
+#define JEMALLOC_LAZY_LOCK 
+
+/* One page is 2^STATIC_PAGE_SHIFT bytes. */
+#define STATIC_PAGE_SHIFT 12
+
+/*
+ * If defined, use munmap() to unmap freed chunks, rather than storing them for
+ * later reuse.  This is automatically disabled if configuration determines
+ * that common sequences of mmap()/munmap() calls will cause virtual memory map
+ * holes.
+ */
+#define JEMALLOC_MUNMAP 
+
+/* TLS is used to map arenas and magazine caches to threads. */
+#define JEMALLOC_TLS 
+
+/*
+ * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
+ * within jemalloc-owned chunks before dereferencing them.
+ */
+/* #undef JEMALLOC_IVSALLOC */
+
+/*
+ * Define overrides for non-standard allocator-related functions if they
+ * are present on the system.
+ */
+/* #undef JEMALLOC_OVERRIDE_MEMALIGN */
+#define JEMALLOC_OVERRIDE_VALLOC 
+
+/*
+ * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
+ */
+/* #undef JEMALLOC_ZONE */
+/* #undef JEMALLOC_ZONE_VERSION */
+
+/* If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). */
+/* #undef JEMALLOC_MREMAP_FIXED */
+
+/*
+ * Methods for purging unused pages differ between operating systems.
+ *
+ *   madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
+ *                                 such that new pages will be demand-zeroed if
+ *                                 the address region is later touched.
+ *   madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being
+ *                             unused, such that they will be discarded rather
+ *                             than swapped out.
+ */
+/* #undef JEMALLOC_PURGE_MADVISE_DONTNEED */
+#define JEMALLOC_PURGE_MADVISE_FREE 
+#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
+#  define JEMALLOC_MADV_PURGE MADV_DONTNEED
+#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
+#  define JEMALLOC_MADV_PURGE MADV_FREE
+#else
+#  error "No method defined for purging unused dirty pages."
+#endif
+
+/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
+#define LG_SIZEOF_PTR 3
+
+/* sizeof(int) == 2^LG_SIZEOF_INT. */
+#define LG_SIZEOF_INT 2
+
+/* sizeof(long) == 2^LG_SIZEOF_LONG. */
+#define LG_SIZEOF_LONG 3
+
+/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
+#define LG_SIZEOF_INTMAX_T 3
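The purge selection at the end of the header picks between MADV_DONTNEED (Linux semantics: pages are discarded immediately) and MADV_FREE (FreeBSD/Darwin semantics: pages are merely marked reclaimable). A standalone sketch of what a purge boils down to; the PURGE_ADVICE macro name is invented here, while the real code uses JEMALLOC_MADV_PURGE.

#include <sys/mman.h>
#include <stdio.h>

#ifdef MADV_FREE
#  define PURGE_ADVICE	MADV_FREE	/* FreeBSD, Darwin */
#else
#  define PURGE_ADVICE	MADV_DONTNEED	/* Linux */
#endif

int
main(void)
{
	size_t len = 1 << 20;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);

	if (p == MAP_FAILED)
		return (1);
	/* The mapping stays valid; the kernel may reclaim the pages
	 * instead of swapping them out. */
	if (madvise(p, len, PURGE_ADVICE) != 0)
		perror("madvise");
	munmap(p, len);
	return (0);
}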
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/src/arena.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/src/arena.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,2248 @@
+#define	JEMALLOC_ARENA_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
+arena_bin_info_t	arena_bin_info[NBINS];
+
+JEMALLOC_ATTR(aligned(CACHELINE))
+const uint8_t	small_size2bin[] = {
+#define	S2B_8(i)	i,
+#define	S2B_16(i)	S2B_8(i) S2B_8(i)
+#define	S2B_32(i)	S2B_16(i) S2B_16(i)
+#define	S2B_64(i)	S2B_32(i) S2B_32(i)
+#define	S2B_128(i)	S2B_64(i) S2B_64(i)
+#define	S2B_256(i)	S2B_128(i) S2B_128(i)
+#define	S2B_512(i)	S2B_256(i) S2B_256(i)
+#define	S2B_1024(i)	S2B_512(i) S2B_512(i)
+#define	S2B_2048(i)	S2B_1024(i) S2B_1024(i)
+#define	S2B_4096(i)	S2B_2048(i) S2B_2048(i)
+#define	S2B_8192(i)	S2B_4096(i) S2B_4096(i)
+#define	SIZE_CLASS(bin, delta, size)					\
+	S2B_##delta(bin)
+	SIZE_CLASSES
+#undef S2B_8
+#undef S2B_16
+#undef S2B_32
+#undef S2B_64
+#undef S2B_128
+#undef S2B_256
+#undef S2B_512
+#undef S2B_1024
+#undef S2B_2048
+#undef S2B_4096
+#undef S2B_8192
+#undef SIZE_CLASS
+};
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static void	arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
+    bool large, bool zero);
+static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
+static void	arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
+static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
+    bool zero);
+static void	arena_purge(arena_t *arena, bool all);
+static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
+static void	arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
+    arena_run_t *run, size_t oldsize, size_t newsize);
+static void	arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
+    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
+static arena_run_t	*arena_bin_runs_first(arena_bin_t *bin);
+static void	arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run);
+static void	arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run);
+static arena_run_t *arena_bin_nonfull_run_tryget(arena_bin_t *bin);
+static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
+static void	*arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
+static void	arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
+    arena_bin_t *bin);
+static void	arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
+    arena_run_t *run, arena_bin_t *bin);
+static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
+    arena_run_t *run, arena_bin_t *bin);
+static void	arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
+    void *ptr, size_t oldsize, size_t size);
+static bool	arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
+    void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
+static bool	arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
+    size_t extra, bool zero);
+static size_t	bin_info_run_size_calc(arena_bin_info_t *bin_info,
+    size_t min_run_size);
+static void	bin_info_init(void);
+
+/******************************************************************************/
+
+static inline int
+arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
+{
+	uintptr_t a_mapelm = (uintptr_t)a;
+	uintptr_t b_mapelm = (uintptr_t)b;
+
+	assert(a != NULL);
+	assert(b != NULL);
+
+	return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
+}
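arena_run_comp() uses the branch-free three-way comparison idiom
(a > b) - (a < b), which yields -1, 0, or 1 and avoids the overflow that a
plain subtraction of two pointer-sized values could suffer when narrowed to
int. A minimal sketch of the same idiom in isolation:

    /* Three-way compare of map element addresses: -1, 0, or +1. */
    static inline int
    ptr_cmp(uintptr_t a, uintptr_t b)
    {
        return ((a > b) - (a < b));
    }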
+
+/* Generate red-black tree functions. */
+rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
+    u.rb_link, arena_run_comp)
+
+static inline int
+arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
+{
+	int ret;
+	size_t a_size = a->bits & ~PAGE_MASK;
+	size_t b_size = b->bits & ~PAGE_MASK;
+
+	assert((a->bits & CHUNK_MAP_KEY) == CHUNK_MAP_KEY || (a->bits &
+	    CHUNK_MAP_DIRTY) == (b->bits & CHUNK_MAP_DIRTY));
+
+	ret = (a_size > b_size) - (a_size < b_size);
+	if (ret == 0) {
+		uintptr_t a_mapelm, b_mapelm;
+
+		if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
+			a_mapelm = (uintptr_t)a;
+		else {
+			/*
+			 * Treat keys as though they are lower than anything
+			 * else.
+			 */
+			a_mapelm = 0;
+		}
+		b_mapelm = (uintptr_t)b;
+
+		ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
+	}
+
+	return (ret);
+}
+
+/* Generate red-black tree functions. */
+rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
+    u.rb_link, arena_avail_comp)
+
+static inline void *
+arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
+{
+	void *ret;
+	unsigned regind;
+	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
+	    (uintptr_t)bin_info->bitmap_offset);
+
+	assert(run->nfree > 0);
+	assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);
+
+	regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
+	ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
+	    (uintptr_t)(bin_info->reg_interval * regind));
+	run->nfree--;
+	if (regind == run->nextind)
+		run->nextind++;
+	assert(regind < run->nextind);
+	return (ret);
+}
+
+static inline void
+arena_run_reg_dalloc(arena_run_t *run, void *ptr)
+{
+	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+	size_t binind = arena_bin_index(chunk->arena, run->bin);
+	arena_bin_info_t *bin_info = &arena_bin_info[binind];
+	unsigned regind = arena_run_regind(run, bin_info, ptr);
+	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
+	    (uintptr_t)bin_info->bitmap_offset);
+
+	assert(run->nfree < bin_info->nregs);
+	/* Freeing an interior pointer can cause assertion failure. */
+	assert(((uintptr_t)ptr - ((uintptr_t)run +
+	    (uintptr_t)bin_info->reg0_offset)) %
+	    (uintptr_t)bin_info->reg_interval == 0);
+	assert((uintptr_t)ptr >= (uintptr_t)run +
+	    (uintptr_t)bin_info->reg0_offset);
+	/* Freeing an unallocated pointer can cause assertion failure. */
+	assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));
+
+	bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
+	run->nfree++;
+}
+
+static inline void
+arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
+{
+	size_t i;
+	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
+
+	for (i = 0; i < PAGE / sizeof(size_t); i++)
+		assert(p[i] == 0);
+}
+
+static void
+arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
+    bool zero)
+{
+	arena_chunk_t *chunk;
+	size_t run_ind, total_pages, need_pages, rem_pages, i;
+	size_t flag_dirty;
+	arena_avail_tree_t *runs_avail;
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
+	flag_dirty = chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY;
+	runs_avail = (flag_dirty != 0) ? &arena->runs_avail_dirty :
+	    &arena->runs_avail_clean;
+	total_pages = (chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) >>
+	    LG_PAGE;
+	assert((chunk->map[run_ind+total_pages-1-map_bias].bits &
+	    CHUNK_MAP_DIRTY) == flag_dirty);
+	need_pages = (size >> LG_PAGE);
+	assert(need_pages > 0);
+	assert(need_pages <= total_pages);
+	rem_pages = total_pages - need_pages;
+
+	arena_avail_tree_remove(runs_avail, &chunk->map[run_ind-map_bias]);
+	if (config_stats) {
+		/*
+		 * Update stats_cactive if nactive is crossing a chunk
+		 * multiple.
+		 */
+		size_t cactive_diff = CHUNK_CEILING((arena->nactive +
+		    need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
+		    LG_PAGE);
+		if (cactive_diff != 0)
+			stats_cactive_add(cactive_diff);
+	}
+	arena->nactive += need_pages;
+
+	/* Keep track of trailing unused pages for later use. */
+	if (rem_pages > 0) {
+		if (flag_dirty != 0) {
+			chunk->map[run_ind+need_pages-map_bias].bits =
+			    (rem_pages << LG_PAGE) | CHUNK_MAP_DIRTY;
+			chunk->map[run_ind+total_pages-1-map_bias].bits =
+			    (rem_pages << LG_PAGE) | CHUNK_MAP_DIRTY;
+		} else {
+			chunk->map[run_ind+need_pages-map_bias].bits =
+			    (rem_pages << LG_PAGE) |
+			    (chunk->map[run_ind+need_pages-map_bias].bits &
+			    CHUNK_MAP_UNZEROED);
+			chunk->map[run_ind+total_pages-1-map_bias].bits =
+			    (rem_pages << LG_PAGE) |
+			    (chunk->map[run_ind+total_pages-1-map_bias].bits &
+			    CHUNK_MAP_UNZEROED);
+		}
+		arena_avail_tree_insert(runs_avail,
+		    &chunk->map[run_ind+need_pages-map_bias]);
+	}
+
+	/* Update dirty page accounting. */
+	if (flag_dirty != 0) {
+		chunk->ndirty -= need_pages;
+		arena->ndirty -= need_pages;
+	}
+
+	/*
+	 * Update the page map separately for large vs. small runs, since it is
+	 * possible to avoid iteration for large mallocs.
+	 */
+	if (large) {
+		if (zero) {
+			if (flag_dirty == 0) {
+				/*
+				 * The run is clean, so some pages may be
+				 * zeroed (i.e. never before touched).
+				 */
+				for (i = 0; i < need_pages; i++) {
+					if ((chunk->map[run_ind+i-map_bias].bits
+					    & CHUNK_MAP_UNZEROED) != 0) {
+						VALGRIND_MAKE_MEM_UNDEFINED(
+						    (void *)((uintptr_t)
+						    chunk + ((run_ind+i) <<
+						    LG_PAGE)), PAGE);
+						memset((void *)((uintptr_t)
+						    chunk + ((run_ind+i) <<
+						    LG_PAGE)), 0, PAGE);
+					} else if (config_debug) {
+						VALGRIND_MAKE_MEM_DEFINED(
+						    (void *)((uintptr_t)
+						    chunk + ((run_ind+i) <<
+						    LG_PAGE)), PAGE);
+						arena_chunk_validate_zeroed(
+						    chunk, run_ind+i);
+					}
+				}
+			} else {
+				/*
+				 * The run is dirty, so all pages must be
+				 * zeroed.
+				 */
+				VALGRIND_MAKE_MEM_UNDEFINED((void
+				    *)((uintptr_t)chunk + (run_ind <<
+				    LG_PAGE)), (need_pages << LG_PAGE));
+				memset((void *)((uintptr_t)chunk + (run_ind <<
+				    LG_PAGE)), 0, (need_pages << LG_PAGE));
+			}
+		}
+
+		/*
+		 * Set the last element first, in case the run only contains one
+		 * page (i.e. both statements set the same element).
+		 */
+		chunk->map[run_ind+need_pages-1-map_bias].bits =
+		    CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED | flag_dirty;
+		chunk->map[run_ind-map_bias].bits = size | flag_dirty |
+		    CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+	} else {
+		assert(zero == false);
+		/*
+		 * Propagate the dirty and unzeroed flags to the allocated
+		 * small run, so that arena_dalloc_bin_run() has the ability to
+		 * conditionally trim clean pages.
+		 */
+		chunk->map[run_ind-map_bias].bits =
+		    (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) |
+		    CHUNK_MAP_ALLOCATED | flag_dirty;
+		/*
+		 * The first page will always be dirtied during small run
+		 * initialization, so a validation failure here would not
+		 * actually cause an observable failure.
+		 */
+		if (config_debug && flag_dirty == 0 &&
+		    (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED)
+		    == 0)
+			arena_chunk_validate_zeroed(chunk, run_ind);
+		for (i = 1; i < need_pages - 1; i++) {
+			chunk->map[run_ind+i-map_bias].bits = (i << LG_PAGE)
+			    | (chunk->map[run_ind+i-map_bias].bits &
+			    CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED;
+			if (config_debug && flag_dirty == 0 &&
+			    (chunk->map[run_ind+i-map_bias].bits &
+			    CHUNK_MAP_UNZEROED) == 0)
+				arena_chunk_validate_zeroed(chunk, run_ind+i);
+		}
+		chunk->map[run_ind+need_pages-1-map_bias].bits = ((need_pages
+		    - 1) << LG_PAGE) |
+		    (chunk->map[run_ind+need_pages-1-map_bias].bits &
+		    CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED | flag_dirty;
+		if (config_debug && flag_dirty == 0 &&
+		    (chunk->map[run_ind+need_pages-1-map_bias].bits &
+		    CHUNK_MAP_UNZEROED) == 0) {
+			arena_chunk_validate_zeroed(chunk,
+			    run_ind+need_pages-1);
+		}
+	}
+}
+
+static arena_chunk_t *
+arena_chunk_alloc(arena_t *arena)
+{
+	arena_chunk_t *chunk;
+	size_t i;
+
+	if (arena->spare != NULL) {
+		arena_avail_tree_t *runs_avail;
+
+		chunk = arena->spare;
+		arena->spare = NULL;
+
+		/* Insert the run into the appropriate runs_avail_* tree. */
+		if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
+			runs_avail = &arena->runs_avail_clean;
+		else
+			runs_avail = &arena->runs_avail_dirty;
+		assert((chunk->map[0].bits & ~PAGE_MASK) == arena_maxclass);
+		assert((chunk->map[chunk_npages-1-map_bias].bits & ~PAGE_MASK)
+		    == arena_maxclass);
+		assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) ==
+		    (chunk->map[chunk_npages-1-map_bias].bits &
+		    CHUNK_MAP_DIRTY));
+		arena_avail_tree_insert(runs_avail, &chunk->map[0]);
+	} else {
+		bool zero;
+		size_t unzeroed;
+
+		zero = false;
+		malloc_mutex_unlock(&arena->lock);
+		chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
+		    false, &zero);
+		malloc_mutex_lock(&arena->lock);
+		if (chunk == NULL)
+			return (NULL);
+		if (config_stats)
+			arena->stats.mapped += chunksize;
+
+		chunk->arena = arena;
+		ql_elm_new(chunk, link_dirty);
+		chunk->dirtied = false;
+
+		/*
+		 * Claim that no pages are in use, since the header is merely
+		 * overhead.
+		 */
+		chunk->ndirty = 0;
+
+		/*
+		 * Initialize the map to contain one maximal free untouched run.
+		 * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
+		 * chunk.
+		 */
+		unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
+		chunk->map[0].bits = arena_maxclass | unzeroed;
+		/*
+		 * There is no need to initialize the internal page map entries
+		 * unless the chunk is not zeroed.
+		 */
+		if (zero == false) {
+			for (i = map_bias+1; i < chunk_npages-1; i++)
+				chunk->map[i-map_bias].bits = unzeroed;
+		} else if (config_debug) {
+			for (i = map_bias+1; i < chunk_npages-1; i++)
+				assert(chunk->map[i-map_bias].bits == unzeroed);
+		}
+		chunk->map[chunk_npages-1-map_bias].bits = arena_maxclass |
+		    unzeroed;
+
+		/* Insert the run into the runs_avail_clean tree. */
+		arena_avail_tree_insert(&arena->runs_avail_clean,
+		    &chunk->map[0]);
+	}
+
+	return (chunk);
+}
+
+static void
+arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
+{
+	arena_avail_tree_t *runs_avail;
+
+	/*
+	 * Remove run from the appropriate runs_avail_* tree, so that the arena
+	 * does not use it.
+	 */
+	if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
+		runs_avail = &arena->runs_avail_clean;
+	else
+		runs_avail = &arena->runs_avail_dirty;
+	arena_avail_tree_remove(runs_avail, &chunk->map[0]);
+
+	if (arena->spare != NULL) {
+		arena_chunk_t *spare = arena->spare;
+
+		arena->spare = chunk;
+		if (spare->dirtied) {
+			ql_remove(&chunk->arena->chunks_dirty, spare,
+			    link_dirty);
+			arena->ndirty -= spare->ndirty;
+		}
+		malloc_mutex_unlock(&arena->lock);
+		chunk_dealloc((void *)spare, chunksize, true);
+		malloc_mutex_lock(&arena->lock);
+		if (config_stats)
+			arena->stats.mapped -= chunksize;
+	} else
+		arena->spare = chunk;
+}
+
+static arena_run_t *
+arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
+{
+	arena_chunk_t *chunk;
+	arena_run_t *run;
+	arena_chunk_map_t *mapelm, key;
+
+	assert(size <= arena_maxclass);
+	assert((size & PAGE_MASK) == 0);
+
+	/* Search the arena's chunks for the lowest best fit. */
+	key.bits = size | CHUNK_MAP_KEY;
+	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
+	if (mapelm != NULL) {
+		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
+		size_t pageind = (((uintptr_t)mapelm -
+		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
+		    + map_bias;
+
+		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
+		    LG_PAGE));
+		arena_run_split(arena, run, size, large, zero);
+		return (run);
+	}
+	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
+	if (mapelm != NULL) {
+		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
+		size_t pageind = (((uintptr_t)mapelm -
+		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
+		    + map_bias;
+
+		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
+		    LG_PAGE));
+		arena_run_split(arena, run, size, large, zero);
+		return (run);
+	}
+
+	/*
+	 * No usable runs.  Create a new chunk from which to allocate the run.
+	 */
+	chunk = arena_chunk_alloc(arena);
+	if (chunk != NULL) {
+		run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
+		arena_run_split(arena, run, size, large, zero);
+		return (run);
+	}
+
+	/*
+	 * arena_chunk_alloc() failed, but another thread may have made
+	 * sufficient memory available while this one dropped arena->lock in
+	 * arena_chunk_alloc(), so search one more time.
+	 */
+	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
+	if (mapelm != NULL) {
+		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
+		size_t pageind = (((uintptr_t)mapelm -
+		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
+		    + map_bias;
+
+		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
+		    LG_PAGE));
+		arena_run_split(arena, run, size, large, zero);
+		return (run);
+	}
+	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
+	if (mapelm != NULL) {
+		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
+		size_t pageind = (((uintptr_t)mapelm -
+		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
+		    + map_bias;
+
+		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
+		    LG_PAGE));
+		arena_run_split(arena, run, size, large, zero);
+		return (run);
+	}
+
+	return (NULL);
+}
+
+static inline void
+arena_maybe_purge(arena_t *arena)
+{
+
+	/* Enforce opt_lg_dirty_mult. */
+	if (opt_lg_dirty_mult >= 0 && arena->ndirty > arena->npurgatory &&
+	    (arena->ndirty - arena->npurgatory) > chunk_npages &&
+	    (arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
+	    arena->npurgatory))
+		arena_purge(arena, false);
+}
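arena_maybe_purge() enforces the active:dirty page ratio selected by
opt_lg_dirty_mult. A worked example with assumed numbers (the default ratio
and chunk size are configuration-dependent):

    opt_lg_dirty_mult   = 3           allow ndirty up to nactive / 2^3
    nactive             = 8192 pages  purge threshold = 1024 dirty pages
    ndirty - npurgatory = 1300 pages  1300 > 1024 and 1300 > chunk_npages,
                                      so arena_purge(arena, false) is called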
+
+static inline void
+arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
+{
+	ql_head(arena_chunk_map_t) mapelms;
+	arena_chunk_map_t *mapelm;
+	size_t pageind, flag_unzeroed;
+	size_t ndirty;
+	size_t nmadvise;
+
+	ql_new(&mapelms);
+
+	flag_unzeroed =
+#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
+   /*
+    * madvise(..., MADV_DONTNEED) results in zero-filled pages for anonymous
+    * mappings, but not for file-backed mappings.
+    */
+	    0
+#else
+	    CHUNK_MAP_UNZEROED
+#endif
+	    ;
+
+	/*
+	 * If chunk is the spare, temporarily re-allocate it, 1) so that its
+	 * run is reinserted into runs_avail_dirty, and 2) so that it cannot be
+	 * completely discarded by another thread while arena->lock is dropped
+	 * by this thread.  Note that the arena_run_dalloc() call will
+	 * implicitly deallocate the chunk, so no explicit action is required
+	 * in this function to deallocate the chunk.
+	 *
+	 * Note that once a chunk contains dirty pages, it cannot again contain
+	 * a single run unless 1) it is a dirty run, or 2) this function purges
+	 * dirty pages and causes the transition to a single clean run.  Thus
+	 * (chunk == arena->spare) is possible, but it is not possible for
+	 * this function to be called on the spare unless it contains a dirty
+	 * run.
+	 */
+	if (chunk == arena->spare) {
+		assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) != 0);
+		arena_chunk_alloc(arena);
+	}
+
+	/* Temporarily allocate all free dirty runs within chunk. */
+	for (pageind = map_bias; pageind < chunk_npages;) {
+		mapelm = &chunk->map[pageind-map_bias];
+		if ((mapelm->bits & CHUNK_MAP_ALLOCATED) == 0) {
+			size_t npages;
+
+			npages = mapelm->bits >> LG_PAGE;
+			assert(pageind + npages <= chunk_npages);
+			if (mapelm->bits & CHUNK_MAP_DIRTY) {
+				size_t i;
+
+				arena_avail_tree_remove(
+				    &arena->runs_avail_dirty, mapelm);
+
+				mapelm->bits = (npages << LG_PAGE) |
+				    flag_unzeroed | CHUNK_MAP_LARGE |
+				    CHUNK_MAP_ALLOCATED;
+				/*
+				 * Update internal elements in the page map, so
+				 * that CHUNK_MAP_UNZEROED is properly set.
+				 */
+				for (i = 1; i < npages - 1; i++) {
+					chunk->map[pageind+i-map_bias].bits =
+					    flag_unzeroed;
+				}
+				if (npages > 1) {
+					chunk->map[
+					    pageind+npages-1-map_bias].bits =
+					    flag_unzeroed | CHUNK_MAP_LARGE |
+					    CHUNK_MAP_ALLOCATED;
+				}
+
+				if (config_stats) {
+					/*
+					 * Update stats_cactive if nactive is
+					 * crossing a chunk multiple.
+					 */
+					size_t cactive_diff =
+					    CHUNK_CEILING((arena->nactive +
+					    npages) << LG_PAGE) -
+					    CHUNK_CEILING(arena->nactive <<
+					    LG_PAGE);
+					if (cactive_diff != 0)
+						stats_cactive_add(cactive_diff);
+				}
+				arena->nactive += npages;
+				/* Append to list for later processing. */
+				ql_elm_new(mapelm, u.ql_link);
+				ql_tail_insert(&mapelms, mapelm, u.ql_link);
+			}
+
+			pageind += npages;
+		} else {
+			/* Skip allocated run. */
+			if (mapelm->bits & CHUNK_MAP_LARGE)
+				pageind += mapelm->bits >> LG_PAGE;
+			else {
+				arena_run_t *run = (arena_run_t *)((uintptr_t)
+				    chunk + (uintptr_t)(pageind << LG_PAGE));
+
+				assert((mapelm->bits >> LG_PAGE) == 0);
+				size_t binind = arena_bin_index(arena,
+				    run->bin);
+				arena_bin_info_t *bin_info =
+				    &arena_bin_info[binind];
+				pageind += bin_info->run_size >> LG_PAGE;
+			}
+		}
+	}
+	assert(pageind == chunk_npages);
+
+	if (config_debug)
+		ndirty = chunk->ndirty;
+	if (config_stats)
+		arena->stats.purged += chunk->ndirty;
+	arena->ndirty -= chunk->ndirty;
+	chunk->ndirty = 0;
+	ql_remove(&arena->chunks_dirty, chunk, link_dirty);
+	chunk->dirtied = false;
+
+	malloc_mutex_unlock(&arena->lock);
+	if (config_stats)
+		nmadvise = 0;
+	ql_foreach(mapelm, &mapelms, u.ql_link) {
+		size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
+		    sizeof(arena_chunk_map_t)) + map_bias;
+		size_t npages = mapelm->bits >> LG_PAGE;
+
+		assert(pageind + npages <= chunk_npages);
+		assert(ndirty >= npages);
+		if (config_debug)
+			ndirty -= npages;
+
+		madvise((void *)((uintptr_t)chunk + (pageind << LG_PAGE)),
+		    (npages << LG_PAGE), JEMALLOC_MADV_PURGE);
+		if (config_stats)
+			nmadvise++;
+	}
+	assert(ndirty == 0);
+	malloc_mutex_lock(&arena->lock);
+	if (config_stats)
+		arena->stats.nmadvise += nmadvise;
+
+	/* Deallocate runs. */
+	for (mapelm = ql_first(&mapelms); mapelm != NULL;
+	    mapelm = ql_first(&mapelms)) {
+		size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
+		    sizeof(arena_chunk_map_t)) + map_bias;
+		arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
+		    (uintptr_t)(pageind << LG_PAGE));
+
+		ql_remove(&mapelms, mapelm, u.ql_link);
+		arena_run_dalloc(arena, run, false);
+	}
+}
+
+static void
+arena_purge(arena_t *arena, bool all)
+{
+	arena_chunk_t *chunk;
+	size_t npurgatory;
+	if (config_debug) {
+		size_t ndirty = 0;
+
+		ql_foreach(chunk, &arena->chunks_dirty, link_dirty) {
+		    assert(chunk->dirtied);
+		    ndirty += chunk->ndirty;
+		}
+		assert(ndirty == arena->ndirty);
+	}
+	assert(arena->ndirty > arena->npurgatory || all);
+	assert(arena->ndirty - arena->npurgatory > chunk_npages || all);
+	assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
+	    arena->npurgatory) || all);
+
+	if (config_stats)
+		arena->stats.npurge++;
+
+	/*
+	 * Compute the minimum number of pages that this thread should try to
+	 * purge, and add the result to arena->npurgatory.  This will keep
+	 * multiple threads from racing to reduce ndirty below the threshold.
+	 */
+	npurgatory = arena->ndirty - arena->npurgatory;
+	if (all == false) {
+		assert(npurgatory >= arena->nactive >> opt_lg_dirty_mult);
+		npurgatory -= arena->nactive >> opt_lg_dirty_mult;
+	}
+	arena->npurgatory += npurgatory;
+
+	while (npurgatory > 0) {
+		/* Get next chunk with dirty pages. */
+		chunk = ql_first(&arena->chunks_dirty);
+		if (chunk == NULL) {
+			/*
+			 * This thread was unable to purge as many pages as
+			 * originally intended, due to races with other threads
+			 * that either did some of the purging work, or re-used
+			 * dirty pages.
+			 */
+			arena->npurgatory -= npurgatory;
+			return;
+		}
+		while (chunk->ndirty == 0) {
+			ql_remove(&arena->chunks_dirty, chunk, link_dirty);
+			chunk->dirtied = false;
+			chunk = ql_first(&arena->chunks_dirty);
+			if (chunk == NULL) {
+				/* Same logic as for above. */
+				arena->npurgatory -= npurgatory;
+				return;
+			}
+		}
+
+		if (chunk->ndirty > npurgatory) {
+			/*
+			 * This thread will, at a minimum, purge all the dirty
+			 * pages in chunk, so set npurgatory to reflect this
+			 * thread's commitment to purge the pages.  This tends
+			 * to reduce the chances of the following scenario:
+			 *
+			 * 1) This thread sets arena->npurgatory such that
+			 *    (arena->ndirty - arena->npurgatory) is at the
+			 *    threshold.
+			 * 2) This thread drops arena->lock.
+			 * 3) Another thread causes one or more pages to be
+			 *    dirtied, and immediately determines that it must
+			 *    purge dirty pages.
+			 *
+			 * If this scenario *does* play out, that's okay,
+			 * because all of the purging work being done really
+			 * needs to happen.
+			 */
+			arena->npurgatory += chunk->ndirty - npurgatory;
+			npurgatory = chunk->ndirty;
+		}
+
+		arena->npurgatory -= chunk->ndirty;
+		npurgatory -= chunk->ndirty;
+		arena_chunk_purge(arena, chunk);
+	}
+}
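Continuing the numbers used for arena_maybe_purge() above, the purge loop
first records how much this thread commits to purge, so that concurrent
threads do not all chase the same dirty pages:

    npurgatory  = arena->ndirty - arena->npurgatory    = 1300 - 0 = 1300
    npurgatory -= arena->nactive >> opt_lg_dirty_mult    (1300 - 1024 = 276)
    arena->npurgatory += 276

Each chunk's dirty pages are then subtracted from both counters as it is
purged; if a chunk holds more dirty pages than the remaining commitment, the
commitment is raised to cover the whole chunk, as the comment above explains.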
+
+void
+arena_purge_all(arena_t *arena)
+{
+
+	malloc_mutex_lock(&arena->lock);
+	arena_purge(arena, true);
+	malloc_mutex_unlock(&arena->lock);
+}
+
+static void
+arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
+{
+	arena_chunk_t *chunk;
+	size_t size, run_ind, run_pages, flag_dirty;
+	arena_avail_tree_t *runs_avail;
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
+	assert(run_ind >= map_bias);
+	assert(run_ind < chunk_npages);
+	if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_LARGE) != 0) {
+		size = chunk->map[run_ind-map_bias].bits & ~PAGE_MASK;
+		assert(size == PAGE ||
+		    (chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
+		    ~PAGE_MASK) == 0);
+		assert((chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
+		    CHUNK_MAP_LARGE) != 0);
+		assert((chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
+		    CHUNK_MAP_ALLOCATED) != 0);
+	} else {
+		size_t binind = arena_bin_index(arena, run->bin);
+		arena_bin_info_t *bin_info = &arena_bin_info[binind];
+		size = bin_info->run_size;
+	}
+	run_pages = (size >> LG_PAGE);
+	if (config_stats) {
+		/*
+		 * Update stats_cactive if nactive is crossing a chunk
+		 * multiple.
+		 */
+		size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) -
+		    CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE);
+		if (cactive_diff != 0)
+			stats_cactive_sub(cactive_diff);
+	}
+	arena->nactive -= run_pages;
+
+	/*
+	 * The run is dirty if the caller claims to have dirtied it, as well as
+	 * if it was already dirty before being allocated.
+	 */
+	if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) != 0)
+		dirty = true;
+	flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
+	runs_avail = dirty ? &arena->runs_avail_dirty :
+	    &arena->runs_avail_clean;
+
+	/* Mark pages as unallocated in the chunk map. */
+	if (dirty) {
+		chunk->map[run_ind-map_bias].bits = size | CHUNK_MAP_DIRTY;
+		chunk->map[run_ind+run_pages-1-map_bias].bits = size |
+		    CHUNK_MAP_DIRTY;
+
+		chunk->ndirty += run_pages;
+		arena->ndirty += run_pages;
+	} else {
+		chunk->map[run_ind-map_bias].bits = size |
+		    (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED);
+		chunk->map[run_ind+run_pages-1-map_bias].bits = size |
+		    (chunk->map[run_ind+run_pages-1-map_bias].bits &
+		    CHUNK_MAP_UNZEROED);
+	}
+
+	/* Try to coalesce forward. */
+	if (run_ind + run_pages < chunk_npages &&
+	    (chunk->map[run_ind+run_pages-map_bias].bits & CHUNK_MAP_ALLOCATED)
+	    == 0 && (chunk->map[run_ind+run_pages-map_bias].bits &
+	    CHUNK_MAP_DIRTY) == flag_dirty) {
+		size_t nrun_size = chunk->map[run_ind+run_pages-map_bias].bits &
+		    ~PAGE_MASK;
+		size_t nrun_pages = nrun_size >> LG_PAGE;
+
+		/*
+		 * Remove successor from runs_avail; the coalesced run is
+		 * inserted later.
+		 */
+		assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
+		    & ~PAGE_MASK) == nrun_size);
+		assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
+		    & CHUNK_MAP_ALLOCATED) == 0);
+		assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
+		    & CHUNK_MAP_DIRTY) == flag_dirty);
+		arena_avail_tree_remove(runs_avail,
+		    &chunk->map[run_ind+run_pages-map_bias]);
+
+		size += nrun_size;
+		run_pages += nrun_pages;
+
+		chunk->map[run_ind-map_bias].bits = size |
+		    (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
+		chunk->map[run_ind+run_pages-1-map_bias].bits = size |
+		    (chunk->map[run_ind+run_pages-1-map_bias].bits &
+		    CHUNK_MAP_FLAGS_MASK);
+	}
+
+	/* Try to coalesce backward. */
+	if (run_ind > map_bias && (chunk->map[run_ind-1-map_bias].bits &
+	    CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[run_ind-1-map_bias].bits &
+	    CHUNK_MAP_DIRTY) == flag_dirty) {
+		size_t prun_size = chunk->map[run_ind-1-map_bias].bits &
+		    ~PAGE_MASK;
+		size_t prun_pages = prun_size >> LG_PAGE;
+
+		run_ind -= prun_pages;
+
+		/*
+		 * Remove predecessor from runs_avail; the coalesced run is
+		 * inserted later.
+		 */
+		assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK)
+		    == prun_size);
+		assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_ALLOCATED)
+		    == 0);
+		assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY)
+		    == flag_dirty);
+		arena_avail_tree_remove(runs_avail,
+		    &chunk->map[run_ind-map_bias]);
+
+		size += prun_size;
+		run_pages += prun_pages;
+
+		chunk->map[run_ind-map_bias].bits = size |
+		    (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
+		chunk->map[run_ind+run_pages-1-map_bias].bits = size |
+		    (chunk->map[run_ind+run_pages-1-map_bias].bits &
+		    CHUNK_MAP_FLAGS_MASK);
+	}
+
+	/* Insert into runs_avail, now that coalescing is complete. */
+	assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) ==
+	    (chunk->map[run_ind+run_pages-1-map_bias].bits & ~PAGE_MASK));
+	assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) ==
+	    (chunk->map[run_ind+run_pages-1-map_bias].bits & CHUNK_MAP_DIRTY));
+	arena_avail_tree_insert(runs_avail, &chunk->map[run_ind-map_bias]);
+
+	if (dirty) {
+		/*
+		 * Insert into chunks_dirty before potentially calling
+		 * arena_chunk_dealloc(), so that chunks_dirty and
+		 * arena->ndirty are consistent.
+		 */
+		if (chunk->dirtied == false) {
+			ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty);
+			chunk->dirtied = true;
+		}
+	}
+
+	/*
+	 * Deallocate chunk if it is now completely unused.  The bit
+	 * manipulation checks whether the first run is unallocated and extends
+	 * to the end of the chunk.
+	 */
+	if ((chunk->map[0].bits & (~PAGE_MASK | CHUNK_MAP_ALLOCATED)) ==
+	    arena_maxclass)
+		arena_chunk_dealloc(arena, chunk);
+
+	/*
+	 * It is okay to do dirty page processing here even if the chunk was
+	 * deallocated above, since in that case it is the spare.  Waiting
+	 * until after possible chunk deallocation to do dirty processing
+	 * allows for an old spare to be fully deallocated, thus decreasing the
+	 * chances of spuriously crossing the dirty page purging threshold.
+	 */
+	if (dirty)
+		arena_maybe_purge(arena);
+}
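The forward and backward passes above coalesce the freed run with any
neighbouring unallocated runs whose CHUNK_MAP_DIRTY state matches. A small
worked example (page counts are illustrative only):

    before:   [ free, 3 pages ][ run being freed, 4 pages ][ free, 5 pages ]
    forward:  size 4 -> 9 pages, successor removed from runs_avail
    backward: run_ind -= 3, size 9 -> 12 pages, predecessor removed
    after:    one 12-page run reinserted into runs_avail_{clean,dirty}

If the coalesced run then spans the whole chunk, the arena_maxclass test near
the end of the function hands the chunk back via arena_chunk_dealloc().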
+
+static void
+arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+    size_t oldsize, size_t newsize)
+{
+	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
+	size_t head_npages = (oldsize - newsize) >> LG_PAGE;
+	size_t flag_dirty = chunk->map[pageind-map_bias].bits & CHUNK_MAP_DIRTY;
+
+	assert(oldsize > newsize);
+
+	/*
+	 * Update the chunk map so that arena_run_dalloc() can treat the
+	 * leading run as separately allocated.  Set the last element of each
+	 * run first, in case of single-page runs.
+	 */
+	assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
+	assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
+	chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
+	    (chunk->map[pageind+head_npages-1-map_bias].bits &
+	    CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+	chunk->map[pageind-map_bias].bits = (oldsize - newsize)
+	    | flag_dirty | (chunk->map[pageind-map_bias].bits &
+	    CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+
+	if (config_debug) {
+		UNUSED size_t tail_npages = newsize >> LG_PAGE;
+		assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
+		    .bits & ~PAGE_MASK) == 0);
+		assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
+		    .bits & CHUNK_MAP_DIRTY) == flag_dirty);
+		assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
+		    .bits & CHUNK_MAP_LARGE) != 0);
+		assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
+		    .bits & CHUNK_MAP_ALLOCATED) != 0);
+	}
+	chunk->map[pageind+head_npages-map_bias].bits = newsize | flag_dirty |
+	    (chunk->map[pageind+head_npages-map_bias].bits &
+	    CHUNK_MAP_FLAGS_MASK) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+
+	arena_run_dalloc(arena, run, false);
+}
+
+static void
+arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+    size_t oldsize, size_t newsize, bool dirty)
+{
+	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
+	size_t head_npages = newsize >> LG_PAGE;
+	size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
+	size_t flag_dirty = chunk->map[pageind-map_bias].bits &
+	    CHUNK_MAP_DIRTY;
+
+	assert(oldsize > newsize);
+
+	/*
+	 * Update the chunk map so that arena_run_dalloc() can treat the
+	 * trailing run as separately allocated.  Set the last element of each
+	 * run first, in case of single-page runs.
+	 */
+	assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
+	assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
+	chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
+	    (chunk->map[pageind+head_npages-1-map_bias].bits &
+	    CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+	chunk->map[pageind-map_bias].bits = newsize | flag_dirty |
+	    (chunk->map[pageind-map_bias].bits & CHUNK_MAP_UNZEROED) |
+	    CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+
+	assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
+	    ~PAGE_MASK) == 0);
+	assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
+	    CHUNK_MAP_LARGE) != 0);
+	assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
+	    CHUNK_MAP_ALLOCATED) != 0);
+	chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits =
+	    flag_dirty |
+	    (chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
+	    CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+	chunk->map[pageind+head_npages-map_bias].bits = (oldsize - newsize) |
+	    flag_dirty | (chunk->map[pageind+head_npages-map_bias].bits &
+	    CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+
+	arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
+	    dirty);
+}
+
+static arena_run_t *
+arena_bin_runs_first(arena_bin_t *bin)
+{
+	arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
+	if (mapelm != NULL) {
+		arena_chunk_t *chunk;
+		size_t pageind;
+
+		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
+		pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
+		    sizeof(arena_chunk_map_t))) + map_bias;
+		arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
+		    (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) <<
+		    LG_PAGE));
+		return (run);
+	}
+
+	return (NULL);
+}
+
+static void
+arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
+{
+	arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
+	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
+	arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];
+
+	assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);
+
+	arena_run_tree_insert(&bin->runs, mapelm);
+}
+
+static void
+arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
+{
+	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
+	arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];
+
+	assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);
+
+	arena_run_tree_remove(&bin->runs, mapelm);
+}
+
+static arena_run_t *
+arena_bin_nonfull_run_tryget(arena_bin_t *bin)
+{
+	arena_run_t *run = arena_bin_runs_first(bin);
+	if (run != NULL) {
+		arena_bin_runs_remove(bin, run);
+		if (config_stats)
+			bin->stats.reruns++;
+	}
+	return (run);
+}
+
+static arena_run_t *
+arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
+{
+	arena_run_t *run;
+	size_t binind;
+	arena_bin_info_t *bin_info;
+
+	/* Look for a usable run. */
+	run = arena_bin_nonfull_run_tryget(bin);
+	if (run != NULL)
+		return (run);
+	/* No existing runs have any space available. */
+
+	binind = arena_bin_index(arena, bin);
+	bin_info = &arena_bin_info[binind];
+
+	/* Allocate a new run. */
+	malloc_mutex_unlock(&bin->lock);
+	/******************************/
+	malloc_mutex_lock(&arena->lock);
+	run = arena_run_alloc(arena, bin_info->run_size, false, false);
+	if (run != NULL) {
+		bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
+		    (uintptr_t)bin_info->bitmap_offset);
+
+		/* Initialize run internals. */
+		run->bin = bin;
+		run->nextind = 0;
+		run->nfree = bin_info->nregs;
+		bitmap_init(bitmap, &bin_info->bitmap_info);
+	}
+	malloc_mutex_unlock(&arena->lock);
+	/********************************/
+	malloc_mutex_lock(&bin->lock);
+	if (run != NULL) {
+		if (config_stats) {
+			bin->stats.nruns++;
+			bin->stats.curruns++;
+		}
+		return (run);
+	}
+
+	/*
+	 * arena_run_alloc() failed, but another thread may have made
+	 * sufficient memory available while this one dropped bin->lock above,
+	 * so search one more time.
+	 */
+	run = arena_bin_nonfull_run_tryget(bin);
+	if (run != NULL)
+		return (run);
+
+	return (NULL);
+}
+
+/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
+static void *
+arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
+{
+	void *ret;
+	size_t binind;
+	arena_bin_info_t *bin_info;
+	arena_run_t *run;
+
+	binind = arena_bin_index(arena, bin);
+	bin_info = &arena_bin_info[binind];
+	bin->runcur = NULL;
+	run = arena_bin_nonfull_run_get(arena, bin);
+	if (bin->runcur != NULL && bin->runcur->nfree > 0) {
+		/*
+		 * Another thread updated runcur while this one ran without the
+		 * bin lock in arena_bin_nonfull_run_get().
+		 */
+		assert(bin->runcur->nfree > 0);
+		ret = arena_run_reg_alloc(bin->runcur, bin_info);
+		if (run != NULL) {
+			arena_chunk_t *chunk;
+
+			/*
+			 * arena_run_alloc() may have allocated run, or it may
+			 * have pulled run from the bin's run tree.  Therefore
+			 * it is unsafe to make any assumptions about how run
+			 * has previously been used, and arena_bin_lower_run()
+			 * must be called, as if a region were just deallocated
+			 * from the run.
+			 */
+			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+			if (run->nfree == bin_info->nregs)
+				arena_dalloc_bin_run(arena, chunk, run, bin);
+			else
+				arena_bin_lower_run(arena, chunk, run, bin);
+		}
+		return (ret);
+	}
+
+	if (run == NULL)
+		return (NULL);
+
+	bin->runcur = run;
+
+	assert(bin->runcur->nfree > 0);
+
+	return (arena_run_reg_alloc(bin->runcur, bin_info));
+}
+
+void
+arena_prof_accum(arena_t *arena, uint64_t accumbytes)
+{
+
+	if (prof_interval != 0) {
+		arena->prof_accumbytes += accumbytes;
+		if (arena->prof_accumbytes >= prof_interval) {
+			prof_idump();
+			arena->prof_accumbytes -= prof_interval;
+		}
+	}
+}
+
+void
+arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
+    uint64_t prof_accumbytes)
+{
+	unsigned i, nfill;
+	arena_bin_t *bin;
+	arena_run_t *run;
+	void *ptr;
+
+	assert(tbin->ncached == 0);
+
+	if (config_prof) {
+		malloc_mutex_lock(&arena->lock);
+		arena_prof_accum(arena, prof_accumbytes);
+		malloc_mutex_unlock(&arena->lock);
+	}
+	bin = &arena->bins[binind];
+	malloc_mutex_lock(&bin->lock);
+	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
+	    tbin->lg_fill_div); i < nfill; i++) {
+		if ((run = bin->runcur) != NULL && run->nfree > 0)
+			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
+		else
+			ptr = arena_bin_malloc_hard(arena, bin);
+		if (ptr == NULL)
+			break;
+		if (config_fill && opt_junk) {
+			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
+			    true);
+		}
+		/* Insert such that low regions get used first. */
+		tbin->avail[nfill - 1 - i] = ptr;
+	}
+	if (config_stats) {
+		bin->stats.allocated += i * arena_bin_info[binind].reg_size;
+		bin->stats.nmalloc += i;
+		bin->stats.nrequests += tbin->tstats.nrequests;
+		bin->stats.nfills++;
+		tbin->tstats.nrequests = 0;
+	}
+	malloc_mutex_unlock(&bin->lock);
+	tbin->ncached = i;
+}
+
+void
+arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
+{
+
+	if (zero) {
+		size_t redzone_size = bin_info->redzone_size;
+		memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
+		    redzone_size);
+		memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
+		    redzone_size);
+	} else {
+		memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
+		    bin_info->reg_interval);
+	}
+}
+
+void
+arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
+{
+	size_t size = bin_info->reg_size;
+	size_t redzone_size = bin_info->redzone_size;
+	size_t i;
+	bool error = false;
+
+	for (i = 1; i <= redzone_size; i++) {
+		unsigned byte;
+		if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) {
+			error = true;
+			malloc_printf("<jemalloc>: Corrupt redzone "
+			    "%zu byte%s before %p (size %zu), byte=%#x\n", i,
+			    (i == 1) ? "" : "s", ptr, size, byte);
+		}
+	}
+	for (i = 0; i < redzone_size; i++) {
+		unsigned byte;
+		if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) {
+			error = true;
+			malloc_printf("<jemalloc>: Corrupt redzone "
+			    "%zu byte%s after end of %p (size %zu), byte=%#x\n",
+			    i, (i == 1) ? "" : "s", ptr, size, byte);
+		}
+	}
+	if (opt_abort && error)
+		abort();
+
+	memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
+	    bin_info->reg_interval);
+}
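With fill support compiled in and opt_junk set, every small region is
book-ended by redzones of bin_info->redzone_size bytes: allocation paints them
0xa5, and deallocation verifies them before repainting the whole interval
0x5a. A sketch of the layout for one region (sizes are illustrative, not the
real class parameters):

    reg_interval = redzone_size + reg_size + redzone_size

    | 0xa5 redzone | user region, reg_size bytes | 0xa5 redzone |
    ^ ptr - redzone_size                         ^ ptr + reg_size

Any redzone byte that is not 0xa5 at free time produces the
"<jemalloc>: Corrupt redzone ..." message, and abort() when opt_abort is set.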
+
+void *
+arena_malloc_small(arena_t *arena, size_t size, bool zero)
+{
+	void *ret;
+	arena_bin_t *bin;
+	arena_run_t *run;
+	size_t binind;
+
+	binind = SMALL_SIZE2BIN(size);
+	assert(binind < NBINS);
+	bin = &arena->bins[binind];
+	size = arena_bin_info[binind].reg_size;
+
+	malloc_mutex_lock(&bin->lock);
+	if ((run = bin->runcur) != NULL && run->nfree > 0)
+		ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
+	else
+		ret = arena_bin_malloc_hard(arena, bin);
+
+	if (ret == NULL) {
+		malloc_mutex_unlock(&bin->lock);
+		return (NULL);
+	}
+
+	if (config_stats) {
+		bin->stats.allocated += size;
+		bin->stats.nmalloc++;
+		bin->stats.nrequests++;
+	}
+	malloc_mutex_unlock(&bin->lock);
+	if (config_prof && isthreaded == false) {
+		malloc_mutex_lock(&arena->lock);
+		arena_prof_accum(arena, size);
+		malloc_mutex_unlock(&arena->lock);
+	}
+
+	if (zero == false) {
+		if (config_fill) {
+			if (opt_junk) {
+				arena_alloc_junk_small(ret,
+				    &arena_bin_info[binind], false);
+			} else if (opt_zero)
+				memset(ret, 0, size);
+		}
+	} else {
+		if (config_fill && opt_junk) {
+			arena_alloc_junk_small(ret, &arena_bin_info[binind],
+			    true);
+		}
+		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+		memset(ret, 0, size);
+	}
+
+	return (ret);
+}
+
+void *
+arena_malloc_large(arena_t *arena, size_t size, bool zero)
+{
+	void *ret;
+
+	/* Large allocation. */
+	size = PAGE_CEILING(size);
+	malloc_mutex_lock(&arena->lock);
+	ret = (void *)arena_run_alloc(arena, size, true, zero);
+	if (ret == NULL) {
+		malloc_mutex_unlock(&arena->lock);
+		return (NULL);
+	}
+	if (config_stats) {
+		arena->stats.nmalloc_large++;
+		arena->stats.nrequests_large++;
+		arena->stats.allocated_large += size;
+		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
+		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
+		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+	}
+	if (config_prof)
+		arena_prof_accum(arena, size);
+	malloc_mutex_unlock(&arena->lock);
+
+	if (zero == false) {
+		if (config_fill) {
+			if (opt_junk)
+				memset(ret, 0xa5, size);
+			else if (opt_zero)
+				memset(ret, 0, size);
+		}
+	}
+
+	return (ret);
+}
+
+/* Only handles large allocations that require more than page alignment. */
+void *
+arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
+{
+	void *ret;
+	size_t alloc_size, leadsize, trailsize;
+	arena_run_t *run;
+	arena_chunk_t *chunk;
+
+	assert((size & PAGE_MASK) == 0);
+
+	alignment = PAGE_CEILING(alignment);
+	alloc_size = size + alignment - PAGE;
+
+	malloc_mutex_lock(&arena->lock);
+	run = arena_run_alloc(arena, alloc_size, true, zero);
+	if (run == NULL) {
+		malloc_mutex_unlock(&arena->lock);
+		return (NULL);
+	}
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+
+	leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
+	    (uintptr_t)run;
+	assert(alloc_size >= leadsize + size);
+	trailsize = alloc_size - leadsize - size;
+	ret = (void *)((uintptr_t)run + leadsize);
+	if (leadsize != 0) {
+		arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
+		    leadsize);
+	}
+	if (trailsize != 0) {
+		arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
+		    false);
+	}
+
+	if (config_stats) {
+		arena->stats.nmalloc_large++;
+		arena->stats.nrequests_large++;
+		arena->stats.allocated_large += size;
+		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
+		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
+		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+	}
+	malloc_mutex_unlock(&arena->lock);
+
+	if (config_fill && zero == false) {
+		if (opt_junk)
+			memset(ret, 0xa5, size);
+		else if (opt_zero)
+			memset(ret, 0, size);
+	}
+	return (ret);
+}
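arena_palloc() handles alignments larger than the page size by
over-allocating and trimming. A worked example, assuming a 4 KiB page and a
16 KiB-aligned request of 16 KiB:

    alloc_size = size + alignment - PAGE = 16K + 16K - 4K = 28 KiB
    run starts 4 KiB past a 16 KiB boundary inside its chunk
    leadsize   = ALIGNMENT_CEILING(run, 16K) - run = 12 KiB  (trimmed, head)
    trailsize  = alloc_size - leadsize - size      = 0       (nothing to trim)
    ret        = run + 12 KiB, a 16 KiB-aligned 16 KiB run

Had the run come back already aligned, leadsize would be 0 and the 12 KiB
surplus would be trimmed from the tail instead.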
+
+/* Return the size of the allocation pointed to by ptr. */
+size_t
+arena_salloc(const void *ptr, bool demote)
+{
+	size_t ret;
+	arena_chunk_t *chunk;
+	size_t pageind, mapbits;
+
+	assert(ptr != NULL);
+	assert(CHUNK_ADDR2BASE(ptr) != ptr);
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+	mapbits = chunk->map[pageind-map_bias].bits;
+	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
+	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
+		arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
+		    (uintptr_t)((pageind - (mapbits >> LG_PAGE)) << LG_PAGE));
+		size_t binind = arena_bin_index(chunk->arena, run->bin);
+		arena_bin_info_t *bin_info = &arena_bin_info[binind];
+		assert(((uintptr_t)ptr - ((uintptr_t)run +
+		    (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
+		    == 0);
+		ret = bin_info->reg_size;
+	} else {
+		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
+		ret = mapbits & ~PAGE_MASK;
+		if (demote && prof_promote && ret == PAGE && (mapbits &
+		    CHUNK_MAP_CLASS_MASK) != 0) {
+			size_t binind = ((mapbits & CHUNK_MAP_CLASS_MASK) >>
+			    CHUNK_MAP_CLASS_SHIFT) - 1;
+			assert(binind < NBINS);
+			ret = arena_bin_info[binind].reg_size;
+		}
+		assert(ret != 0);
+	}
+
+	return (ret);
+}
+
+void
+arena_prof_promoted(const void *ptr, size_t size)
+{
+	arena_chunk_t *chunk;
+	size_t pageind, binind;
+
+	assert(config_prof);
+	assert(ptr != NULL);
+	assert(CHUNK_ADDR2BASE(ptr) != ptr);
+	assert(isalloc(ptr, false) == PAGE);
+	assert(isalloc(ptr, true) == PAGE);
+	assert(size <= SMALL_MAXCLASS);
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+	binind = SMALL_SIZE2BIN(size);
+	assert(binind < NBINS);
+	chunk->map[pageind-map_bias].bits = (chunk->map[pageind-map_bias].bits &
+	    ~CHUNK_MAP_CLASS_MASK) | ((binind+1) << CHUNK_MAP_CLASS_SHIFT);
+
+	assert(isalloc(ptr, false) == PAGE);
+	assert(isalloc(ptr, true) == size);
+}
+
+static void
+arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
+    arena_bin_t *bin)
+{
+
+	/* Dissociate run from bin. */
+	if (run == bin->runcur)
+		bin->runcur = NULL;
+	else {
+		size_t binind = arena_bin_index(chunk->arena, bin);
+		arena_bin_info_t *bin_info = &arena_bin_info[binind];
+
+		if (bin_info->nregs != 1) {
+			/*
+			 * This block's conditional is necessary because if the
+			 * run only contains one region, then it never gets
+			 * inserted into the non-full runs tree.
+			 */
+			arena_bin_runs_remove(bin, run);
+		}
+	}
+}
+
+static void
+arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+    arena_bin_t *bin)
+{
+	size_t binind;
+	arena_bin_info_t *bin_info;
+	size_t npages, run_ind, past;
+
+	assert(run != bin->runcur);
+	assert(arena_run_tree_search(&bin->runs, &chunk->map[
+	    (((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE)-map_bias]) == NULL);
+
+	binind = arena_bin_index(chunk->arena, run->bin);
+	bin_info = &arena_bin_info[binind];
+
+	malloc_mutex_unlock(&bin->lock);
+	/******************************/
+	npages = bin_info->run_size >> LG_PAGE;
+	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
+	past = (size_t)(PAGE_CEILING((uintptr_t)run +
+	    (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
+	    bin_info->reg_interval - bin_info->redzone_size) -
+	    (uintptr_t)chunk) >> LG_PAGE);
+	malloc_mutex_lock(&arena->lock);
+
+	/*
+	 * If the run was originally clean, and some pages were never touched,
+	 * trim the clean pages before deallocating the dirty portion of the
+	 * run.
+	 */
+	if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) == 0 && past
+	    - run_ind < npages) {
+		/*
+		 * Trim clean pages.  Convert to large run beforehand.  Set the
+		 * last map element first, in case this is a one-page run.
+		 */
+		chunk->map[run_ind+npages-1-map_bias].bits = CHUNK_MAP_LARGE |
+		    (chunk->map[run_ind+npages-1-map_bias].bits &
+		    CHUNK_MAP_FLAGS_MASK);
+		chunk->map[run_ind-map_bias].bits = bin_info->run_size |
+		    CHUNK_MAP_LARGE | (chunk->map[run_ind-map_bias].bits &
+		    CHUNK_MAP_FLAGS_MASK);
+		arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
+		    ((past - run_ind) << LG_PAGE), false);
+		/* npages = past - run_ind; */
+	}
+	arena_run_dalloc(arena, run, true);
+	malloc_mutex_unlock(&arena->lock);
+	/****************************/
+	malloc_mutex_lock(&bin->lock);
+	if (config_stats)
+		bin->stats.curruns--;
+}
+
+static void
+arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+    arena_bin_t *bin)
+{
+
+	/*
+	 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
+	 * non-full run.  It is okay to NULL runcur out rather than proactively
+	 * keeping it pointing at the lowest non-full run.
+	 */
+	if ((uintptr_t)run < (uintptr_t)bin->runcur) {
+		/* Switch runcur. */
+		if (bin->runcur->nfree > 0)
+			arena_bin_runs_insert(bin, bin->runcur);
+		bin->runcur = run;
+		if (config_stats)
+			bin->stats.reruns++;
+	} else
+		arena_bin_runs_insert(bin, run);
+}
+
+void
+arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    arena_chunk_map_t *mapelm)
+{
+	size_t pageind;
+	arena_run_t *run;
+	arena_bin_t *bin;
+	size_t size;
+
+	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
+	    (mapelm->bits >> LG_PAGE)) << LG_PAGE));
+	bin = run->bin;
+	size_t binind = arena_bin_index(arena, bin);
+	arena_bin_info_t *bin_info = &arena_bin_info[binind];
+	if (config_fill || config_stats)
+		size = bin_info->reg_size;
+
+	if (config_fill && opt_junk)
+		arena_dalloc_junk_small(ptr, bin_info);
+
+	arena_run_reg_dalloc(run, ptr);
+	if (run->nfree == bin_info->nregs) {
+		arena_dissociate_bin_run(chunk, run, bin);
+		arena_dalloc_bin_run(arena, chunk, run, bin);
+	} else if (run->nfree == 1 && run != bin->runcur)
+		arena_bin_lower_run(arena, chunk, run, bin);
+
+	if (config_stats) {
+		bin->stats.allocated -= size;
+		bin->stats.ndalloc++;
+	}
+}
+
+void
+arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
+    arena_stats_t *astats, malloc_bin_stats_t *bstats,
+    malloc_large_stats_t *lstats)
+{
+	unsigned i;
+
+	malloc_mutex_lock(&arena->lock);
+	*nactive += arena->nactive;
+	*ndirty += arena->ndirty;
+
+	astats->mapped += arena->stats.mapped;
+	astats->npurge += arena->stats.npurge;
+	astats->nmadvise += arena->stats.nmadvise;
+	astats->purged += arena->stats.purged;
+	astats->allocated_large += arena->stats.allocated_large;
+	astats->nmalloc_large += arena->stats.nmalloc_large;
+	astats->ndalloc_large += arena->stats.ndalloc_large;
+	astats->nrequests_large += arena->stats.nrequests_large;
+
+	for (i = 0; i < nlclasses; i++) {
+		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
+		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
+		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
+		lstats[i].curruns += arena->stats.lstats[i].curruns;
+	}
+	malloc_mutex_unlock(&arena->lock);
+
+	for (i = 0; i < NBINS; i++) {
+		arena_bin_t *bin = &arena->bins[i];
+
+		malloc_mutex_lock(&bin->lock);
+		bstats[i].allocated += bin->stats.allocated;
+		bstats[i].nmalloc += bin->stats.nmalloc;
+		bstats[i].ndalloc += bin->stats.ndalloc;
+		bstats[i].nrequests += bin->stats.nrequests;
+		if (config_tcache) {
+			bstats[i].nfills += bin->stats.nfills;
+			bstats[i].nflushes += bin->stats.nflushes;
+		}
+		bstats[i].nruns += bin->stats.nruns;
+		bstats[i].reruns += bin->stats.reruns;
+		bstats[i].curruns += bin->stats.curruns;
+		malloc_mutex_unlock(&bin->lock);
+	}
+}
+
+void
+arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+{
+
+	if (config_fill || config_stats) {
+		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+		size_t size = chunk->map[pageind-map_bias].bits & ~PAGE_MASK;
+
+		if (config_fill && opt_junk)
+			memset(ptr, 0x5a, size);
+		if (config_stats) {
+			arena->stats.ndalloc_large++;
+			arena->stats.allocated_large -= size;
+			arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
+			arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;
+		}
+	}
+
+	arena_run_dalloc(arena, (arena_run_t *)ptr, true);
+}
+
+static void
+arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    size_t oldsize, size_t size)
+{
+
+	assert(size < oldsize);
+
+	/*
+	 * Shrink the run, and make trailing pages available for other
+	 * allocations.
+	 */
+	malloc_mutex_lock(&arena->lock);
+	arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
+	    true);
+	if (config_stats) {
+		arena->stats.ndalloc_large++;
+		arena->stats.allocated_large -= oldsize;
+		arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
+		arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
+
+		arena->stats.nmalloc_large++;
+		arena->stats.nrequests_large++;
+		arena->stats.allocated_large += size;
+		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
+		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
+		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+	}
+	malloc_mutex_unlock(&arena->lock);
+}
+
+static bool
+arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    size_t oldsize, size_t size, size_t extra, bool zero)
+{
+	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+	size_t npages = oldsize >> LG_PAGE;
+	size_t followsize;
+
+	assert(oldsize == (chunk->map[pageind-map_bias].bits & ~PAGE_MASK));
+
+	/* Try to extend the run. */
+	assert(size + extra > oldsize);
+	malloc_mutex_lock(&arena->lock);
+	if (pageind + npages < chunk_npages &&
+	    (chunk->map[pageind+npages-map_bias].bits
+	    & CHUNK_MAP_ALLOCATED) == 0 && (followsize =
+	    chunk->map[pageind+npages-map_bias].bits & ~PAGE_MASK) >= size -
+	    oldsize) {
+		/*
+		 * The next run is available and sufficiently large.  Split the
+		 * following run, then merge the first part with the existing
+		 * allocation.
+		 */
+		size_t flag_dirty;
+		size_t splitsize = (oldsize + followsize <= size + extra)
+		    ? followsize : size + extra - oldsize;
+		arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
+		    ((pageind+npages) << LG_PAGE)), splitsize, true, zero);
+
+		size = oldsize + splitsize;
+		npages = size >> LG_PAGE;
+
+		/*
+		 * Mark the extended run as dirty if either portion of the run
+		 * was dirty before allocation.  This is rather pedantic,
+		 * because there's not actually any sequence of events that
+		 * could cause the resulting run to be passed to
+		 * arena_run_dalloc() with the dirty argument set to false
+		 * (which is when dirty flag consistency would really matter).
+		 */
+		flag_dirty = (chunk->map[pageind-map_bias].bits &
+		    CHUNK_MAP_DIRTY) |
+		    (chunk->map[pageind+npages-1-map_bias].bits &
+		    CHUNK_MAP_DIRTY);
+		chunk->map[pageind-map_bias].bits = size | flag_dirty
+		    | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+		chunk->map[pageind+npages-1-map_bias].bits = flag_dirty |
+		    CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+
+		if (config_stats) {
+			arena->stats.ndalloc_large++;
+			arena->stats.allocated_large -= oldsize;
+			arena->stats.lstats[(oldsize >> LG_PAGE)
+			    - 1].ndalloc++;
+			arena->stats.lstats[(oldsize >> LG_PAGE)
+			    - 1].curruns--;
+
+			arena->stats.nmalloc_large++;
+			arena->stats.nrequests_large++;
+			arena->stats.allocated_large += size;
+			arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
+			arena->stats.lstats[(size >> LG_PAGE)
+			    - 1].nrequests++;
+			arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+		}
+		malloc_mutex_unlock(&arena->lock);
+		return (false);
+	}
+	malloc_mutex_unlock(&arena->lock);
+
+	return (true);
+}
+
+/*
+ * Try to resize a large allocation, in order to avoid copying.  This always
+ * fails when the object would have to grow and the run that follows it is
+ * already in use.
+ */
+static bool
+arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
+    bool zero)
+{
+	size_t psize;
+
+	psize = PAGE_CEILING(size + extra);
+	if (psize == oldsize) {
+		/* Same size class. */
+		if (config_fill && opt_junk && size < oldsize) {
+			memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
+			    size);
+		}
+		return (false);
+	} else {
+		arena_chunk_t *chunk;
+		arena_t *arena;
+
+		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+		arena = chunk->arena;
+
+		if (psize < oldsize) {
+			/* Fill before shrinking in order to avoid a race. */
+			if (config_fill && opt_junk) {
+				memset((void *)((uintptr_t)ptr + size), 0x5a,
+				    oldsize - size);
+			}
+			arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
+			    psize);
+			return (false);
+		} else {
+			bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
+			    oldsize, PAGE_CEILING(size),
+			    psize - PAGE_CEILING(size), zero);
+			if (config_fill && ret == false && zero == false &&
+			    opt_zero) {
+				memset((void *)((uintptr_t)ptr + oldsize), 0,
+				    size - oldsize);
+			}
+			return (ret);
+		}
+	}
+}
+
+void *
+arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
+    bool zero)
+{
+
+	/*
+	 * Avoid moving the allocation if the size class can be left the same.
+	 */
+	if (oldsize <= arena_maxclass) {
+		if (oldsize <= SMALL_MAXCLASS) {
+			assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
+			    == oldsize);
+			if ((size + extra <= SMALL_MAXCLASS &&
+			    SMALL_SIZE2BIN(size + extra) ==
+			    SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
+			    size + extra >= oldsize)) {
+				if (config_fill && opt_junk && size < oldsize) {
+					memset((void *)((uintptr_t)ptr + size),
+					    0x5a, oldsize - size);
+				}
+				return (ptr);
+			}
+		} else {
+			assert(size <= arena_maxclass);
+			if (size + extra > SMALL_MAXCLASS) {
+				if (arena_ralloc_large(ptr, oldsize, size,
+				    extra, zero) == false)
+					return (ptr);
+			}
+		}
+	}
+
+	/* Reallocation would require a move. */
+	return (NULL);
+}
+
+void *
+arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
+    size_t alignment, bool zero, bool try_tcache)
+{
+	void *ret;
+	size_t copysize;
+
+	/* Try to avoid moving the allocation. */
+	ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);
+	if (ret != NULL)
+		return (ret);
+
+	/*
+	 * size and oldsize are different enough that we need to move the
+	 * object.  In that case, fall back to allocating new space and
+	 * copying.
+	 */
+	if (alignment != 0) {
+		size_t usize = sa2u(size + extra, alignment);
+		if (usize == 0)
+			return (NULL);
+		ret = ipalloc(usize, alignment, zero);
+	} else
+		ret = arena_malloc(NULL, size + extra, zero, try_tcache);
+
+	if (ret == NULL) {
+		if (extra == 0)
+			return (NULL);
+		/* Try again, this time without extra. */
+		if (alignment != 0) {
+			size_t usize = sa2u(size, alignment);
+			if (usize == 0)
+				return (NULL);
+			ret = ipalloc(usize, alignment, zero);
+		} else
+			ret = arena_malloc(NULL, size, zero, try_tcache);
+
+		if (ret == NULL)
+			return (NULL);
+	}
+
+	/* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */
+
+	/*
+	 * Copy at most size bytes (not size+extra), since the caller has no
+	 * expectation that the extra bytes will be reliably preserved.
+	 */
+	copysize = (size < oldsize) ? size : oldsize;
+	memcpy(ret, ptr, copysize);
+	iqalloc(ptr);
+	return (ret);
+}
+
+bool
+arena_new(arena_t *arena, unsigned ind)
+{
+	unsigned i;
+	arena_bin_t *bin;
+
+	arena->ind = ind;
+	arena->nthreads = 0;
+
+	if (malloc_mutex_init(&arena->lock))
+		return (true);
+
+	if (config_stats) {
+		memset(&arena->stats, 0, sizeof(arena_stats_t));
+		arena->stats.lstats =
+		    (malloc_large_stats_t *)base_alloc(nlclasses *
+		    sizeof(malloc_large_stats_t));
+		if (arena->stats.lstats == NULL)
+			return (true);
+		memset(arena->stats.lstats, 0, nlclasses *
+		    sizeof(malloc_large_stats_t));
+		if (config_tcache)
+			ql_new(&arena->tcache_ql);
+	}
+
+	if (config_prof)
+		arena->prof_accumbytes = 0;
+
+	/* Initialize chunks. */
+	ql_new(&arena->chunks_dirty);
+	arena->spare = NULL;
+
+	arena->nactive = 0;
+	arena->ndirty = 0;
+	arena->npurgatory = 0;
+
+	arena_avail_tree_new(&arena->runs_avail_clean);
+	arena_avail_tree_new(&arena->runs_avail_dirty);
+
+	/* Initialize bins. */
+	for (i = 0; i < NBINS; i++) {
+		bin = &arena->bins[i];
+		if (malloc_mutex_init(&bin->lock))
+			return (true);
+		bin->runcur = NULL;
+		arena_run_tree_new(&bin->runs);
+		if (config_stats)
+			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+	}
+
+	return (false);
+}
+
+/*
+ * Calculate bin_info->run_size such that it meets the following constraints:
+ *
+ *   *) bin_info->run_size >= min_run_size
+ *   *) bin_info->run_size <= arena_maxclass
+ *   *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
+ *   *) bin_info->nregs <= RUN_MAXREGS
+ *
+ * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
+ * calculated here, since these settings are all interdependent.
+ */
+static size_t
+bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
+{
+	size_t pad_size;
+	size_t try_run_size, good_run_size;
+	uint32_t try_nregs, good_nregs;
+	uint32_t try_hdr_size, good_hdr_size;
+	uint32_t try_bitmap_offset, good_bitmap_offset;
+	uint32_t try_ctx0_offset, good_ctx0_offset;
+	uint32_t try_redzone0_offset, good_redzone0_offset;
+
+	assert(min_run_size >= PAGE);
+	assert(min_run_size <= arena_maxclass);
+
+	/*
+	 * Determine redzone size based on minimum alignment and minimum
+	 * redzone size.  Add padding to the end of the run if it is needed to
+	 * align the regions.  The padding allows each redzone to be half the
+	 * minimum alignment; without the padding, each redzone would have to
+	 * be twice as large in order to maintain alignment.
+	 */
+	if (config_fill && opt_redzone) {
+		size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
+		if (align_min <= REDZONE_MINSIZE) {
+			bin_info->redzone_size = REDZONE_MINSIZE;
+			pad_size = 0;
+		} else {
+			bin_info->redzone_size = align_min >> 1;
+			pad_size = bin_info->redzone_size;
+		}
+	} else {
+		bin_info->redzone_size = 0;
+		pad_size = 0;
+	}
+	bin_info->reg_interval = bin_info->reg_size +
+	    (bin_info->redzone_size << 1);
+
+	/*
+	 * Calculate known-valid settings before entering the run_size
+	 * expansion loop, so that the first part of the loop always copies
+	 * valid settings.
+	 *
+	 * The do..while loop iteratively reduces the number of regions until
+	 * the run header and the regions no longer overlap.  A closed formula
+	 * would be quite messy, since there is an interdependency between the
+	 * header's mask length and the number of regions.
+	 */
+	try_run_size = min_run_size;
+	try_nregs = ((try_run_size - sizeof(arena_run_t)) /
+	    bin_info->reg_interval)
+	    + 1; /* Counter-act try_nregs-- in loop. */
+	if (try_nregs > RUN_MAXREGS) {
+		try_nregs = RUN_MAXREGS
+		    + 1; /* Counter-act try_nregs-- in loop. */
+	}
+	do {
+		try_nregs--;
+		try_hdr_size = sizeof(arena_run_t);
+		/* Pad to a long boundary. */
+		try_hdr_size = LONG_CEILING(try_hdr_size);
+		try_bitmap_offset = try_hdr_size;
+		/* Add space for bitmap. */
+		try_hdr_size += bitmap_size(try_nregs);
+		if (config_prof && opt_prof && prof_promote == false) {
+			/* Pad to a quantum boundary. */
+			try_hdr_size = QUANTUM_CEILING(try_hdr_size);
+			try_ctx0_offset = try_hdr_size;
+			/* Add space for one (prof_ctx_t *) per region. */
+			try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
+		} else
+			try_ctx0_offset = 0;
+		try_redzone0_offset = try_run_size - (try_nregs *
+		    bin_info->reg_interval) - pad_size;
+	} while (try_hdr_size > try_redzone0_offset);
+
+	/* run_size expansion loop. */
+	do {
+		/*
+		 * Copy valid settings before trying more aggressive settings.
+		 */
+		good_run_size = try_run_size;
+		good_nregs = try_nregs;
+		good_hdr_size = try_hdr_size;
+		good_bitmap_offset = try_bitmap_offset;
+		good_ctx0_offset = try_ctx0_offset;
+		good_redzone0_offset = try_redzone0_offset;
+
+		/* Try more aggressive settings. */
+		try_run_size += PAGE;
+		try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
+		    bin_info->reg_interval)
+		    + 1; /* Counter-act try_nregs-- in loop. */
+		if (try_nregs > RUN_MAXREGS) {
+			try_nregs = RUN_MAXREGS
+			    + 1; /* Counter-act try_nregs-- in loop. */
+		}
+		do {
+			try_nregs--;
+			try_hdr_size = sizeof(arena_run_t);
+			/* Pad to a long boundary. */
+			try_hdr_size = LONG_CEILING(try_hdr_size);
+			try_bitmap_offset = try_hdr_size;
+			/* Add space for bitmap. */
+			try_hdr_size += bitmap_size(try_nregs);
+			if (config_prof && opt_prof && prof_promote == false) {
+				/* Pad to a quantum boundary. */
+				try_hdr_size = QUANTUM_CEILING(try_hdr_size);
+				try_ctx0_offset = try_hdr_size;
+				/*
+				 * Add space for one (prof_ctx_t *) per region.
+				 */
+				try_hdr_size += try_nregs *
+				    sizeof(prof_ctx_t *);
+			}
+			try_redzone0_offset = try_run_size - (try_nregs *
+			    bin_info->reg_interval) - pad_size;
+		} while (try_hdr_size > try_redzone0_offset);
+	} while (try_run_size <= arena_maxclass
+	    && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
+	    RUN_MAX_OVRHD_RELAX
+	    && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
+	    && try_nregs < RUN_MAXREGS);
+
+	assert(good_hdr_size <= good_redzone0_offset);
+
+	/* Copy final settings. */
+	bin_info->run_size = good_run_size;
+	bin_info->nregs = good_nregs;
+	bin_info->bitmap_offset = good_bitmap_offset;
+	bin_info->ctx0_offset = good_ctx0_offset;
+	bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
+
+	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
+	    * bin_info->reg_interval) + pad_size == bin_info->run_size);
+
+	return (good_run_size);
+}
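
/*
 * Worked example (illustrative only, not part of the changeset) of the
 * shrink loop above, with made-up constants: a 32-byte base header, one
 * bitmap bit per region rounded up to 8-byte groups, a 192-byte region
 * interval, and no redzones, padding, or profiling contexts.
 */
#include <stdio.h>

int
main(void)
{
	size_t run_size = 4096, reg_interval = 192, hdr_base = 32;
	size_t nregs = (run_size - hdr_base) / reg_interval + 1;
	size_t hdr_size, reg0_offset;

	do {
		nregs--;
		hdr_size = hdr_base + ((nregs + 63) / 64) * 8; /* + bitmap */
		reg0_offset = run_size - nregs * reg_interval;
	} while (hdr_size > reg0_offset);
	/* Prints nregs=21 hdr_size=40 reg0_offset=64 for these inputs. */
	printf("nregs=%zu hdr_size=%zu reg0_offset=%zu\n", nregs, hdr_size,
	    reg0_offset);
	return (0);
}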
+
+static void
+bin_info_init(void)
+{
+	arena_bin_info_t *bin_info;
+	size_t prev_run_size = PAGE;
+
+#define	SIZE_CLASS(bin, delta, size)					\
+	bin_info = &arena_bin_info[bin];				\
+	bin_info->reg_size = size;					\
+	prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
+	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
+	SIZE_CLASSES
+#undef SIZE_CLASS
+}
+
+void
+arena_boot(void)
+{
+	size_t header_size;
+	unsigned i;
+
+	/*
+	 * Compute the header size such that it is large enough to contain the
+	 * page map.  The page map is biased to omit entries for the header
+	 * itself, so some iteration is necessary to compute the map bias.
+	 *
+	 * 1) Compute safe header_size and map_bias values that include enough
+	 *    space for an unbiased page map.
+	 * 2) Refine map_bias based on (1) to omit the header pages in the page
+	 *    map.  The resulting map_bias may be one too small.
+	 * 3) Refine map_bias based on (2).  The result will be >= the result
+	 *    from (2), and will always be correct.
+	 */
+	map_bias = 0;
+	for (i = 0; i < 3; i++) {
+		header_size = offsetof(arena_chunk_t, map) +
+		    (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
+		map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
+		    != 0);
+	}
+	assert(map_bias > 0);
+
+	arena_maxclass = chunksize - (map_bias << LG_PAGE);
+
+	bin_info_init();
+}
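
/*
 * Worked example (illustrative only, not part of the changeset) of the
 * map_bias fixed point above, with made-up sizes: 4 KiB pages, a 512-page
 * chunk, a 16-byte header prefix, and 32-byte map entries.  The real
 * values come from offsetof(arena_chunk_t, map) and
 * sizeof(arena_chunk_map_t).
 */
#include <stdio.h>

int
main(void)
{
	size_t lg_page = 12, page = 4096, page_mask = page - 1;
	size_t chunk_npages = 512, hdr_prefix = 16, map_entry = 32;
	size_t map_bias = 0, header_size;
	unsigned i;

	for (i = 0; i < 3; i++) {
		header_size = hdr_prefix +
		    map_entry * (chunk_npages - map_bias);
		map_bias = (header_size >> lg_page) +
		    ((header_size & page_mask) != 0);
		/* Converges 5 -> 4 -> 4 for these inputs. */
		printf("iteration %u: header_size=%zu map_bias=%zu\n", i,
		    header_size, map_bias);
	}
	return (0);
}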
+
+void
+arena_prefork(arena_t *arena)
+{
+	unsigned i;
+
+	malloc_mutex_prefork(&arena->lock);
+	for (i = 0; i < NBINS; i++)
+		malloc_mutex_prefork(&arena->bins[i].lock);
+}
+
+void
+arena_postfork_parent(arena_t *arena)
+{
+	unsigned i;
+
+	for (i = 0; i < NBINS; i++)
+		malloc_mutex_postfork_parent(&arena->bins[i].lock);
+	malloc_mutex_postfork_parent(&arena->lock);
+}
+
+void
+arena_postfork_child(arena_t *arena)
+{
+	unsigned i;
+
+	for (i = 0; i < NBINS; i++)
+		malloc_mutex_postfork_child(&arena->bins[i].lock);
+	malloc_mutex_postfork_child(&arena->lock);
+}
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/src/atomic.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/src/atomic.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,2 @@
+#define	JEMALLOC_ATOMIC_C_
+#include "jemalloc/internal/jemalloc_internal.h"
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/src/base.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/src/base.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,138 @@
+#define	JEMALLOC_BASE_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+static malloc_mutex_t	base_mtx;
+
+/*
+ * Current pages that are being used for internal memory allocations.  These
+ * pages are carved up in cacheline-size quanta, so that there is no chance of
+ * false cache line sharing.
+ */
+static void		*base_pages;
+static void		*base_next_addr;
+static void		*base_past_addr; /* Addr immediately past base_pages. */
+static extent_node_t	*base_nodes;
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static bool	base_pages_alloc(size_t minsize);
+
+/******************************************************************************/
+
+static bool
+base_pages_alloc(size_t minsize)
+{
+	size_t csize;
+	bool zero;
+
+	assert(minsize != 0);
+	csize = CHUNK_CEILING(minsize);
+	zero = false;
+	base_pages = chunk_alloc(csize, chunksize, true, &zero);
+	if (base_pages == NULL)
+		return (true);
+	base_next_addr = base_pages;
+	base_past_addr = (void *)((uintptr_t)base_pages + csize);
+
+	return (false);
+}
+
+void *
+base_alloc(size_t size)
+{
+	void *ret;
+	size_t csize;
+
+	/* Round size up to nearest multiple of the cacheline size. */
+	csize = CACHELINE_CEILING(size);
+
+	malloc_mutex_lock(&base_mtx);
+	/* Make sure there's enough space for the allocation. */
+	if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
+		if (base_pages_alloc(csize)) {
+			malloc_mutex_unlock(&base_mtx);
+			return (NULL);
+		}
+	}
+	/* Allocate. */
+	ret = base_next_addr;
+	base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
+	malloc_mutex_unlock(&base_mtx);
+
+	return (ret);
+}
+
+void *
+base_calloc(size_t number, size_t size)
+{
+	void *ret = base_alloc(number * size);
+
+	if (ret != NULL)
+		memset(ret, 0, number * size);
+
+	return (ret);
+}
+
+extent_node_t *
+base_node_alloc(void)
+{
+	extent_node_t *ret;
+
+	malloc_mutex_lock(&base_mtx);
+	if (base_nodes != NULL) {
+		ret = base_nodes;
+		base_nodes = *(extent_node_t **)ret;
+		malloc_mutex_unlock(&base_mtx);
+	} else {
+		malloc_mutex_unlock(&base_mtx);
+		ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
+	}
+
+	return (ret);
+}
+
+void
+base_node_dealloc(extent_node_t *node)
+{
+
+	malloc_mutex_lock(&base_mtx);
+	*(extent_node_t **)node = base_nodes;
+	base_nodes = node;
+	malloc_mutex_unlock(&base_mtx);
+}
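
/*
 * Minimal sketch (illustrative only, not part of the changeset) of the
 * embedded free list used by base_node_alloc()/base_node_dealloc() above:
 * a freed node's first pointer-sized word is reused as the "next" link,
 * so no separate list nodes are needed.
 */
#include <stddef.h>

struct blob { void *space[4]; };	/* stand-in for extent_node_t */
static struct blob *blob_free_head;

static void
blob_put(struct blob *b)
{

	*(struct blob **)b = blob_free_head;	/* payload becomes the link */
	blob_free_head = b;
}

static struct blob *
blob_get(void)
{
	struct blob *b = blob_free_head;

	if (b != NULL)
		blob_free_head = *(struct blob **)b;
	return (b);
}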
+
+bool
+base_boot(void)
+{
+
+	base_nodes = NULL;
+	if (malloc_mutex_init(&base_mtx))
+		return (true);
+
+	return (false);
+}
+
+void
+base_prefork(void)
+{
+
+	malloc_mutex_prefork(&base_mtx);
+}
+
+void
+base_postfork_parent(void)
+{
+
+	malloc_mutex_postfork_parent(&base_mtx);
+}
+
+void
+base_postfork_child(void)
+{
+
+	malloc_mutex_postfork_child(&base_mtx);
+}
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/src/bitmap.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/src/bitmap.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,90 @@
+#define JEMALLOC_BITMAP_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static size_t	bits2groups(size_t nbits);
+
+/******************************************************************************/
+
+static size_t
+bits2groups(size_t nbits)
+{
+
+	return ((nbits >> LG_BITMAP_GROUP_NBITS) +
+	    !!(nbits & BITMAP_GROUP_NBITS_MASK));
+}
+
+void
+bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
+{
+	unsigned i;
+	size_t group_count;
+
+	assert(nbits > 0);
+	assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
+
+	/*
+	 * Compute the number of groups necessary to store nbits bits, and
+	 * progressively work upward through the levels until reaching a level
+	 * that requires only one group.
+	 */
+	binfo->levels[0].group_offset = 0;
+	group_count = bits2groups(nbits);
+	for (i = 1; group_count > 1; i++) {
+		assert(i < BITMAP_MAX_LEVELS);
+		binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+		    + group_count;
+		group_count = bits2groups(group_count);
+	}
+	binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+	    + group_count;
+	binfo->nlevels = i;
+	binfo->nbits = nbits;
+}
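
/*
 * Worked example (illustrative only, not part of the changeset) of the
 * level layout computed above, assuming 64-bit groups: 1000 bits need 16
 * leaf groups plus one summary group, so nlevels == 2 and 17 groups total.
 */
#include <stdio.h>

static size_t
groups(size_t nbits)	/* stand-in for bits2groups() with 64-bit groups */
{

	return ((nbits >> 6) + !!(nbits & 63));
}

int
main(void)
{
	size_t nbits = 1000, group_offset[8], group_count;
	unsigned i;

	group_offset[0] = 0;
	group_count = groups(nbits);
	for (i = 1; group_count > 1; i++) {
		group_offset[i] = group_offset[i-1] + group_count;
		group_count = groups(group_count);
	}
	group_offset[i] = group_offset[i-1] + group_count;
	printf("nlevels=%u total groups=%zu\n", i, group_offset[i]);
	return (0);
}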
+
+size_t
+bitmap_info_ngroups(const bitmap_info_t *binfo)
+{
+
+	return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP);
+}
+
+size_t
+bitmap_size(size_t nbits)
+{
+	bitmap_info_t binfo;
+
+	bitmap_info_init(&binfo, nbits);
+	return (bitmap_info_ngroups(&binfo));
+}
+
+void
+bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
+{
+	size_t extra;
+	unsigned i;
+
+	/*
+	 * Bits are actually inverted with regard to the external bitmap
+	 * interface, so the bitmap starts out with all 1 bits, except for
+	 * trailing unused bits (if any).  Note that each group uses bit 0 to
+	 * correspond to the first logical bit in the group, so extra bits
+	 * are the most significant bits of the last group.
+	 */
+	memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset <<
+	    LG_SIZEOF_BITMAP);
+	extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
+	    & BITMAP_GROUP_NBITS_MASK;
+	if (extra != 0)
+		bitmap[binfo->levels[1].group_offset - 1] >>= extra;
+	for (i = 1; i < binfo->nlevels; i++) {
+		size_t group_count = binfo->levels[i].group_offset -
+		    binfo->levels[i-1].group_offset;
+		extra = (BITMAP_GROUP_NBITS - (group_count &
+		    BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
+		if (extra != 0)
+			bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
+	}
+}
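
/*
 * Worked example (illustrative only, not part of the changeset) of the
 * trailing-bit masking above, for nbits = 70 with 64-bit groups: the
 * second (last) level-0 group keeps only its 6 low, usable bits set.
 */
#include <stdio.h>
#include <inttypes.h>

int
main(void)
{
	uint64_t group = UINT64_MAX;
	size_t nbits = 70, group_nbits = 64;
	size_t extra = (group_nbits - (nbits & (group_nbits - 1))) &
	    (group_nbits - 1);

	group >>= extra;
	/* Prints extra=58 last group=0x3f. */
	printf("extra=%zu last group=0x%" PRIx64 "\n", extra, group);
	return (0);
}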
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/src/chunk.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/src/chunk.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,304 @@
+#define	JEMALLOC_CHUNK_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+size_t	opt_lg_chunk = LG_CHUNK_DEFAULT;
+
+malloc_mutex_t	chunks_mtx;
+chunk_stats_t	stats_chunks;
+
+/*
+ * Trees of chunks that were previously allocated (trees differ only in node
+ * ordering).  These are used when allocating chunks, in an attempt to re-use
+ * address space.  Depending on function, different tree orderings are needed,
+ * which is why there are two trees with the same contents.
+ */
+static extent_tree_t	chunks_szad;
+static extent_tree_t	chunks_ad;
+
+rtree_t		*chunks_rtree;
+
+/* Various chunk-related settings. */
+size_t		chunksize;
+size_t		chunksize_mask; /* (chunksize - 1). */
+size_t		chunk_npages;
+size_t		map_bias;
+size_t		arena_maxclass; /* Max size class for arenas. */
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static void	*chunk_recycle(size_t size, size_t alignment, bool *zero);
+static void	chunk_record(void *chunk, size_t size);
+
+/******************************************************************************/
+
+static void *
+chunk_recycle(size_t size, size_t alignment, bool *zero)
+{
+	void *ret;
+	extent_node_t *node;
+	extent_node_t key;
+	size_t alloc_size, leadsize, trailsize;
+
+	alloc_size = size + alignment - chunksize;
+	/* Beware size_t wrap-around. */
+	if (alloc_size < size)
+		return (NULL);
+	key.addr = NULL;
+	key.size = alloc_size;
+	malloc_mutex_lock(&chunks_mtx);
+	node = extent_tree_szad_nsearch(&chunks_szad, &key);
+	if (node == NULL) {
+		malloc_mutex_unlock(&chunks_mtx);
+		return (NULL);
+	}
+	leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
+	    (uintptr_t)node->addr;
+	assert(alloc_size >= leadsize + size);
+	trailsize = alloc_size - leadsize - size;
+	ret = (void *)((uintptr_t)node->addr + leadsize);
+	/* Remove node from the tree. */
+	extent_tree_szad_remove(&chunks_szad, node);
+	extent_tree_ad_remove(&chunks_ad, node);
+	if (leadsize != 0) {
+		/* Insert the leading space as a smaller chunk. */
+		node->size = leadsize;
+		extent_tree_szad_insert(&chunks_szad, node);
+		extent_tree_ad_insert(&chunks_ad, node);
+		node = NULL;
+	}
+	if (trailsize != 0) {
+		/* Insert the trailing space as a smaller chunk. */
+		if (node == NULL) {
+			/*
+			 * An additional node is required, but
+			 * base_node_alloc() can cause a new base chunk to be
+			 * allocated.  Drop chunks_mtx in order to avoid
+			 * deadlock, and if node allocation fails, deallocate
+			 * the result before returning an error.
+			 */
+			malloc_mutex_unlock(&chunks_mtx);
+			node = base_node_alloc();
+			if (node == NULL) {
+				chunk_dealloc(ret, size, true);
+				return (NULL);
+			}
+			malloc_mutex_lock(&chunks_mtx);
+		}
+		node->addr = (void *)((uintptr_t)(ret) + size);
+		node->size = trailsize;
+		extent_tree_szad_insert(&chunks_szad, node);
+		extent_tree_ad_insert(&chunks_ad, node);
+		node = NULL;
+	}
+	malloc_mutex_unlock(&chunks_mtx);
+
+	if (node != NULL)
+		base_node_dealloc(node);
+#ifdef JEMALLOC_PURGE_MADVISE_FREE
+	if (*zero) {
+		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+		memset(ret, 0, size);
+	}
+#endif
+	return (ret);
+}
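
/*
 * Worked example (illustrative only, not part of the changeset) of the
 * lead/trail split above, with made-up numbers: chunksize = 1 MiB,
 * size = 2 MiB, alignment = 4 MiB, and a recycled extent at 0x2600000.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uintptr_t addr = 0x2600000;	/* hypothetical node->addr */
	size_t chunksize = 1 << 20, size = 2 << 20, alignment = 4 << 20;
	size_t alloc_size = size + alignment - chunksize;
	uintptr_t ret = (addr + alignment - 1) & ~(uintptr_t)(alignment - 1);
	size_t leadsize = ret - addr;
	size_t trailsize = alloc_size - leadsize - size;

	/* Prints alloc_size=5242880 leadsize=2097152 trailsize=1048576. */
	printf("alloc_size=%zu leadsize=%zu trailsize=%zu\n", alloc_size,
	    leadsize, trailsize);
	return (0);
}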
+
+/*
+ * If the caller specifies (*zero == false), it is still possible to receive
+ * zeroed memory, in which case *zero is toggled to true.  arena_chunk_alloc()
+ * takes advantage of this to avoid demanding zeroed chunks, while still
+ * taking advantage of them if they happen to be returned.
+ */
+void *
+chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
+{
+	void *ret;
+
+	assert(size != 0);
+	assert((size & chunksize_mask) == 0);
+	assert((alignment & chunksize_mask) == 0);
+
+	ret = chunk_recycle(size, alignment, zero);
+	if (ret != NULL)
+		goto label_return;
+	if (config_dss) {
+		ret = chunk_alloc_dss(size, alignment, zero);
+		if (ret != NULL)
+			goto label_return;
+	}
+	ret = chunk_alloc_mmap(size, alignment);
+	if (ret != NULL) {
+		*zero = true;
+		goto label_return;
+	}
+
+	/* All strategies for allocation failed. */
+	ret = NULL;
+label_return:
+	if (config_ivsalloc && base == false && ret != NULL) {
+		if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
+			chunk_dealloc(ret, size, true);
+			return (NULL);
+		}
+	}
+	if ((config_stats || config_prof) && ret != NULL) {
+		bool gdump;
+		malloc_mutex_lock(&chunks_mtx);
+		if (config_stats)
+			stats_chunks.nchunks += (size / chunksize);
+		stats_chunks.curchunks += (size / chunksize);
+		if (stats_chunks.curchunks > stats_chunks.highchunks) {
+			stats_chunks.highchunks = stats_chunks.curchunks;
+			if (config_prof)
+				gdump = true;
+		} else if (config_prof)
+			gdump = false;
+		malloc_mutex_unlock(&chunks_mtx);
+		if (config_prof && opt_prof && opt_prof_gdump && gdump)
+			prof_gdump();
+	}
+
+	assert(CHUNK_ADDR2BASE(ret) == ret);
+	return (ret);
+}
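
/*
 * Hypothetical caller (illustrative only, not part of the changeset)
 * showing the *zero protocol described above: request non-zeroed memory,
 * but honor the flag if the returned chunk happens to be zeroed already.
 */
static void *
chunk_user_sketch(void)
{
	bool zero = false;
	void *chunk = chunk_alloc(chunksize, chunksize, false, &zero);

	if (chunk != NULL && zero == false) {
		/* Contents are undefined; initialize before use. */
		memset(chunk, 0, chunksize);
	}
	return (chunk);
}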
+
+static void
+chunk_record(void *chunk, size_t size)
+{
+	extent_node_t *xnode, *node, *prev, key;
+
+	madvise(chunk, size, JEMALLOC_MADV_PURGE);
+
+	xnode = NULL;
+	malloc_mutex_lock(&chunks_mtx);
+	while (true) {
+		key.addr = (void *)((uintptr_t)chunk + size);
+		node = extent_tree_ad_nsearch(&chunks_ad, &key);
+		/* Try to coalesce forward. */
+		if (node != NULL && node->addr == key.addr) {
+			/*
+			 * Coalesce chunk with the following address range.
+			 * This does not change the position within chunks_ad,
+			 * so only remove/insert from/into chunks_szad.
+			 */
+			extent_tree_szad_remove(&chunks_szad, node);
+			node->addr = chunk;
+			node->size += size;
+			extent_tree_szad_insert(&chunks_szad, node);
+			break;
+		} else if (xnode == NULL) {
+			/*
+			 * It is possible that base_node_alloc() will cause a
+			 * new base chunk to be allocated, so take care not to
+			 * deadlock on chunks_mtx, and recover if another thread
+			 * deallocates an adjacent chunk while this one is busy
+			 * allocating xnode.
+			 */
+			malloc_mutex_unlock(&chunks_mtx);
+			xnode = base_node_alloc();
+			if (xnode == NULL)
+				return;
+			malloc_mutex_lock(&chunks_mtx);
+		} else {
+			/* Coalescing forward failed, so insert a new node. */
+			node = xnode;
+			xnode = NULL;
+			node->addr = chunk;
+			node->size = size;
+			extent_tree_ad_insert(&chunks_ad, node);
+			extent_tree_szad_insert(&chunks_szad, node);
+			break;
+		}
+	}
+	/* Discard xnode if it ended up unused due to a race. */
+	if (xnode != NULL)
+		base_node_dealloc(xnode);
+
+	/* Try to coalesce backward. */
+	prev = extent_tree_ad_prev(&chunks_ad, node);
+	if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
+	    chunk) {
+		/*
+		 * Coalesce chunk with the previous address range.  This does
+		 * not change the position within chunks_ad, so only
+		 * remove/insert node from/into chunks_szad.
+		 */
+		extent_tree_szad_remove(&chunks_szad, prev);
+		extent_tree_ad_remove(&chunks_ad, prev);
+
+		extent_tree_szad_remove(&chunks_szad, node);
+		node->addr = prev->addr;
+		node->size += prev->size;
+		extent_tree_szad_insert(&chunks_szad, node);
+
+		base_node_dealloc(prev);
+	}
+	malloc_mutex_unlock(&chunks_mtx);
+}
+
+void
+chunk_dealloc(void *chunk, size_t size, bool unmap)
+{
+
+	assert(chunk != NULL);
+	assert(CHUNK_ADDR2BASE(chunk) == chunk);
+	assert(size != 0);
+	assert((size & chunksize_mask) == 0);
+
+	if (config_ivsalloc)
+		rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
+	if (config_stats || config_prof) {
+		malloc_mutex_lock(&chunks_mtx);
+		stats_chunks.curchunks -= (size / chunksize);
+		malloc_mutex_unlock(&chunks_mtx);
+	}
+
+	if (unmap) {
+		if (chunk_dealloc_mmap(chunk, size) == false)
+			return;
+		chunk_record(chunk, size);
+	}
+}
+
+bool
+chunk_boot0(void)
+{
+
+	/* Set variables according to the value of opt_lg_chunk. */
+	chunksize = (ZU(1) << opt_lg_chunk);
+	assert(chunksize >= PAGE);
+	chunksize_mask = chunksize - 1;
+	chunk_npages = (chunksize >> LG_PAGE);
+
+	if (config_stats || config_prof) {
+		if (malloc_mutex_init(&chunks_mtx))
+			return (true);
+		memset(&stats_chunks, 0, sizeof(chunk_stats_t));
+	}
+	if (config_dss && chunk_dss_boot())
+		return (true);
+	extent_tree_szad_new(&chunks_szad);
+	extent_tree_ad_new(&chunks_ad);
+	if (config_ivsalloc) {
+		chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
+		    opt_lg_chunk);
+		if (chunks_rtree == NULL)
+			return (true);
+	}
+
+	return (false);
+}
+
+bool
+chunk_boot1(void)
+{
+
+	if (chunk_mmap_boot())
+		return (true);
+
+	return (false);
+}
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/src/chunk_dss.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/src/chunk_dss.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,159 @@
+#define	JEMALLOC_CHUNK_DSS_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+/******************************************************************************/
+/* Data. */
+
+/*
+ * Protects sbrk() calls.  This avoids malloc races among threads, though it
+ * does not protect against races with threads that call sbrk() directly.
+ */
+static malloc_mutex_t	dss_mtx;
+
+/* Base address of the DSS. */
+static void		*dss_base;
+/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
+static void		*dss_prev;
+/* Current upper limit on DSS addresses. */
+static void		*dss_max;
+
+/******************************************************************************/
+
+#ifndef JEMALLOC_HAVE_SBRK
+static void *
+sbrk(intptr_t increment)
+{
+
+	not_implemented();
+
+	return (NULL);
+}
+#endif
+
+void *
+chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
+{
+	void *ret;
+
+	cassert(config_dss);
+	assert(size > 0 && (size & chunksize_mask) == 0);
+	assert(alignment > 0 && (alignment & chunksize_mask) == 0);
+
+	/*
+	 * sbrk() uses a signed increment argument, so take care not to
+	 * interpret a huge allocation request as a negative increment.
+	 */
+	if ((intptr_t)size < 0)
+		return (NULL);
+
+	malloc_mutex_lock(&dss_mtx);
+	if (dss_prev != (void *)-1) {
+		size_t gap_size, cpad_size;
+		void *cpad, *dss_next;
+		intptr_t incr;
+
+		/*
+		 * The loop is necessary to recover from races with other
+		 * threads that are using the DSS for something other than
+		 * malloc.
+		 */
+		do {
+			/* Get the current end of the DSS. */
+			dss_max = sbrk(0);
+			/*
+			 * Calculate how much padding is necessary to
+			 * chunk-align the end of the DSS.
+			 */
+			gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
+			    chunksize_mask;
+			/*
+			 * Compute how much chunk-aligned pad space (if any) is
+			 * necessary to satisfy alignment.  This space can be
+			 * recycled for later use.
+			 */
+			cpad = (void *)((uintptr_t)dss_max + gap_size);
+			ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
+			    alignment);
+			cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
+			dss_next = (void *)((uintptr_t)ret + size);
+			if ((uintptr_t)ret < (uintptr_t)dss_max ||
+			    (uintptr_t)dss_next < (uintptr_t)dss_max) {
+				/* Wrap-around. */
+				malloc_mutex_unlock(&dss_mtx);
+				return (NULL);
+			}
+			incr = gap_size + cpad_size + size;
+			dss_prev = sbrk(incr);
+			if (dss_prev == dss_max) {
+				/* Success. */
+				dss_max = dss_next;
+				malloc_mutex_unlock(&dss_mtx);
+				if (cpad_size != 0)
+					chunk_dealloc(cpad, cpad_size, true);
+				*zero = true;
+				return (ret);
+			}
+		} while (dss_prev != (void *)-1);
+	}
+	malloc_mutex_unlock(&dss_mtx);
+
+	return (NULL);
+}
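
/*
 * Worked example (illustrative only, not part of the changeset) of the
 * sbrk() arithmetic above, with made-up values: chunksize = 1 MiB,
 * size = 2 MiB, alignment = 4 MiB, and the current break at 0x80521000.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uintptr_t dss_max = 0x80521000;
	size_t chunksize = 1 << 20, size = 2 << 20, alignment = 4 << 20;
	size_t gap_size = (chunksize - (dss_max & (chunksize - 1))) &
	    (chunksize - 1);
	uintptr_t cpad = dss_max + gap_size;
	uintptr_t ret = (dss_max + alignment - 1) & ~(uintptr_t)(alignment - 1);
	size_t cpad_size = ret - cpad;
	size_t incr = gap_size + cpad_size + size;

	/* gap=0xdf000 cpad_size=0x200000 incr=0x4df000 ret=0x80800000 */
	printf("gap=%#zx cpad_size=%#zx incr=%#zx ret=%#lx\n", gap_size,
	    cpad_size, incr, (unsigned long)ret);
	return (0);
}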
+
+bool
+chunk_in_dss(void *chunk)
+{
+	bool ret;
+
+	cassert(config_dss);
+
+	malloc_mutex_lock(&dss_mtx);
+	if ((uintptr_t)chunk >= (uintptr_t)dss_base
+	    && (uintptr_t)chunk < (uintptr_t)dss_max)
+		ret = true;
+	else
+		ret = false;
+	malloc_mutex_unlock(&dss_mtx);
+
+	return (ret);
+}
+
+bool
+chunk_dss_boot(void)
+{
+
+	cassert(config_dss);
+
+	if (malloc_mutex_init(&dss_mtx))
+		return (true);
+	dss_base = sbrk(0);
+	dss_prev = dss_base;
+	dss_max = dss_base;
+
+	return (false);
+}
+
+void
+chunk_dss_prefork(void)
+{
+
+	if (config_dss)
+		malloc_mutex_prefork(&dss_mtx);
+}
+
+void
+chunk_dss_postfork_parent(void)
+{
+
+	if (config_dss)
+		malloc_mutex_postfork_parent(&dss_mtx);
+}
+
+void
+chunk_dss_postfork_child(void)
+{
+
+	if (config_dss)
+		malloc_mutex_postfork_child(&dss_mtx);
+}
+
+/******************************************************************************/
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/src/chunk_mmap.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/src/chunk_mmap.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,207 @@
+#define	JEMALLOC_CHUNK_MMAP_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+/*
+ * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
+ * potentially avoid some system calls.
+ */
+malloc_tsd_data(static, mmap_unaligned, bool, false)
+malloc_tsd_funcs(JEMALLOC_INLINE, mmap_unaligned, bool, false,
+    malloc_tsd_no_cleanup)
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static void	*pages_map(void *addr, size_t size);
+static void	pages_unmap(void *addr, size_t size);
+static void	*chunk_alloc_mmap_slow(size_t size, size_t alignment,
+    bool unaligned);
+
+/******************************************************************************/
+
+static void *
+pages_map(void *addr, size_t size)
+{
+	void *ret;
+
+	/*
+	 * We don't use MAP_FIXED here, because it can cause the *replacement*
+	 * of existing mappings, and we only want to create new mappings.
+	 */
+	ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
+	    -1, 0);
+	assert(ret != NULL);
+
+	if (ret == MAP_FAILED)
+		ret = NULL;
+	else if (addr != NULL && ret != addr) {
+		/*
+		 * We succeeded in mapping memory, but not in the right place.
+		 */
+		if (munmap(ret, size) == -1) {
+			char buf[BUFERROR_BUF];
+
+			buferror(errno, buf, sizeof(buf));
+			malloc_printf("<jemalloc>: Error in munmap(): %s\n",
+			    buf);
+			if (opt_abort)
+				abort();
+		}
+		ret = NULL;
+	}
+
+	assert(ret == NULL || (addr == NULL && ret != addr)
+	    || (addr != NULL && ret == addr));
+	return (ret);
+}
+
+static void
+pages_unmap(void *addr, size_t size)
+{
+
+	if (munmap(addr, size) == -1) {
+		char buf[BUFERROR_BUF];
+
+		buferror(errno, buf, sizeof(buf));
+		malloc_printf("<jemalloc>: Error in munmap(): %s\n", buf);
+		if (opt_abort)
+			abort();
+	}
+}
+
+static void *
+chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned)
+{
+	void *ret, *pages;
+	size_t alloc_size, leadsize, trailsize;
+
+	alloc_size = size + alignment - PAGE;
+	/* Beware size_t wrap-around. */
+	if (alloc_size < size)
+		return (NULL);
+	pages = pages_map(NULL, alloc_size);
+	if (pages == NULL)
+		return (NULL);
+	leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
+	    (uintptr_t)pages;
+	assert(alloc_size >= leadsize + size);
+	trailsize = alloc_size - leadsize - size;
+	ret = (void *)((uintptr_t)pages + leadsize);
+	if (leadsize != 0) {
+		/* Note that mmap() returned an unaligned mapping. */
+		unaligned = true;
+		pages_unmap(pages, leadsize);
+	}
+	if (trailsize != 0)
+		pages_unmap((void *)((uintptr_t)ret + size), trailsize);
+
+	/*
+	 * If mmap() returned an aligned mapping, reset mmap_unaligned so that
+	 * the next chunk_alloc_mmap() execution tries the fast allocation
+	 * method.
+	 */
+	if (unaligned == false && mmap_unaligned_booted) {
+		bool mu = false;
+		mmap_unaligned_tsd_set(&mu);
+	}
+
+	return (ret);
+}
+
+void *
+chunk_alloc_mmap(size_t size, size_t alignment)
+{
+	void *ret;
+
+	/*
+	 * Ideally, there would be a way to specify alignment to mmap() (like
+	 * NetBSD has), but in the absence of such a feature, we have to work
+	 * hard to efficiently create aligned mappings.  The reliable but
+	 * slow method is to create an over-sized mapping, then trim the
+	 * excess.  However, that always results in at least one call to
+	 * pages_unmap().
+	 *
+	 * A more optimistic approach is to try mapping precisely the right
+	 * amount, then try to append another mapping if alignment is off.  In
+	 * practice, this works out well as long as the application is not
+	 * interleaving mappings via direct mmap() calls.  If we do run into a
+	 * situation where there is an interleaved mapping and we are unable to
+	 * extend an unaligned mapping, our best option is to switch to the
+	 * slow method until mmap() returns another aligned mapping.  This will
+	 * tend to leave a gap in the memory map that is too small to cause
+	 * later problems for the optimistic method.
+	 *
+	 * Another possible confounding factor is address space layout
+	 * randomization (ASLR), which causes mmap(2) to disregard the
+	 * requested address.  mmap_unaligned tracks whether the previous
+	 * chunk_alloc_mmap() execution received any unaligned or relocated
+	 * mappings, and if so, the current execution will immediately fall
+	 * back to the slow method.  However, we keep track of whether the fast
+	 * method would have succeeded, and if so, we make a note to try the
+	 * fast method next time.
+	 */
+
+	if (mmap_unaligned_booted && *mmap_unaligned_tsd_get() == false) {
+		size_t offset;
+
+		ret = pages_map(NULL, size);
+		if (ret == NULL)
+			return (NULL);
+
+		offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
+		if (offset != 0) {
+			bool mu = true;
+			mmap_unaligned_tsd_set(&mu);
+			/* Try to extend chunk boundary. */
+			if (pages_map((void *)((uintptr_t)ret + size),
+			    chunksize - offset) == NULL) {
+				/*
+				 * Extension failed.  Clean up, then revert to
+				 * the reliable-but-expensive method.
+				 */
+				pages_unmap(ret, size);
+				ret = chunk_alloc_mmap_slow(size, alignment,
+				    true);
+			} else {
+				/* Clean up unneeded leading space. */
+				pages_unmap(ret, chunksize - offset);
+				ret = (void *)((uintptr_t)ret + (chunksize -
+				    offset));
+			}
+		}
+	} else
+		ret = chunk_alloc_mmap_slow(size, alignment, false);
+
+	return (ret);
+}
+
+bool
+chunk_dealloc_mmap(void *chunk, size_t size)
+{
+
+	if (config_munmap)
+		pages_unmap(chunk, size);
+
+	return (config_munmap == false);
+}
+
+bool
+chunk_mmap_boot(void)
+{
+
+	/*
+	 * XXX For the non-TLS implementation of tsd, the first access from
+	 * each thread causes memory allocation.  The result is a bootstrapping
+	 * problem for this particular use case, so for now just disable it by
+	 * leaving it in an unbooted state.
+	 */
+#ifdef JEMALLOC_TLS
+	if (mmap_unaligned_tsd_boot())
+		return (true);
+#endif
+
+	return (false);
+}
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/src/ckh.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/src/ckh.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,609 @@
+/*
+ *******************************************************************************
+ * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each
+ * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash
+ * functions are employed.  The original cuckoo hashing algorithm was described
+ * in:
+ *
+ *   Pagh, R., F.F. Rodler (2004) Cuckoo Hashing.  Journal of Algorithms
+ *     51(2):122-144.
+ *
+ * Generalization of cuckoo hashing was discussed in:
+ *
+ *   Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical
+ *     alternative to traditional hash tables.  In Proceedings of the 7th
+ *     Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA,
+ *     January 2006.
+ *
+ * This implementation uses precisely two hash functions because that is the
+ * fewest that can work, and supporting multiple hashes is an implementation
+ * burden.  Here is a reproduction of Figure 1 from Erlingsson et al. (2006)
+ * that shows approximate expected maximum load factors for various
+ * configurations:
+ *
+ *           |         #cells/bucket         |
+ *   #hashes |   1   |   2   |   4   |   8   |
+ *   --------+-------+-------+-------+-------+
+ *         1 | 0.006 | 0.006 | 0.03  | 0.12  |
+ *         2 | 0.49  | 0.86  |>0.93< |>0.96< |
+ *         3 | 0.91  | 0.97  | 0.98  | 0.999 |
+ *         4 | 0.97  | 0.99  | 0.999 |       |
+ *
+ * The number of cells per bucket is chosen such that a bucket fits in one cache
+ * line.  So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing,
+ * respectively.
+ *
+ ******************************************************************************/
+#define	JEMALLOC_CKH_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static bool	ckh_grow(ckh_t *ckh);
+static void	ckh_shrink(ckh_t *ckh);
+
+/******************************************************************************/
+
+/*
+ * Search bucket for key and return the cell number if found; SIZE_T_MAX
+ * otherwise.
+ */
+JEMALLOC_INLINE size_t
+ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
+{
+	ckhc_t *cell;
+	unsigned i;
+
+	for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
+		cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
+		if (cell->key != NULL && ckh->keycomp(key, cell->key))
+			return ((bucket << LG_CKH_BUCKET_CELLS) + i);
+	}
+
+	return (SIZE_T_MAX);
+}
+
+/*
+ * Search table for key and return cell number if found; SIZE_T_MAX otherwise.
+ */
+JEMALLOC_INLINE size_t
+ckh_isearch(ckh_t *ckh, const void *key)
+{
+	size_t hash1, hash2, bucket, cell;
+
+	assert(ckh != NULL);
+
+	ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
+
+	/* Search primary bucket. */
+	bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+	cell = ckh_bucket_search(ckh, bucket, key);
+	if (cell != SIZE_T_MAX)
+		return (cell);
+
+	/* Search secondary bucket. */
+	bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+	cell = ckh_bucket_search(ckh, bucket, key);
+	return (cell);
+}
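
/*
 * Worked example (illustrative only, not part of the changeset) of the
 * two-bucket lookup above: with lg_curbuckets == 4 (16 buckets) and
 * pretend hash outputs hash1 == 42 and hash2 == 145, a key can live only
 * in bucket 42 & 15 == 10 or bucket 145 & 15 == 1.
 */
#include <stdio.h>

int
main(void)
{
	size_t lg_curbuckets = 4;
	size_t hash1 = 42, hash2 = 145;
	size_t mask = ((size_t)1 << lg_curbuckets) - 1;

	printf("primary bucket=%zu secondary bucket=%zu\n", hash1 & mask,
	    hash2 & mask);
	return (0);
}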
+
+JEMALLOC_INLINE bool
+ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
+    const void *data)
+{
+	ckhc_t *cell;
+	unsigned offset, i;
+
+	/*
+	 * Cycle through the cells in the bucket, starting at a random position.
+	 * The randomness avoids worst-case search overhead as buckets fill up.
+	 */
+	prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
+	for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
+		cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
+		    ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
+		if (cell->key == NULL) {
+			cell->key = key;
+			cell->data = data;
+			ckh->count++;
+			return (false);
+		}
+	}
+
+	return (true);
+}
+
+/*
+ * No space is available in bucket.  Randomly evict an item, then try to find an
+ * alternate location for that item.  Iteratively repeat this
+ * eviction/relocation procedure until either success or detection of an
+ * eviction/relocation bucket cycle.
+ */
+JEMALLOC_INLINE bool
+ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
+    void const **argdata)
+{
+	const void *key, *data, *tkey, *tdata;
+	ckhc_t *cell;
+	size_t hash1, hash2, bucket, tbucket;
+	unsigned i;
+
+	bucket = argbucket;
+	key = *argkey;
+	data = *argdata;
+	while (true) {
+		/*
+		 * Choose a random item within the bucket to evict.  This is
+		 * critical to correct function, because without (eventually)
+		 * evicting all items within a bucket during iteration, it
+		 * would be possible to get stuck in an infinite loop if there
+		 * were an item for which both hashes indicated the same
+		 * bucket.
+		 */
+		prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
+		cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
+		assert(cell->key != NULL);
+
+		/* Swap cell->{key,data} and {key,data} (evict). */
+		tkey = cell->key; tdata = cell->data;
+		cell->key = key; cell->data = data;
+		key = tkey; data = tdata;
+
+#ifdef CKH_COUNT
+		ckh->nrelocs++;
+#endif
+
+		/* Find the alternate bucket for the evicted item. */
+		ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
+		tbucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+		if (tbucket == bucket) {
+			tbucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+			/*
+			 * It may be that (tbucket == bucket) still, if the
+			 * item's hashes both indicate this bucket.  However,
+			 * we are guaranteed to eventually escape this bucket
+			 * during iteration, assuming pseudo-random item
+			 * selection (true randomness would make infinite
+			 * looping a remote possibility).  The reason we can
+			 * never get trapped forever is that there are two
+			 * cases:
+			 *
+			 * 1) This bucket == argbucket, so we will quickly
+			 *    detect an eviction cycle and terminate.
+			 * 2) An item was evicted to this bucket from another,
+			 *    which means that at least one item in this bucket
+			 *    has hashes that indicate distinct buckets.
+			 */
+		}
+		/* Check for a cycle. */
+		if (tbucket == argbucket) {
+			*argkey = key;
+			*argdata = data;
+			return (true);
+		}
+
+		bucket = tbucket;
+		if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
+			return (false);
+	}
+}
+
+JEMALLOC_INLINE bool
+ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
+{
+	size_t hash1, hash2, bucket;
+	const void *key = *argkey;
+	const void *data = *argdata;
+
+	ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
+
+	/* Try to insert in primary bucket. */
+	bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+	if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
+		return (false);
+
+	/* Try to insert in secondary bucket. */
+	bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+	if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
+		return (false);
+
+	/*
+	 * Try to find a place for this item via iterative eviction/relocation.
+	 */
+	return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata));
+}
+
+/*
+ * Try to rebuild the hash table from scratch by inserting all items from the
+ * old table into the new.
+ */
+JEMALLOC_INLINE bool
+ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
+{
+	size_t count, i, nins;
+	const void *key, *data;
+
+	count = ckh->count;
+	ckh->count = 0;
+	for (i = nins = 0; nins < count; i++) {
+		if (aTab[i].key != NULL) {
+			key = aTab[i].key;
+			data = aTab[i].data;
+			if (ckh_try_insert(ckh, &key, &data)) {
+				ckh->count = count;
+				return (true);
+			}
+			nins++;
+		}
+	}
+
+	return (false);
+}
+
+static bool
+ckh_grow(ckh_t *ckh)
+{
+	bool ret;
+	ckhc_t *tab, *ttab;
+	size_t lg_curcells;
+	unsigned lg_prevbuckets;
+
+#ifdef CKH_COUNT
+	ckh->ngrows++;
+#endif
+
+	/*
+	 * It is possible (though unlikely, given well behaved hashes) that the
+	 * table will have to be doubled more than once in order to create a
+	 * usable table.
+	 */
+	lg_prevbuckets = ckh->lg_curbuckets;
+	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
+	while (true) {
+		size_t usize;
+
+		lg_curcells++;
+		usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+		if (usize == 0) {
+			ret = true;
+			goto label_return;
+		}
+		tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+		if (tab == NULL) {
+			ret = true;
+			goto label_return;
+		}
+		/* Swap in new table. */
+		ttab = ckh->tab;
+		ckh->tab = tab;
+		tab = ttab;
+		ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
+
+		if (ckh_rebuild(ckh, tab) == false) {
+			idalloc(tab);
+			break;
+		}
+
+		/* Rebuilding failed, so back out partially rebuilt table. */
+		idalloc(ckh->tab);
+		ckh->tab = tab;
+		ckh->lg_curbuckets = lg_prevbuckets;
+	}
+
+	ret = false;
+label_return:
+	return (ret);
+}
+
+static void
+ckh_shrink(ckh_t *ckh)
+{
+	ckhc_t *tab, *ttab;
+	size_t lg_curcells, usize;
+	unsigned lg_prevbuckets;
+
+	/*
+	 * It is possible (though unlikely, given well behaved hashes) that the
+	 * table rebuild will fail.
+	 */
+	lg_prevbuckets = ckh->lg_curbuckets;
+	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
+	usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+	if (usize == 0)
+		return;
+	tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+	if (tab == NULL) {
+		/*
+		 * An OOM error isn't worth propagating, since it doesn't
+		 * prevent this or future operations from proceeding.
+		 */
+		return;
+	}
+	/* Swap in new table. */
+	ttab = ckh->tab;
+	ckh->tab = tab;
+	tab = ttab;
+	ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
+
+	if (ckh_rebuild(ckh, tab) == false) {
+		idalloc(tab);
+#ifdef CKH_COUNT
+		ckh->nshrinks++;
+#endif
+		return;
+	}
+
+	/* Rebuilding failed, so back out partially rebuilt table. */
+	idalloc(ckh->tab);
+	ckh->tab = tab;
+	ckh->lg_curbuckets = lg_prevbuckets;
+#ifdef CKH_COUNT
+	ckh->nshrinkfails++;
+#endif
+}
+
+bool
+ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
+{
+	bool ret;
+	size_t mincells, usize;
+	unsigned lg_mincells;
+
+	assert(minitems > 0);
+	assert(hash != NULL);
+	assert(keycomp != NULL);
+
+#ifdef CKH_COUNT
+	ckh->ngrows = 0;
+	ckh->nshrinks = 0;
+	ckh->nshrinkfails = 0;
+	ckh->ninserts = 0;
+	ckh->nrelocs = 0;
+#endif
+	ckh->prng_state = 42; /* Value doesn't really matter. */
+	ckh->count = 0;
+
+	/*
+	 * Find the minimum power of 2 that is large enough to fit minitems
+	 * entries.  We are using (2+,2) cuckoo hashing, which has an expected
+	 * maximum load factor of at least ~0.86, so 0.75 is a conservative load
+	 * factor that will typically allow minitems to fit without ever
+	 * growing the table.
+	 */
+	assert(LG_CKH_BUCKET_CELLS > 0);
+	mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
+	for (lg_mincells = LG_CKH_BUCKET_CELLS;
+	    (ZU(1) << lg_mincells) < mincells;
+	    lg_mincells++)
+		; /* Do nothing. */
+	ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
+	ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
+	ckh->hash = hash;
+	ckh->keycomp = keycomp;
+
+	usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
+	if (usize == 0) {
+		ret = true;
+		goto label_return;
+	}
+	ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+	if (ckh->tab == NULL) {
+		ret = true;
+		goto label_return;
+	}
+
+	ret = false;
+label_return:
+	return (ret);
+}
+
+void
+ckh_delete(ckh_t *ckh)
+{
+
+	assert(ckh != NULL);
+
+#ifdef CKH_VERBOSE
+	malloc_printf(
+	    "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
+	    " nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
+	    " nrelocs: %"PRIu64"\n", __func__, ckh,
+	    (unsigned long long)ckh->ngrows,
+	    (unsigned long long)ckh->nshrinks,
+	    (unsigned long long)ckh->nshrinkfails,
+	    (unsigned long long)ckh->ninserts,
+	    (unsigned long long)ckh->nrelocs);
+#endif
+
+	idalloc(ckh->tab);
+#ifdef JEMALLOC_DEBUG
+	memset(ckh, 0x5a, sizeof(ckh_t));
+#endif
+}
+
+size_t
+ckh_count(ckh_t *ckh)
+{
+
+	assert(ckh != NULL);
+
+	return (ckh->count);
+}
+
+bool
+ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
+{
+	size_t i, ncells;
+
+	for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
+	    LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
+		if (ckh->tab[i].key != NULL) {
+			if (key != NULL)
+				*key = (void *)ckh->tab[i].key;
+			if (data != NULL)
+				*data = (void *)ckh->tab[i].data;
+			*tabind = i + 1;
+			return (false);
+		}
+	}
+
+	return (true);
+}
+
+bool
+ckh_insert(ckh_t *ckh, const void *key, const void *data)
+{
+	bool ret;
+
+	assert(ckh != NULL);
+	assert(ckh_search(ckh, key, NULL, NULL));
+
+#ifdef CKH_COUNT
+	ckh->ninserts++;
+#endif
+
+	while (ckh_try_insert(ckh, &key, &data)) {
+		if (ckh_grow(ckh)) {
+			ret = true;
+			goto label_return;
+		}
+	}
+
+	ret = false;
+label_return:
+	return (ret);
+}
+
+bool
+ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
+{
+	size_t cell;
+
+	assert(ckh != NULL);
+
+	cell = ckh_isearch(ckh, searchkey);
+	if (cell != SIZE_T_MAX) {
+		if (key != NULL)
+			*key = (void *)ckh->tab[cell].key;
+		if (data != NULL)
+			*data = (void *)ckh->tab[cell].data;
+		ckh->tab[cell].key = NULL;
+		ckh->tab[cell].data = NULL; /* Not necessary. */
+
+		ckh->count--;
+		/* Try to halve the table if it is less than 1/4 full. */
+		if (ckh->count < (ZU(1) << (ckh->lg_curbuckets
+		    + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
+		    > ckh->lg_minbuckets) {
+			/* Ignore error due to OOM. */
+			ckh_shrink(ckh);
+		}
+
+		return (false);
+	}
+
+	return (true);
+}
+
+bool
+ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
+{
+	size_t cell;
+
+	assert(ckh != NULL);
+
+	cell = ckh_isearch(ckh, searchkey);
+	if (cell != SIZE_T_MAX) {
+		if (key != NULL)
+			*key = (void *)ckh->tab[cell].key;
+		if (data != NULL)
+			*data = (void *)ckh->tab[cell].data;
+		return (false);
+	}
+
+	return (true);
+}
+
+void
+ckh_string_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
+{
+	size_t ret1, ret2;
+	uint64_t h;
+
+	assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
+	assert(hash1 != NULL);
+	assert(hash2 != NULL);
+
+	h = hash(key, strlen((const char *)key), UINT64_C(0x94122f335b332aea));
+	if (minbits <= 32) {
+		/*
+		 * Avoid doing multiple hashes, since a single hash provides
+		 * enough bits.
+		 */
+		ret1 = h & ZU(0xffffffffU);
+		ret2 = h >> 32;
+	} else {
+		ret1 = h;
+		ret2 = hash(key, strlen((const char *)key),
+		    UINT64_C(0x8432a476666bbc13));
+	}
+
+	*hash1 = ret1;
+	*hash2 = ret2;
+}
+
+bool
+ckh_string_keycomp(const void *k1, const void *k2)
+{
+
+	assert(k1 != NULL);
+	assert(k2 != NULL);
+
+	return (strcmp((char *)k1, (char *)k2) ? false : true);
+}
+
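/*
 * Hypothetical usage sketch (illustrative only, not part of the
 * changeset) of the ckh API in this file, using the string hash/keycomp
 * helpers above; error handling is abbreviated.
 */
static void
ckh_usage_sketch(void)
{
	ckh_t ckh;
	void *data;

	if (ckh_new(&ckh, 16, ckh_string_hash, ckh_string_keycomp))
		return;
	if (ckh_insert(&ckh, "answer", "42") == false &&
	    ckh_search(&ckh, "answer", NULL, &data) == false)
		malloc_printf("found: %s\n", (const char *)data);
	ckh_delete(&ckh);
}
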
+void
+ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
+    size_t *hash2)
+{
+	size_t ret1, ret2;
+	uint64_t h;
+	union {
+		const void	*v;
+		uint64_t	i;
+	} u;
+
+	assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
+	assert(hash1 != NULL);
+	assert(hash2 != NULL);
+
+	assert(sizeof(u.v) == sizeof(u.i));
+#if (LG_SIZEOF_PTR != LG_SIZEOF_INT)
+	u.i = 0;
+#endif
+	u.v = key;
+	h = hash(&u.i, sizeof(u.i), UINT64_C(0xd983396e68886082));
+	if (minbits <= 32) {
+		/*
+		 * Avoid doing multiple hashes, since a single hash provides
+		 * enough bits.
+		 */
+		ret1 = h & ZU(0xffffffffU);
+		ret2 = h >> 32;
+	} else {
+		assert(SIZEOF_PTR == 8);
+		ret1 = h;
+		ret2 = hash(&u.i, sizeof(u.i), UINT64_C(0x5e2be9aff8709a5d));
+	}
+
+	*hash1 = ret1;
+	*hash2 = ret2;
+}
+
+bool
+ckh_pointer_keycomp(const void *k1, const void *k2)
+{
+
+	return ((k1 == k2) ? true : false);
+}
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/src/ctl.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/src/ctl.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,1385 @@
+#define	JEMALLOC_CTL_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+/*
+ * ctl_mtx protects the following:
+ * - ctl_stats.*
+ * - opt_prof_active
+ */
+static malloc_mutex_t	ctl_mtx;
+static bool		ctl_initialized;
+static uint64_t		ctl_epoch;
+static ctl_stats_t	ctl_stats;
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+#define	CTL_PROTO(n)							\
+static int	n##_ctl(const size_t *mib, size_t miblen, void *oldp,	\
+    size_t *oldlenp, void *newp, size_t newlen);
+
+#define	INDEX_PROTO(n)							\
+const ctl_node_t	*n##_index(const size_t *mib, size_t miblen,	\
+    size_t i);
+
+static bool	ctl_arena_init(ctl_arena_stats_t *astats);
+static void	ctl_arena_clear(ctl_arena_stats_t *astats);
+static void	ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
+    arena_t *arena);
+static void	ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
+    ctl_arena_stats_t *astats);
+static void	ctl_arena_refresh(arena_t *arena, unsigned i);
+static void	ctl_refresh(void);
+static bool	ctl_init(void);
+static int	ctl_lookup(const char *name, ctl_node_t const **nodesp,
+    size_t *mibp, size_t *depthp);
+
+CTL_PROTO(version)
+CTL_PROTO(epoch)
+CTL_PROTO(thread_tcache_enabled)
+CTL_PROTO(thread_tcache_flush)
+CTL_PROTO(thread_arena)
+CTL_PROTO(thread_allocated)
+CTL_PROTO(thread_allocatedp)
+CTL_PROTO(thread_deallocated)
+CTL_PROTO(thread_deallocatedp)
+CTL_PROTO(config_debug)
+CTL_PROTO(config_dss)
+CTL_PROTO(config_fill)
+CTL_PROTO(config_lazy_lock)
+CTL_PROTO(config_munmap)
+CTL_PROTO(config_prof)
+CTL_PROTO(config_prof_libgcc)
+CTL_PROTO(config_prof_libunwind)
+CTL_PROTO(config_stats)
+CTL_PROTO(config_tcache)
+CTL_PROTO(config_tls)
+CTL_PROTO(config_utrace)
+CTL_PROTO(config_valgrind)
+CTL_PROTO(config_xmalloc)
+CTL_PROTO(opt_abort)
+CTL_PROTO(opt_lg_chunk)
+CTL_PROTO(opt_narenas)
+CTL_PROTO(opt_lg_dirty_mult)
+CTL_PROTO(opt_stats_print)
+CTL_PROTO(opt_junk)
+CTL_PROTO(opt_zero)
+CTL_PROTO(opt_quarantine)
+CTL_PROTO(opt_redzone)
+CTL_PROTO(opt_utrace)
+CTL_PROTO(opt_valgrind)
+CTL_PROTO(opt_xmalloc)
+CTL_PROTO(opt_tcache)
+CTL_PROTO(opt_lg_tcache_max)
+CTL_PROTO(opt_prof)
+CTL_PROTO(opt_prof_prefix)
+CTL_PROTO(opt_prof_active)
+CTL_PROTO(opt_lg_prof_sample)
+CTL_PROTO(opt_lg_prof_interval)
+CTL_PROTO(opt_prof_gdump)
+CTL_PROTO(opt_prof_leak)
+CTL_PROTO(opt_prof_accum)
+CTL_PROTO(arenas_bin_i_size)
+CTL_PROTO(arenas_bin_i_nregs)
+CTL_PROTO(arenas_bin_i_run_size)
+INDEX_PROTO(arenas_bin_i)
+CTL_PROTO(arenas_lrun_i_size)
+INDEX_PROTO(arenas_lrun_i)
+CTL_PROTO(arenas_narenas)
+CTL_PROTO(arenas_initialized)
+CTL_PROTO(arenas_quantum)
+CTL_PROTO(arenas_page)
+CTL_PROTO(arenas_tcache_max)
+CTL_PROTO(arenas_nbins)
+CTL_PROTO(arenas_nhbins)
+CTL_PROTO(arenas_nlruns)
+CTL_PROTO(arenas_purge)
+CTL_PROTO(prof_active)
+CTL_PROTO(prof_dump)
+CTL_PROTO(prof_interval)
+CTL_PROTO(stats_chunks_current)
+CTL_PROTO(stats_chunks_total)
+CTL_PROTO(stats_chunks_high)
+CTL_PROTO(stats_huge_allocated)
+CTL_PROTO(stats_huge_nmalloc)
+CTL_PROTO(stats_huge_ndalloc)
+CTL_PROTO(stats_arenas_i_small_allocated)
+CTL_PROTO(stats_arenas_i_small_nmalloc)
+CTL_PROTO(stats_arenas_i_small_ndalloc)
+CTL_PROTO(stats_arenas_i_small_nrequests)
+CTL_PROTO(stats_arenas_i_large_allocated)
+CTL_PROTO(stats_arenas_i_large_nmalloc)
+CTL_PROTO(stats_arenas_i_large_ndalloc)
+CTL_PROTO(stats_arenas_i_large_nrequests)
+CTL_PROTO(stats_arenas_i_bins_j_allocated)
+CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
+CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
+CTL_PROTO(stats_arenas_i_bins_j_nrequests)
+CTL_PROTO(stats_arenas_i_bins_j_nfills)
+CTL_PROTO(stats_arenas_i_bins_j_nflushes)
+CTL_PROTO(stats_arenas_i_bins_j_nruns)
+CTL_PROTO(stats_arenas_i_bins_j_nreruns)
+CTL_PROTO(stats_arenas_i_bins_j_curruns)
+INDEX_PROTO(stats_arenas_i_bins_j)
+CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
+CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
+CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
+CTL_PROTO(stats_arenas_i_lruns_j_curruns)
+INDEX_PROTO(stats_arenas_i_lruns_j)
+CTL_PROTO(stats_arenas_i_nthreads)
+CTL_PROTO(stats_arenas_i_pactive)
+CTL_PROTO(stats_arenas_i_pdirty)
+CTL_PROTO(stats_arenas_i_mapped)
+CTL_PROTO(stats_arenas_i_npurge)
+CTL_PROTO(stats_arenas_i_nmadvise)
+CTL_PROTO(stats_arenas_i_purged)
+INDEX_PROTO(stats_arenas_i)
+CTL_PROTO(stats_cactive)
+CTL_PROTO(stats_allocated)
+CTL_PROTO(stats_active)
+CTL_PROTO(stats_mapped)
+
+/******************************************************************************/
+/* mallctl tree. */
+
+/* Maximum tree depth. */
+#define	CTL_MAX_DEPTH	6
+
+#define	NAME(n)	true,	{.named = {n
+#define	CHILD(c) sizeof(c##_node) / sizeof(ctl_node_t),	c##_node}},	NULL
+#define	CTL(c)	0,				NULL}},		c##_ctl
+
+/*
+ * Only handles internal indexed nodes, since there are currently no external
+ * ones.
+ */
+#define	INDEX(i)	false,	{.indexed = {i##_index}},		NULL
+
+static const ctl_node_t	tcache_node[] = {
+	{NAME("enabled"),	CTL(thread_tcache_enabled)},
+	{NAME("flush"),		CTL(thread_tcache_flush)}
+};
+
+static const ctl_node_t	thread_node[] = {
+	{NAME("arena"),		CTL(thread_arena)},
+	{NAME("allocated"),	CTL(thread_allocated)},
+	{NAME("allocatedp"),	CTL(thread_allocatedp)},
+	{NAME("deallocated"),	CTL(thread_deallocated)},
+	{NAME("deallocatedp"),	CTL(thread_deallocatedp)},
+	{NAME("tcache"),	CHILD(tcache)}
+};
+
+static const ctl_node_t	config_node[] = {
+	{NAME("debug"),			CTL(config_debug)},
+	{NAME("dss"),			CTL(config_dss)},
+	{NAME("fill"),			CTL(config_fill)},
+	{NAME("lazy_lock"),		CTL(config_lazy_lock)},
+	{NAME("munmap"),		CTL(config_munmap)},
+	{NAME("prof"),			CTL(config_prof)},
+	{NAME("prof_libgcc"),		CTL(config_prof_libgcc)},
+	{NAME("prof_libunwind"),	CTL(config_prof_libunwind)},
+	{NAME("stats"),			CTL(config_stats)},
+	{NAME("tcache"),		CTL(config_tcache)},
+	{NAME("tls"),			CTL(config_tls)},
+	{NAME("utrace"),		CTL(config_utrace)},
+	{NAME("valgrind"),		CTL(config_valgrind)},
+	{NAME("xmalloc"),		CTL(config_xmalloc)}
+};
+
+static const ctl_node_t opt_node[] = {
+	{NAME("abort"),			CTL(opt_abort)},
+	{NAME("lg_chunk"),		CTL(opt_lg_chunk)},
+	{NAME("narenas"),		CTL(opt_narenas)},
+	{NAME("lg_dirty_mult"),		CTL(opt_lg_dirty_mult)},
+	{NAME("stats_print"),		CTL(opt_stats_print)},
+	{NAME("junk"),			CTL(opt_junk)},
+	{NAME("zero"),			CTL(opt_zero)},
+	{NAME("quarantine"),		CTL(opt_quarantine)},
+	{NAME("redzone"),		CTL(opt_redzone)},
+	{NAME("utrace"),		CTL(opt_utrace)},
+	{NAME("valgrind"),		CTL(opt_valgrind)},
+	{NAME("xmalloc"),		CTL(opt_xmalloc)},
+	{NAME("tcache"),		CTL(opt_tcache)},
+	{NAME("lg_tcache_max"),		CTL(opt_lg_tcache_max)},
+	{NAME("prof"),			CTL(opt_prof)},
+	{NAME("prof_prefix"),		CTL(opt_prof_prefix)},
+	{NAME("prof_active"),		CTL(opt_prof_active)},
+	{NAME("lg_prof_sample"),	CTL(opt_lg_prof_sample)},
+	{NAME("lg_prof_interval"),	CTL(opt_lg_prof_interval)},
+	{NAME("prof_gdump"),		CTL(opt_prof_gdump)},
+	{NAME("prof_leak"),		CTL(opt_prof_leak)},
+	{NAME("prof_accum"),		CTL(opt_prof_accum)}
+};
+
+static const ctl_node_t arenas_bin_i_node[] = {
+	{NAME("size"),			CTL(arenas_bin_i_size)},
+	{NAME("nregs"),			CTL(arenas_bin_i_nregs)},
+	{NAME("run_size"),		CTL(arenas_bin_i_run_size)}
+};
+static const ctl_node_t super_arenas_bin_i_node[] = {
+	{NAME(""),			CHILD(arenas_bin_i)}
+};
+
+static const ctl_node_t arenas_bin_node[] = {
+	{INDEX(arenas_bin_i)}
+};
+
+static const ctl_node_t arenas_lrun_i_node[] = {
+	{NAME("size"),			CTL(arenas_lrun_i_size)}
+};
+static const ctl_node_t super_arenas_lrun_i_node[] = {
+	{NAME(""),			CHILD(arenas_lrun_i)}
+};
+
+static const ctl_node_t arenas_lrun_node[] = {
+	{INDEX(arenas_lrun_i)}
+};
+
+static const ctl_node_t arenas_node[] = {
+	{NAME("narenas"),		CTL(arenas_narenas)},
+	{NAME("initialized"),		CTL(arenas_initialized)},
+	{NAME("quantum"),		CTL(arenas_quantum)},
+	{NAME("page"),			CTL(arenas_page)},
+	{NAME("tcache_max"),		CTL(arenas_tcache_max)},
+	{NAME("nbins"),			CTL(arenas_nbins)},
+	{NAME("nhbins"),		CTL(arenas_nhbins)},
+	{NAME("bin"),			CHILD(arenas_bin)},
+	{NAME("nlruns"),		CTL(arenas_nlruns)},
+	{NAME("lrun"),			CHILD(arenas_lrun)},
+	{NAME("purge"),			CTL(arenas_purge)}
+};
+
+static const ctl_node_t	prof_node[] = {
+	{NAME("active"),	CTL(prof_active)},
+	{NAME("dump"),		CTL(prof_dump)},
+	{NAME("interval"),	CTL(prof_interval)}
+};
+
+static const ctl_node_t stats_chunks_node[] = {
+	{NAME("current"),		CTL(stats_chunks_current)},
+	{NAME("total"),			CTL(stats_chunks_total)},
+	{NAME("high"),			CTL(stats_chunks_high)}
+};
+
+static const ctl_node_t stats_huge_node[] = {
+	{NAME("allocated"),		CTL(stats_huge_allocated)},
+	{NAME("nmalloc"),		CTL(stats_huge_nmalloc)},
+	{NAME("ndalloc"),		CTL(stats_huge_ndalloc)}
+};
+
+static const ctl_node_t stats_arenas_i_small_node[] = {
+	{NAME("allocated"),		CTL(stats_arenas_i_small_allocated)},
+	{NAME("nmalloc"),		CTL(stats_arenas_i_small_nmalloc)},
+	{NAME("ndalloc"),		CTL(stats_arenas_i_small_ndalloc)},
+	{NAME("nrequests"),		CTL(stats_arenas_i_small_nrequests)}
+};
+
+static const ctl_node_t stats_arenas_i_large_node[] = {
+	{NAME("allocated"),		CTL(stats_arenas_i_large_allocated)},
+	{NAME("nmalloc"),		CTL(stats_arenas_i_large_nmalloc)},
+	{NAME("ndalloc"),		CTL(stats_arenas_i_large_ndalloc)},
+	{NAME("nrequests"),		CTL(stats_arenas_i_large_nrequests)}
+};
+
+static const ctl_node_t stats_arenas_i_bins_j_node[] = {
+	{NAME("allocated"),		CTL(stats_arenas_i_bins_j_allocated)},
+	{NAME("nmalloc"),		CTL(stats_arenas_i_bins_j_nmalloc)},
+	{NAME("ndalloc"),		CTL(stats_arenas_i_bins_j_ndalloc)},
+	{NAME("nrequests"),		CTL(stats_arenas_i_bins_j_nrequests)},
+	{NAME("nfills"),		CTL(stats_arenas_i_bins_j_nfills)},
+	{NAME("nflushes"),		CTL(stats_arenas_i_bins_j_nflushes)},
+	{NAME("nruns"),			CTL(stats_arenas_i_bins_j_nruns)},
+	{NAME("nreruns"),		CTL(stats_arenas_i_bins_j_nreruns)},
+	{NAME("curruns"),		CTL(stats_arenas_i_bins_j_curruns)}
+};
+static const ctl_node_t super_stats_arenas_i_bins_j_node[] = {
+	{NAME(""),			CHILD(stats_arenas_i_bins_j)}
+};
+
+static const ctl_node_t stats_arenas_i_bins_node[] = {
+	{INDEX(stats_arenas_i_bins_j)}
+};
+
+static const ctl_node_t stats_arenas_i_lruns_j_node[] = {
+	{NAME("nmalloc"),		CTL(stats_arenas_i_lruns_j_nmalloc)},
+	{NAME("ndalloc"),		CTL(stats_arenas_i_lruns_j_ndalloc)},
+	{NAME("nrequests"),		CTL(stats_arenas_i_lruns_j_nrequests)},
+	{NAME("curruns"),		CTL(stats_arenas_i_lruns_j_curruns)}
+};
+static const ctl_node_t super_stats_arenas_i_lruns_j_node[] = {
+	{NAME(""),			CHILD(stats_arenas_i_lruns_j)}
+};
+
+static const ctl_node_t stats_arenas_i_lruns_node[] = {
+	{INDEX(stats_arenas_i_lruns_j)}
+};
+
+static const ctl_node_t stats_arenas_i_node[] = {
+	{NAME("nthreads"),		CTL(stats_arenas_i_nthreads)},
+	{NAME("pactive"),		CTL(stats_arenas_i_pactive)},
+	{NAME("pdirty"),		CTL(stats_arenas_i_pdirty)},
+	{NAME("mapped"),		CTL(stats_arenas_i_mapped)},
+	{NAME("npurge"),		CTL(stats_arenas_i_npurge)},
+	{NAME("nmadvise"),		CTL(stats_arenas_i_nmadvise)},
+	{NAME("purged"),		CTL(stats_arenas_i_purged)},
+	{NAME("small"),			CHILD(stats_arenas_i_small)},
+	{NAME("large"),			CHILD(stats_arenas_i_large)},
+	{NAME("bins"),			CHILD(stats_arenas_i_bins)},
+	{NAME("lruns"),		CHILD(stats_arenas_i_lruns)}
+};
+static const ctl_node_t super_stats_arenas_i_node[] = {
+	{NAME(""),			CHILD(stats_arenas_i)}
+};
+
+static const ctl_node_t stats_arenas_node[] = {
+	{INDEX(stats_arenas_i)}
+};
+
+static const ctl_node_t stats_node[] = {
+	{NAME("cactive"),		CTL(stats_cactive)},
+	{NAME("allocated"),		CTL(stats_allocated)},
+	{NAME("active"),		CTL(stats_active)},
+	{NAME("mapped"),		CTL(stats_mapped)},
+	{NAME("chunks"),		CHILD(stats_chunks)},
+	{NAME("huge"),			CHILD(stats_huge)},
+	{NAME("arenas"),		CHILD(stats_arenas)}
+};
+
+static const ctl_node_t	root_node[] = {
+	{NAME("version"),	CTL(version)},
+	{NAME("epoch"),		CTL(epoch)},
+	{NAME("thread"),	CHILD(thread)},
+	{NAME("config"),	CHILD(config)},
+	{NAME("opt"),		CHILD(opt)},
+	{NAME("arenas"),	CHILD(arenas)},
+	{NAME("prof"),		CHILD(prof)},
+	{NAME("stats"),		CHILD(stats)}
+};
+static const ctl_node_t super_root_node[] = {
+	{NAME(""),		CHILD(root)}
+};
+
+#undef NAME
+#undef CHILD
+#undef CTL
+#undef INDEX
+
+/******************************************************************************/
+
+static bool
+ctl_arena_init(ctl_arena_stats_t *astats)
+{
+
+	if (astats->lstats == NULL) {
+		astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
+		    sizeof(malloc_large_stats_t));
+		if (astats->lstats == NULL)
+			return (true);
+	}
+
+	return (false);
+}
+
+static void
+ctl_arena_clear(ctl_arena_stats_t *astats)
+{
+
+	astats->pactive = 0;
+	astats->pdirty = 0;
+	if (config_stats) {
+		memset(&astats->astats, 0, sizeof(arena_stats_t));
+		astats->allocated_small = 0;
+		astats->nmalloc_small = 0;
+		astats->ndalloc_small = 0;
+		astats->nrequests_small = 0;
+		memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
+		memset(astats->lstats, 0, nlclasses *
+		    sizeof(malloc_large_stats_t));
+	}
+}
+
+static void
+ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
+{
+	unsigned i;
+
+	arena_stats_merge(arena, &cstats->pactive, &cstats->pdirty,
+	    &cstats->astats, cstats->bstats, cstats->lstats);
+
+	for (i = 0; i < NBINS; i++) {
+		cstats->allocated_small += cstats->bstats[i].allocated;
+		cstats->nmalloc_small += cstats->bstats[i].nmalloc;
+		cstats->ndalloc_small += cstats->bstats[i].ndalloc;
+		cstats->nrequests_small += cstats->bstats[i].nrequests;
+	}
+}
+
+static void
+ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
+{
+	unsigned i;
+
+	sstats->pactive += astats->pactive;
+	sstats->pdirty += astats->pdirty;
+
+	sstats->astats.mapped += astats->astats.mapped;
+	sstats->astats.npurge += astats->astats.npurge;
+	sstats->astats.nmadvise += astats->astats.nmadvise;
+	sstats->astats.purged += astats->astats.purged;
+
+	sstats->allocated_small += astats->allocated_small;
+	sstats->nmalloc_small += astats->nmalloc_small;
+	sstats->ndalloc_small += astats->ndalloc_small;
+	sstats->nrequests_small += astats->nrequests_small;
+
+	sstats->astats.allocated_large += astats->astats.allocated_large;
+	sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
+	sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
+	sstats->astats.nrequests_large += astats->astats.nrequests_large;
+
+	for (i = 0; i < nlclasses; i++) {
+		sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
+		sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
+		sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
+		sstats->lstats[i].curruns += astats->lstats[i].curruns;
+	}
+
+	for (i = 0; i < NBINS; i++) {
+		sstats->bstats[i].allocated += astats->bstats[i].allocated;
+		sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
+		sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
+		sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
+		if (config_tcache) {
+			sstats->bstats[i].nfills += astats->bstats[i].nfills;
+			sstats->bstats[i].nflushes +=
+			    astats->bstats[i].nflushes;
+		}
+		sstats->bstats[i].nruns += astats->bstats[i].nruns;
+		sstats->bstats[i].reruns += astats->bstats[i].reruns;
+		sstats->bstats[i].curruns += astats->bstats[i].curruns;
+	}
+}
+
+static void
+ctl_arena_refresh(arena_t *arena, unsigned i)
+{
+	ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
+	ctl_arena_stats_t *sstats = &ctl_stats.arenas[narenas];
+
+	ctl_arena_clear(astats);
+
+	sstats->nthreads += astats->nthreads;
+	if (config_stats) {
+		ctl_arena_stats_amerge(astats, arena);
+		/* Merge into sum stats as well. */
+		ctl_arena_stats_smerge(sstats, astats);
+	} else {
+		astats->pactive += arena->nactive;
+		astats->pdirty += arena->ndirty;
+		/* Merge into sum stats as well. */
+		sstats->pactive += arena->nactive;
+		sstats->pdirty += arena->ndirty;
+	}
+}
+
+static void
+ctl_refresh(void)
+{
+	unsigned i;
+	arena_t *tarenas[narenas];
+
+	if (config_stats) {
+		malloc_mutex_lock(&chunks_mtx);
+		ctl_stats.chunks.current = stats_chunks.curchunks;
+		ctl_stats.chunks.total = stats_chunks.nchunks;
+		ctl_stats.chunks.high = stats_chunks.highchunks;
+		malloc_mutex_unlock(&chunks_mtx);
+
+		malloc_mutex_lock(&huge_mtx);
+		ctl_stats.huge.allocated = huge_allocated;
+		ctl_stats.huge.nmalloc = huge_nmalloc;
+		ctl_stats.huge.ndalloc = huge_ndalloc;
+		malloc_mutex_unlock(&huge_mtx);
+	}
+
+	/*
+	 * Clear sum stats, since ctl_arena_refresh() merges into them.
+	 */
+	ctl_stats.arenas[narenas].nthreads = 0;
+	ctl_arena_clear(&ctl_stats.arenas[narenas]);
+
+	malloc_mutex_lock(&arenas_lock);
+	memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
+	for (i = 0; i < narenas; i++) {
+		if (arenas[i] != NULL)
+			ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
+		else
+			ctl_stats.arenas[i].nthreads = 0;
+	}
+	malloc_mutex_unlock(&arenas_lock);
+	for (i = 0; i < narenas; i++) {
+		bool initialized = (tarenas[i] != NULL);
+
+		ctl_stats.arenas[i].initialized = initialized;
+		if (initialized)
+			ctl_arena_refresh(tarenas[i], i);
+	}
+
+	if (config_stats) {
+		ctl_stats.allocated = ctl_stats.arenas[narenas].allocated_small
+		    + ctl_stats.arenas[narenas].astats.allocated_large
+		    + ctl_stats.huge.allocated;
+		ctl_stats.active = (ctl_stats.arenas[narenas].pactive <<
+		    LG_PAGE) + ctl_stats.huge.allocated;
+		ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
+	}
+
+	ctl_epoch++;
+}
+
+static bool
+ctl_init(void)
+{
+	bool ret;
+
+	malloc_mutex_lock(&ctl_mtx);
+	if (ctl_initialized == false) {
+		/*
+		 * Allocate space for one extra arena stats element, which
+		 * contains summed stats across all arenas.
+		 */
+		ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
+		    (narenas + 1) * sizeof(ctl_arena_stats_t));
+		if (ctl_stats.arenas == NULL) {
+			ret = true;
+			goto label_return;
+		}
+		memset(ctl_stats.arenas, 0, (narenas + 1) *
+		    sizeof(ctl_arena_stats_t));
+
+		/*
+		 * Initialize all stats structures, regardless of whether they
+		 * ever get used.  Lazy initialization would allow errors to
+		 * cause inconsistent state to be viewable by the application.
+		 */
+		if (config_stats) {
+			unsigned i;
+			for (i = 0; i <= narenas; i++) {
+				if (ctl_arena_init(&ctl_stats.arenas[i])) {
+					ret = true;
+					goto label_return;
+				}
+			}
+		}
+		ctl_stats.arenas[narenas].initialized = true;
+
+		ctl_epoch = 0;
+		ctl_refresh();
+		ctl_initialized = true;
+	}
+
+	ret = false;
+label_return:
+	malloc_mutex_unlock(&ctl_mtx);
+	return (ret);
+}
+
+static int
+ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
+    size_t *depthp)
+{
+	int ret;
+	const char *elm, *tdot, *dot;
+	size_t elen, i, j;
+	const ctl_node_t *node;
+
+	elm = name;
+	/* Equivalent to strchrnul(). */
+	dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
+	elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
+	if (elen == 0) {
+		ret = ENOENT;
+		goto label_return;
+	}
+	node = super_root_node;
+	for (i = 0; i < *depthp; i++) {
+		assert(node->named);
+		assert(node->u.named.nchildren > 0);
+		if (node->u.named.children[0].named) {
+			const ctl_node_t *pnode = node;
+
+			/* Children are named. */
+			for (j = 0; j < node->u.named.nchildren; j++) {
+				const ctl_node_t *child =
+				    &node->u.named.children[j];
+				if (strlen(child->u.named.name) == elen
+				    && strncmp(elm, child->u.named.name,
+				    elen) == 0) {
+					node = child;
+					if (nodesp != NULL)
+						nodesp[i] = node;
+					mibp[i] = j;
+					break;
+				}
+			}
+			if (node == pnode) {
+				ret = ENOENT;
+				goto label_return;
+			}
+		} else {
+			uintmax_t index;
+			const ctl_node_t *inode;
+
+			/* Children are indexed. */
+			index = malloc_strtoumax(elm, NULL, 10);
+			if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
+				ret = ENOENT;
+				goto label_return;
+			}
+
+			inode = &node->u.named.children[0];
+			node = inode->u.indexed.index(mibp, *depthp,
+			    (size_t)index);
+			if (node == NULL) {
+				ret = ENOENT;
+				goto label_return;
+			}
+
+			if (nodesp != NULL)
+				nodesp[i] = node;
+			mibp[i] = (size_t)index;
+		}
+
+		if (node->ctl != NULL) {
+			/* Terminal node. */
+			if (*dot != '\0') {
+				/*
+				 * The name contains more elements than are
+				 * in this path through the tree.
+				 */
+				ret = ENOENT;
+				goto label_return;
+			}
+			/* Complete lookup successful. */
+			*depthp = i + 1;
+			break;
+		}
+
+		/* Update elm. */
+		if (*dot == '\0') {
+			/* No more elements. */
+			ret = ENOENT;
+			goto label_return;
+		}
+		elm = &dot[1];
+		dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
+		    strchr(elm, '\0');
+		elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
+	}
+
+	ret = 0;
+label_return:
+	return (ret);
+}
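As a worked example of the loop above, resolving "arenas.bin.0.size" against the tree defined earlier proceeds as follows (positions follow the table order shown above):

	/*
	 * "arenas" -> root_node[5]             (named)        mib[0] = 5
	 * "bin"    -> arenas_node[7]           (named)        mib[1] = 7
	 * "0"      -> arenas_bin_i_index(...)  (indexed)      mib[2] = 0
	 * "size"   -> arenas_bin_i_node[0]     (named, leaf)  mib[3] = 0
	 * depth = 4; terminal ctl = arenas_bin_i_size_ctl
	 */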
+
+int
+ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
+    size_t newlen)
+{
+	int ret;
+	size_t depth;
+	ctl_node_t const *nodes[CTL_MAX_DEPTH];
+	size_t mib[CTL_MAX_DEPTH];
+
+	if (ctl_initialized == false && ctl_init()) {
+		ret = EAGAIN;
+		goto label_return;
+	}
+
+	depth = CTL_MAX_DEPTH;
+	ret = ctl_lookup(name, nodes, mib, &depth);
+	if (ret != 0)
+		goto label_return;
+
+	if (nodes[depth-1]->ctl == NULL) {
+		/* The name refers to a partial path through the ctl tree. */
+		ret = ENOENT;
+		goto label_return;
+	}
+
+	ret = nodes[depth-1]->ctl(mib, depth, oldp, oldlenp, newp, newlen);
+label_return:
+	return(ret);
+}
+
+int
+ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
+{
+	int ret;
+
+	if (ctl_initialized == false && ctl_init()) {
+		ret = EAGAIN;
+		goto label_return;
+	}
+
+	ret = ctl_lookup(name, NULL, mibp, miblenp);
+label_return:
+	return(ret);
+}
+
+int
+ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+	int ret;
+	const ctl_node_t *node;
+	size_t i;
+
+	if (ctl_initialized == false && ctl_init()) {
+		ret = EAGAIN;
+		goto label_return;
+	}
+
+	/* Iterate down the tree. */
+	node = super_root_node;
+	for (i = 0; i < miblen; i++) {
+		if (node->u.named.children[0].named) {
+			/* Children are named. */
+			if (node->u.named.nchildren <= mib[i]) {
+				ret = ENOENT;
+				goto label_return;
+			}
+			node = &node->u.named.children[mib[i]];
+		} else {
+			const ctl_node_t *inode;
+
+			/* Indexed element. */
+			inode = &node->u.named.children[0];
+			node = inode->u.indexed.index(mib, miblen, mib[i]);
+			if (node == NULL) {
+				ret = ENOENT;
+				goto label_return;
+			}
+		}
+	}
+
+	/* Call the ctl function. */
+	if (node->ctl == NULL) {
+		/* Partial MIB. */
+		ret = ENOENT;
+		goto label_return;
+	}
+	ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);
+
+label_return:
+	return(ret);
+}
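For repeated queries, the two-step interface avoids re-parsing the name each time. A minimal sketch, calling these internal entry points directly and eliding error handling:

	size_t mib[CTL_MAX_DEPTH];
	size_t miblen = CTL_MAX_DEPTH;
	size_t pdirty, len = sizeof(pdirty);

	/* Translate the name once... */
	if (ctl_nametomib("stats.arenas.0.pdirty", mib, &miblen) == 0) {
		/* ...then read by MIB as often as needed. */
		ctl_bymib(mib, miblen, &pdirty, &len, NULL, 0);
	}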
+
+bool
+ctl_boot(void)
+{
+
+	if (malloc_mutex_init(&ctl_mtx))
+		return (true);
+
+	ctl_initialized = false;
+
+	return (false);
+}
+
+/******************************************************************************/
+/* *_ctl() functions. */
+
+#define	READONLY()	do {						\
+	if (newp != NULL || newlen != 0) {				\
+		ret = EPERM;						\
+		goto label_return;						\
+	}								\
+} while (0)
+
+#define	WRITEONLY()	do {						\
+	if (oldp != NULL || oldlenp != NULL) {				\
+		ret = EPERM;						\
+		goto label_return;						\
+	}								\
+} while (0)
+
+#define	VOID()	do {							\
+	READONLY();							\
+	WRITEONLY();							\
+} while (0)
+
+#define	READ(v, t)	do {						\
+	if (oldp != NULL && oldlenp != NULL) {				\
+		if (*oldlenp != sizeof(t)) {				\
+			size_t	copylen = (sizeof(t) <= *oldlenp)	\
+			    ? sizeof(t) : *oldlenp;			\
+			memcpy(oldp, (void *)&v, copylen);		\
+			ret = EINVAL;					\
+			goto label_return;					\
+		} else							\
+			*(t *)oldp = v;					\
+	}								\
+} while (0)
+
+#define	WRITE(v, t)	do {						\
+	if (newp != NULL) {						\
+		if (newlen != sizeof(t)) {				\
+			ret = EINVAL;					\
+			goto label_return;					\
+		}							\
+		v = *(t *)newp;						\
+	}								\
+} while (0)
+
+/*
+ * There's a lot of code duplication in the following macros due to limitations
+ * in how nested cpp macros are expanded.
+ */
+#define	CTL_RO_CLGEN(c, l, n, v, t)					\
+static int								\
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
+    void *newp, size_t newlen)						\
+{									\
+	int ret;							\
+	t oldval;							\
+									\
+	if ((c) == false)						\
+		return (ENOENT);					\
+	if (l)								\
+		malloc_mutex_lock(&ctl_mtx);				\
+	READONLY();							\
+	oldval = v;							\
+	READ(oldval, t);						\
+									\
+	ret = 0;							\
+label_return:									\
+	if (l)								\
+		malloc_mutex_unlock(&ctl_mtx);				\
+	return (ret);							\
+}
+
+#define	CTL_RO_CGEN(c, n, v, t)						\
+static int								\
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
+    void *newp, size_t newlen)						\
+{									\
+	int ret;							\
+	t oldval;							\
+									\
+	if ((c) == false)						\
+		return (ENOENT);					\
+	malloc_mutex_lock(&ctl_mtx);					\
+	READONLY();							\
+	oldval = v;							\
+	READ(oldval, t);						\
+									\
+	ret = 0;							\
+label_return:									\
+	malloc_mutex_unlock(&ctl_mtx);					\
+	return (ret);							\
+}
+
+#define	CTL_RO_GEN(n, v, t)						\
+static int								\
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
+    void *newp, size_t newlen)						\
+{									\
+	int ret;							\
+	t oldval;							\
+									\
+	malloc_mutex_lock(&ctl_mtx);					\
+	READONLY();							\
+	oldval = v;							\
+	READ(oldval, t);						\
+									\
+	ret = 0;							\
+label_return:									\
+	malloc_mutex_unlock(&ctl_mtx);					\
+	return (ret);							\
+}
+
+/*
+ * ctl_mtx is not acquired, under the assumption that no pertinent data will
+ * mutate during the call.
+ */
+#define	CTL_RO_NL_CGEN(c, n, v, t)					\
+static int								\
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
+    void *newp, size_t newlen)						\
+{									\
+	int ret;							\
+	t oldval;							\
+									\
+	if ((c) == false)						\
+		return (ENOENT);					\
+	READONLY();							\
+	oldval = v;							\
+	READ(oldval, t);						\
+									\
+	ret = 0;							\
+label_return:									\
+	return (ret);							\
+}
+
+#define	CTL_RO_NL_GEN(n, v, t)						\
+static int								\
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
+    void *newp, size_t newlen)						\
+{									\
+	int ret;							\
+	t oldval;							\
+									\
+	READONLY();							\
+	oldval = v;							\
+	READ(oldval, t);						\
+									\
+	ret = 0;							\
+label_return:									\
+	return (ret);							\
+}
+
+#define	CTL_RO_BOOL_CONFIG_GEN(n)					\
+static int								\
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
+    void *newp, size_t newlen)						\
+{									\
+	int ret;							\
+	bool oldval;							\
+									\
+	READONLY();							\
+	oldval = n;							\
+	READ(oldval, bool);						\
+									\
+	ret = 0;							\
+label_return:									\
+	return (ret);							\
+}
+
+CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
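Expanded by hand, the generator invocation above produces approximately the following function (a sketch of the preprocessor output, shown once so the many generated *_ctl callbacks referenced from the tree are easier to read):

static int
version_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	const char *oldval;

	READONLY();			/* Writing "version" fails with EPERM. */
	oldval = JEMALLOC_VERSION;
	READ(oldval, const char *);	/* Copied out only if oldp/oldlenp are given. */

	ret = 0;
label_return:
	return (ret);
}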
+
+static int
+epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+	int ret;
+	uint64_t newval;
+
+	malloc_mutex_lock(&ctl_mtx);
+	newval = 0;
+	WRITE(newval, uint64_t);
+	if (newval != 0)
+		ctl_refresh();
+	READ(ctl_epoch, uint64_t);
+
+	ret = 0;
+label_return:
+	malloc_mutex_unlock(&ctl_mtx);
+	return (ret);
+}
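Since the stats snapshot is only rebuilt when the epoch advances, a consumer typically writes a non-zero value here before reading any "stats.*" node. A minimal sketch using the by-name entry point:

	uint64_t epoch = 1;
	uint64_t current;
	size_t len = sizeof(current);

	/* A non-zero write triggers ctl_refresh(); the new epoch is read back. */
	ctl_byname("epoch", &current, &len, &epoch, sizeof(epoch));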
+
+static int
+thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
+{
+	int ret;
+	bool oldval;
+
+	if (config_tcache == false)
+		return (ENOENT);
+
+	oldval = tcache_enabled_get();
+	if (newp != NULL) {
+		if (newlen != sizeof(bool)) {
+			ret = EINVAL;
+			goto label_return;
+		}
+		tcache_enabled_set(*(bool *)newp);
+	}
+	READ(oldval, bool);
+
+	ret = 0;
+label_return:
+	return (ret);
+}
+
+static int
+thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
+{
+	int ret;
+
+	if (config_tcache == false)
+		return (ENOENT);
+
+	VOID();
+
+	tcache_flush();
+
+	ret = 0;
+label_return:
+	return (ret);
+}
+
+static int
+thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+	int ret;
+	unsigned newind, oldind;
+
+	newind = oldind = choose_arena(NULL)->ind;
+	WRITE(newind, unsigned);
+	READ(oldind, unsigned);
+	if (newind != oldind) {
+		arena_t *arena;
+
+		if (newind >= narenas) {
+			/* New arena index is out of range. */
+			ret = EFAULT;
+			goto label_return;
+		}
+
+		/* Initialize arena if necessary. */
+		malloc_mutex_lock(&arenas_lock);
+		if ((arena = arenas[newind]) == NULL && (arena =
+		    arenas_extend(newind)) == NULL) {
+			malloc_mutex_unlock(&arenas_lock);
+			ret = EAGAIN;
+			goto label_return;
+		}
+		assert(arena == arenas[newind]);
+		arenas[oldind]->nthreads--;
+		arenas[newind]->nthreads++;
+		malloc_mutex_unlock(&arenas_lock);
+
+		/* Set new arena association. */
+		if (config_tcache) {
+			tcache_t *tcache;
+			if ((uintptr_t)(tcache = *tcache_tsd_get()) >
+			    (uintptr_t)TCACHE_STATE_MAX) {
+				tcache_arena_dissociate(tcache);
+				tcache_arena_associate(tcache, arena);
+			}
+		}
+		arenas_tsd_set(&arena);
+	}
+
+	ret = 0;
+label_return:
+	return (ret);
+}
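"thread.arena" thus both reports and changes the calling thread's arena binding. A minimal sketch that migrates the caller to arena 0 (always a valid index once the allocator is initialized):

	unsigned old_arena, new_arena = 0;
	size_t len = sizeof(old_arena);

	/* old_arena receives the previous binding; EFAULT if new_arena >= narenas. */
	ctl_byname("thread.arena", &old_arena, &len, &new_arena, sizeof(new_arena));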
+
+CTL_RO_NL_CGEN(config_stats, thread_allocated,
+    thread_allocated_tsd_get()->allocated, uint64_t)
+CTL_RO_NL_CGEN(config_stats, thread_allocatedp,
+    &thread_allocated_tsd_get()->allocated, uint64_t *)
+CTL_RO_NL_CGEN(config_stats, thread_deallocated,
+    thread_allocated_tsd_get()->deallocated, uint64_t)
+CTL_RO_NL_CGEN(config_stats, thread_deallocatedp,
+    &thread_allocated_tsd_get()->deallocated, uint64_t *)
+
+/******************************************************************************/
+
+CTL_RO_BOOL_CONFIG_GEN(config_debug)
+CTL_RO_BOOL_CONFIG_GEN(config_dss)
+CTL_RO_BOOL_CONFIG_GEN(config_fill)
+CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
+CTL_RO_BOOL_CONFIG_GEN(config_munmap)
+CTL_RO_BOOL_CONFIG_GEN(config_prof)
+CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
+CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
+CTL_RO_BOOL_CONFIG_GEN(config_stats)
+CTL_RO_BOOL_CONFIG_GEN(config_tcache)
+CTL_RO_BOOL_CONFIG_GEN(config_tls)
+CTL_RO_BOOL_CONFIG_GEN(config_utrace)
+CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
+CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
+
+/******************************************************************************/
+
+CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
+CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
+CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
+CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
+CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
+CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
+CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
+CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
+CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
+CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
+CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool)
+CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
+CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
+CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
+CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
+
+/******************************************************************************/
+
+CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
+CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
+CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
+const ctl_node_t *
+arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
+{
+
+	if (i > NBINS)
+		return (NULL);
+	return (super_arenas_bin_i_node);
+}
+
+CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
+const ctl_node_t *
+arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
+{
+
+	if (i > nlclasses)
+		return (NULL);
+	return (super_arenas_lrun_i_node);
+}
+
+CTL_RO_NL_GEN(arenas_narenas, narenas, unsigned)
+
+static int
+arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
+{
+	int ret;
+	unsigned nread, i;
+
+	malloc_mutex_lock(&ctl_mtx);
+	READONLY();
+	if (*oldlenp != narenas * sizeof(bool)) {
+		ret = EINVAL;
+		nread = (*oldlenp < narenas * sizeof(bool))
+		    ? (*oldlenp / sizeof(bool)) : narenas;
+	} else {
+		ret = 0;
+		nread = narenas;
+	}
+
+	for (i = 0; i < nread; i++)
+		((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;
+
+label_return:
+	malloc_mutex_unlock(&ctl_mtx);
+	return (ret);
+}
+
+CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
+CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
+CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
+CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
+CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
+CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)
+
+static int
+arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+	int ret;
+	unsigned arena;
+
+	WRITEONLY();
+	arena = UINT_MAX;
+	WRITE(arena, unsigned);
+	if (newp != NULL && arena >= narenas) {
+		ret = EFAULT;
+		goto label_return;
+	} else {
+		arena_t *tarenas[narenas];
+
+		malloc_mutex_lock(&arenas_lock);
+		memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
+		malloc_mutex_unlock(&arenas_lock);
+
+		if (arena == UINT_MAX) {
+			unsigned i;
+			for (i = 0; i < narenas; i++) {
+				if (tarenas[i] != NULL)
+					arena_purge_all(tarenas[i]);
+			}
+		} else {
+			assert(arena < narenas);
+			if (tarenas[arena] != NULL)
+				arena_purge_all(tarenas[arena]);
+		}
+	}
+
+	ret = 0;
+label_return:
+	return (ret);
+}
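"arenas.purge" is write-only: the written index selects a single arena, and UINT_MAX means every initialized arena. A minimal sketch:

	unsigned arena = UINT_MAX;	/* UINT_MAX: purge all initialized arenas. */

	ctl_byname("arenas.purge", NULL, NULL, &arena, sizeof(arena));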
+
+/******************************************************************************/
+
+static int
+prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+	int ret;
+	bool oldval;
+
+	if (config_prof == false)
+		return (ENOENT);
+
+	malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
+	oldval = opt_prof_active;
+	if (newp != NULL) {
+		/*
+		 * The memory barriers will tend to make opt_prof_active
+		 * propagate faster on systems with weak memory ordering.
+		 */
+		mb_write();
+		WRITE(opt_prof_active, bool);
+		mb_write();
+	}
+	READ(oldval, bool);
+
+	ret = 0;
+label_return:
+	malloc_mutex_unlock(&ctl_mtx);
+	return (ret);
+}
+
+static int
+prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+	int ret;
+	const char *filename = NULL;
+
+	if (config_prof == false)
+		return (ENOENT);
+
+	WRITEONLY();
+	WRITE(filename, const char *);
+
+	if (prof_mdump(filename)) {
+		ret = EFAULT;
+		goto label_return;
+	}
+
+	ret = 0;
+label_return:
+	return (ret);
+}
+
+CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
+
+/******************************************************************************/
+
+CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
+    size_t)
+CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
+CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
+CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t)
+CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
+    ctl_stats.arenas[mib[2]].allocated_small, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
+    ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
+    ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
+    ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
+    ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
+    ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
+    ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
+    ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated,
+    ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
+    ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
+    ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
+    ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
+CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
+    ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
+CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
+    ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
+    ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
+    ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
+    ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
+
+const ctl_node_t *
+stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
+{
+
+	if (j > NBINS)
+		return (NULL);
+	return (super_stats_arenas_i_bins_j_node);
+}
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
+    ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
+    ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
+    ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
+    ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
+
+const ctl_node_t *
+stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
+{
+
+	if (j > nlclasses)
+		return (NULL);
+	return (super_stats_arenas_i_lruns_j_node);
+}
+
+CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
+CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
+CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
+    ctl_stats.arenas[mib[2]].astats.mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
+    ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
+    ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
+    ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
+
+const ctl_node_t *
+stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
+{
+	const ctl_node_t * ret;
+
+	malloc_mutex_lock(&ctl_mtx);
+	if (ctl_stats.arenas[i].initialized == false) {
+		ret = NULL;
+		goto label_return;
+	}
+
+	ret = super_stats_arenas_i_node;
+label_return:
+	malloc_mutex_unlock(&ctl_mtx);
+	return (ret);
+}
+
+CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
+CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
+CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
+CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/src/extent.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/src/extent.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,39 @@
+#define	JEMALLOC_EXTENT_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+
+static inline int
+extent_szad_comp(extent_node_t *a, extent_node_t *b)
+{
+	int ret;
+	size_t a_size = a->size;
+	size_t b_size = b->size;
+
+	ret = (a_size > b_size) - (a_size < b_size);
+	if (ret == 0) {
+		uintptr_t a_addr = (uintptr_t)a->addr;
+		uintptr_t b_addr = (uintptr_t)b->addr;
+
+		ret = (a_addr > b_addr) - (a_addr < b_addr);
+	}
+
+	return (ret);
+}
+
+/* Generate red-black tree functions. */
+rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad,
+    extent_szad_comp)
+
+static inline int
+extent_ad_comp(extent_node_t *a, extent_node_t *b)
+{
+	uintptr_t a_addr = (uintptr_t)a->addr;
+	uintptr_t b_addr = (uintptr_t)b->addr;
+
+	return ((a_addr > b_addr) - (a_addr < b_addr));
+}
+
+/* Generate red-black tree functions. */
+rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
+    extent_ad_comp)
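The size/address comparator orders extents primarily by size and secondarily by address, so a search of the szad tree returns the lowest-addressed extent among the smallest that fit. A small illustration of the ordering it induces:

	/*
	 * With A = {addr 0x2000, 1 page}, B = {addr 0x1000, 2 pages} and
	 * C = {addr 0x3000, 2 pages}, the szad order is A < B < C: A sorts
	 * first because it is smaller, and B precedes C because, at equal
	 * size, the lower address wins.
	 */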
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/src/hash.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/src/hash.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,2 @@
+#define	JEMALLOC_HASH_C_
+#include "jemalloc/internal/jemalloc_internal.h"
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/src/huge.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/src/huge.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,306 @@
+#define	JEMALLOC_HUGE_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+uint64_t	huge_nmalloc;
+uint64_t	huge_ndalloc;
+size_t		huge_allocated;
+
+malloc_mutex_t	huge_mtx;
+
+/******************************************************************************/
+
+/* Tree of chunks that are stand-alone huge allocations. */
+static extent_tree_t	huge;
+
+void *
+huge_malloc(size_t size, bool zero)
+{
+
+	return (huge_palloc(size, chunksize, zero));
+}
+
+void *
+huge_palloc(size_t size, size_t alignment, bool zero)
+{
+	void *ret;
+	size_t csize;
+	extent_node_t *node;
+
+	/* Allocate one or more contiguous chunks for this request. */
+
+	csize = CHUNK_CEILING(size);
+	if (csize == 0) {
+		/* size is large enough to cause size_t wrap-around. */
+		return (NULL);
+	}
+
+	/* Allocate an extent node with which to track the chunk. */
+	node = base_node_alloc();
+	if (node == NULL)
+		return (NULL);
+
+	ret = chunk_alloc(csize, alignment, false, &zero);
+	if (ret == NULL) {
+		base_node_dealloc(node);
+		return (NULL);
+	}
+
+	/* Insert node into huge. */
+	node->addr = ret;
+	node->size = csize;
+
+	malloc_mutex_lock(&huge_mtx);
+	extent_tree_ad_insert(&huge, node);
+	if (config_stats) {
+		stats_cactive_add(csize);
+		huge_nmalloc++;
+		huge_allocated += csize;
+	}
+	malloc_mutex_unlock(&huge_mtx);
+
+	if (config_fill && zero == false) {
+		if (opt_junk)
+			memset(ret, 0xa5, csize);
+		else if (opt_zero)
+			memset(ret, 0, csize);
+	}
+
+	return (ret);
+}
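A minimal usage sketch (sizes chosen only for illustration): the request is rounded up to whole chunks by CHUNK_CEILING, and the extent node inserted above is what huge_salloc() and huge_dalloc() later look up:

	void *p = huge_palloc((size_t)5 << 20, chunksize, false);
	if (p != NULL) {
		size_t usable = huge_salloc(p);	/* CHUNK_CEILING(5 MiB), not 5 MiB. */

		assert(usable >= (size_t)5 << 20);
		huge_dalloc(p, true);
	}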
+
+void *
+huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
+{
+
+	/*
+	 * Avoid moving the allocation if the size class can be left the same.
+	 */
+	if (oldsize > arena_maxclass
+	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
+	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
+		assert(CHUNK_CEILING(oldsize) == oldsize);
+		if (config_fill && opt_junk && size < oldsize) {
+			memset((void *)((uintptr_t)ptr + size), 0x5a,
+			    oldsize - size);
+		}
+		return (ptr);
+	}
+
+	/* Reallocation would require a move. */
+	return (NULL);
+}
+
+void *
+huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
+    size_t alignment, bool zero)
+{
+	void *ret;
+	size_t copysize;
+
+	/* Try to avoid moving the allocation. */
+	ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
+	if (ret != NULL)
+		return (ret);
+
+	/*
+	 * size and oldsize are different enough that we need to use a
+	 * different size class.  In that case, fall back to allocating new
+	 * space and copying.
+	 */
+	if (alignment > chunksize)
+		ret = huge_palloc(size + extra, alignment, zero);
+	else
+		ret = huge_malloc(size + extra, zero);
+
+	if (ret == NULL) {
+		if (extra == 0)
+			return (NULL);
+		/* Try again, this time without extra. */
+		if (alignment > chunksize)
+			ret = huge_palloc(size, alignment, zero);
+		else
+			ret = huge_malloc(size, zero);
+
+		if (ret == NULL)
+			return (NULL);
+	}
+
+	/*
+	 * Copy at most size bytes (not size+extra), since the caller has no
+	 * expectation that the extra bytes will be reliably preserved.
+	 */
+	copysize = (size < oldsize) ? size : oldsize;
+
+	/*
+	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
+	 * source nor the destination is in dss.
+	 */
+#ifdef JEMALLOC_MREMAP_FIXED
+	if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
+	    == false && chunk_in_dss(ret) == false))) {
+		size_t newsize = huge_salloc(ret);
+
+		/*
+		 * Remove ptr from the tree of huge allocations before
+		 * performing the remap operation, in order to avoid the
+		 * possibility of another thread acquiring that mapping before
+		 * this one removes it from the tree.
+		 */
+		huge_dalloc(ptr, false);
+		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
+		    ret) == MAP_FAILED) {
+			/*
+			 * Assuming no chunk management bugs in the allocator,
+			 * the only documented way an error can occur here is
+			 * if the application changed the map type for a
+			 * portion of the old allocation.  This is firmly in
+			 * undefined behavior territory, so write a diagnostic
+			 * message, and optionally abort.
+			 */
+			char buf[BUFERROR_BUF];
+
+			buferror(errno, buf, sizeof(buf));
+			malloc_printf("<jemalloc>: Error in mremap(): %s\n",
+			    buf);
+			if (opt_abort)
+				abort();
+			memcpy(ret, ptr, copysize);
+			chunk_dealloc_mmap(ptr, oldsize);
+		}
+	} else
+#endif
+	{
+		memcpy(ret, ptr, copysize);
+		iqalloc(ptr);
+	}
+	return (ret);
+}
+
+void
+huge_dalloc(void *ptr, bool unmap)
+{
+	extent_node_t *node, key;
+
+	malloc_mutex_lock(&huge_mtx);
+
+	/* Extract from tree of huge allocations. */
+	key.addr = ptr;
+	node = extent_tree_ad_search(&huge, &key);
+	assert(node != NULL);
+	assert(node->addr == ptr);
+	extent_tree_ad_remove(&huge, node);
+
+	if (config_stats) {
+		stats_cactive_sub(node->size);
+		huge_ndalloc++;
+		huge_allocated -= node->size;
+	}
+
+	malloc_mutex_unlock(&huge_mtx);
+
+	if (unmap && config_fill && config_dss && opt_junk)
+		memset(node->addr, 0x5a, node->size);
+
+	chunk_dealloc(node->addr, node->size, unmap);
+
+	base_node_dealloc(node);
+}
+
+size_t
+huge_salloc(const void *ptr)
+{
+	size_t ret;
+	extent_node_t *node, key;
+
+	malloc_mutex_lock(&huge_mtx);
+
+	/* Extract from tree of huge allocations. */
+	key.addr = __DECONST(void *, ptr);
+	node = extent_tree_ad_search(&huge, &key);
+	assert(node != NULL);
+
+	ret = node->size;
+
+	malloc_mutex_unlock(&huge_mtx);
+
+	return (ret);
+}
+
+prof_ctx_t *
+huge_prof_ctx_get(const void *ptr)
+{
+	prof_ctx_t *ret;
+	extent_node_t *node, key;
+
+	malloc_mutex_lock(&huge_mtx);
+
+	/* Extract from tree of huge allocations. */
+	key.addr = __DECONST(void *, ptr);
+	node = extent_tree_ad_search(&huge, &key);
+	assert(node != NULL);
+
+	ret = node->prof_ctx;
+
+	malloc_mutex_unlock(&huge_mtx);
+
+	return (ret);
+}
+
+void
+huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
+{
+	extent_node_t *node, key;
+
+	malloc_mutex_lock(&huge_mtx);
+
+	/* Extract from tree of huge allocations. */
+	key.addr = __DECONST(void *, ptr);
+	node = extent_tree_ad_search(&huge, &key);
+	assert(node != NULL);
+
+	node->prof_ctx = ctx;
+
+	malloc_mutex_unlock(&huge_mtx);
+}
+
+bool
+huge_boot(void)
+{
+
+	/* Initialize chunks data. */
+	if (malloc_mutex_init(&huge_mtx))
+		return (true);
+	extent_tree_ad_new(&huge);
+
+	if (config_stats) {
+		huge_nmalloc = 0;
+		huge_ndalloc = 0;
+		huge_allocated = 0;
+	}
+
+	return (false);
+}
+
+void
+huge_prefork(void)
+{
+
+	malloc_mutex_prefork(&huge_mtx);
+}
+
+void
+huge_postfork_parent(void)
+{
+
+	malloc_mutex_postfork_parent(&huge_mtx);
+}
+
+void
+huge_postfork_child(void)
+{
+
+	malloc_mutex_postfork_child(&huge_mtx);
+}
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/src/jemalloc.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/src/jemalloc.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,1733 @@
+#define	JEMALLOC_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+malloc_tsd_data(, arenas, arena_t *, NULL)
+malloc_tsd_data(, thread_allocated, thread_allocated_t,
+    THREAD_ALLOCATED_INITIALIZER)
+
+const char	*__malloc_options_1_0;
+__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
+
+/* Runtime configuration options. */
+const char	*je_malloc_conf JEMALLOC_ATTR(visibility("default"));
+#ifdef JEMALLOC_DEBUG
+bool	opt_abort = true;
+#  ifdef JEMALLOC_FILL
+bool	opt_junk = true;
+#  else
+bool	opt_junk = false;
+#  endif
+#else
+bool	opt_abort = false;
+bool	opt_junk = false;
+#endif
+size_t	opt_quarantine = ZU(0);
+bool	opt_redzone = false;
+bool	opt_utrace = false;
+bool	opt_valgrind = false;
+bool	opt_xmalloc = false;
+bool	opt_zero = false;
+size_t	opt_narenas = 0;
+
+unsigned	ncpus;
+
+malloc_mutex_t		arenas_lock;
+arena_t			**arenas;
+unsigned		narenas;
+
+/* Set to true once the allocator has been initialized. */
+static bool		malloc_initialized = false;
+
+#ifdef JEMALLOC_THREADED_INIT
+/* Used to let the initializing thread recursively allocate. */
+#  define NO_INITIALIZER	((unsigned long)0)
+#  define INITIALIZER		pthread_self()
+#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
+static pthread_t		malloc_initializer = NO_INITIALIZER;
+#else
+#  define NO_INITIALIZER	false
+#  define INITIALIZER		true
+#  define IS_INITIALIZER	malloc_initializer
+static bool			malloc_initializer = NO_INITIALIZER;
+#endif
+
+/* Used to avoid initialization races. */
+static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
+
+typedef struct {
+	void	*p;	/* Input pointer (as in realloc(p, s)). */
+	size_t	s;	/* Request size. */
+	void	*r;	/* Result pointer. */
+} malloc_utrace_t;
+
+#ifdef JEMALLOC_UTRACE
+#  define UTRACE(a, b, c) do {						\
+	if (opt_utrace) {						\
+		malloc_utrace_t ut;					\
+		ut.p = (a);						\
+		ut.s = (b);						\
+		ut.r = (c);						\
+		utrace(&ut, sizeof(ut));				\
+	}								\
+} while (0)
+#else
+#  define UTRACE(a, b, c)
+#endif
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static void	stats_print_atexit(void);
+static unsigned	malloc_ncpus(void);
+static bool	malloc_conf_next(char const **opts_p, char const **k_p,
+    size_t *klen_p, char const **v_p, size_t *vlen_p);
+static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
+    const char *v, size_t vlen);
+static void	malloc_conf_init(void);
+static bool	malloc_init_hard(void);
+static int	imemalign(void **memptr, size_t alignment, size_t size,
+    size_t min_alignment);
+
+/******************************************************************************/
+/*
+ * Begin miscellaneous support functions.
+ */
+
+/* Create a new arena and insert it into the arenas array at index ind. */
+arena_t *
+arenas_extend(unsigned ind)
+{
+	arena_t *ret;
+
+	ret = (arena_t *)base_alloc(sizeof(arena_t));
+	if (ret != NULL && arena_new(ret, ind) == false) {
+		arenas[ind] = ret;
+		return (ret);
+	}
+	/* Only reached if there is an OOM error. */
+
+	/*
+	 * OOM here is quite inconvenient to propagate, since dealing with it
+	 * would require a check for failure in the fast path.  Instead, punt
+	 * by using arenas[0].  In practice, this is an extremely unlikely
+	 * failure.
+	 */
+	malloc_write("<jemalloc>: Error initializing arena\n");
+	if (opt_abort)
+		abort();
+
+	return (arenas[0]);
+}
+
+/* Slow path, called only by choose_arena(). */
+arena_t *
+choose_arena_hard(void)
+{
+	arena_t *ret;
+
+	if (narenas > 1) {
+		unsigned i, choose, first_null;
+
+		choose = 0;
+		first_null = narenas;
+		malloc_mutex_lock(&arenas_lock);
+		assert(arenas[0] != NULL);
+		for (i = 1; i < narenas; i++) {
+			if (arenas[i] != NULL) {
+				/*
+				 * Choose the first arena that has the lowest
+				 * number of threads assigned to it.
+				 */
+				if (arenas[i]->nthreads <
+				    arenas[choose]->nthreads)
+					choose = i;
+			} else if (first_null == narenas) {
+				/*
+				 * Record the index of the first uninitialized
+				 * arena, in case all extant arenas are in use.
+				 *
+				 * NB: It is possible for there to be
+				 * discontinuities in terms of initialized
+				 * versus uninitialized arenas, due to the
+				 * "thread.arena" mallctl.
+				 */
+				first_null = i;
+			}
+		}
+
+		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
+			/*
+			 * Use an unloaded arena, or the least loaded arena if
+			 * all arenas are already initialized.
+			 */
+			ret = arenas[choose];
+		} else {
+			/* Initialize a new arena. */
+			ret = arenas_extend(first_null);
+		}
+		ret->nthreads++;
+		malloc_mutex_unlock(&arenas_lock);
+	} else {
+		ret = arenas[0];
+		malloc_mutex_lock(&arenas_lock);
+		ret->nthreads++;
+		malloc_mutex_unlock(&arenas_lock);
+	}
+
+	arenas_tsd_set(&ret);
+
+	return (ret);
+}
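A worked example of the selection above (a sketch):

	/*
	 * With narenas = 4, nthreads {3, 1, 2} for arenas 0-2 and arenas[3] ==
	 * NULL: the least loaded initialized arena is index 1, but it still
	 * has a thread, and first_null == 3 < narenas, so arenas_extend(3)
	 * creates arena 3 and the calling thread is bound to it.
	 */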
+
+static void
+stats_print_atexit(void)
+{
+
+	if (config_tcache && config_stats) {
+		unsigned i;
+
+		/*
+		 * Merge stats from extant threads.  This is racy, since
+		 * individual threads do not lock when recording tcache stats
+		 * events.  As a consequence, the final stats may be slightly
+		 * out of date by the time they are reported, if other threads
+		 * continue to allocate.
+		 */
+		for (i = 0; i < narenas; i++) {
+			arena_t *arena = arenas[i];
+			if (arena != NULL) {
+				tcache_t *tcache;
+
+				/*
+				 * tcache_stats_merge() locks bins, so if any
+				 * code is introduced that acquires both arena
+				 * and bin locks in the opposite order,
+				 * deadlocks may result.
+				 */
+				malloc_mutex_lock(&arena->lock);
+				ql_foreach(tcache, &arena->tcache_ql, link) {
+					tcache_stats_merge(tcache, arena);
+				}
+				malloc_mutex_unlock(&arena->lock);
+			}
+		}
+	}
+	je_malloc_stats_print(NULL, NULL, NULL);
+}
+
+/*
+ * End miscellaneous support functions.
+ */
+/******************************************************************************/
+/*
+ * Begin initialization functions.
+ */
+
+static unsigned
+malloc_ncpus(void)
+{
+	unsigned ret;
+	long result;
+
+	result = sysconf(_SC_NPROCESSORS_ONLN);
+	if (result == -1) {
+		/* Error. */
+		ret = 1;
+	} else
+		ret = (unsigned)result;
+
+	return (ret);
+}
+
+void
+arenas_cleanup(void *arg)
+{
+	arena_t *arena = *(arena_t **)arg;
+
+	malloc_mutex_lock(&arenas_lock);
+	arena->nthreads--;
+	malloc_mutex_unlock(&arenas_lock);
+}
+
+static inline bool
+malloc_init(void)
+{
+
+	if (malloc_initialized == false)
+		return (malloc_init_hard());
+
+	return (false);
+}
+
+static bool
+malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
+    char const **v_p, size_t *vlen_p)
+{
+	bool accept;
+	const char *opts = *opts_p;
+
+	*k_p = opts;
+
+	for (accept = false; accept == false;) {
+		switch (*opts) {
+		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
+		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
+		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
+		case 'Y': case 'Z':
+		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
+		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
+		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
+		case 'y': case 'z':
+		case '0': case '1': case '2': case '3': case '4': case '5':
+		case '6': case '7': case '8': case '9':
+		case '_':
+			opts++;
+			break;
+		case ':':
+			opts++;
+			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
+			*v_p = opts;
+			accept = true;
+			break;
+		case '\0':
+			if (opts != *opts_p) {
+				malloc_write("<jemalloc>: Conf string ends "
+				    "with key\n");
+			}
+			return (true);
+		default:
+			malloc_write("<jemalloc>: Malformed conf string\n");
+			return (true);
+		}
+	}
+
+	for (accept = false; accept == false;) {
+		switch (*opts) {
+		case ',':
+			opts++;
+			/*
+			 * Look ahead one character here, because the next time
+			 * this function is called, it will assume that end of
+			 * input has been cleanly reached if no input remains,
+			 * but we have optimistically already consumed the
+			 * comma if one exists.
+			 */
+			if (*opts == '\0') {
+				malloc_write("<jemalloc>: Conf string ends "
+				    "with comma\n");
+			}
+			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
+			accept = true;
+			break;
+		case '\0':
+			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
+			accept = true;
+			break;
+		default:
+			opts++;
+			break;
+		}
+	}
+
+	*opts_p = opts;
+	return (false);
+}
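Concretely, given the option string "abort:true,lg_chunk:30", successive calls yield the pairs below (a sketch; the key/value slices are not NUL-terminated, hence the explicit lengths):

	/*
	 * 1st call: k = "abort",    klen = 5, v = "true", vlen = 4
	 * 2nd call: k = "lg_chunk", klen = 8, v = "30",   vlen = 2
	 * The caller's loop then stops because *opts == '\0'.
	 */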
+
+static void
+malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
+    size_t vlen)
+{
+
+	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
+	    (int)vlen, v);
+}
+
+static void
+malloc_conf_init(void)
+{
+	unsigned i;
+	char buf[PATH_MAX + 1];
+	const char *opts, *k, *v;
+	size_t klen, vlen;
+
+	for (i = 0; i < 3; i++) {
+		/* Get runtime configuration. */
+		switch (i) {
+		case 0:
+			if (je_malloc_conf != NULL) {
+				/*
+				 * Use options that were compiled into the
+				 * program.
+				 */
+				opts = je_malloc_conf;
+			} else {
+				/* No configuration specified. */
+				buf[0] = '\0';
+				opts = buf;
+			}
+			break;
+		case 1: {
+			int linklen;
+			const char *linkname =
+#ifdef JEMALLOC_PREFIX
+			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
+#else
+			    "/etc/malloc.conf"
+#endif
+			    ;
+
+			if ((linklen = readlink(linkname, buf,
+			    sizeof(buf) - 1)) != -1) {
+				/*
+				 * Use the contents of the "/etc/malloc.conf"
+				 * symbolic link's name.
+				 */
+				buf[linklen] = '\0';
+				opts = buf;
+			} else {
+				/* No configuration specified. */
+				buf[0] = '\0';
+				opts = buf;
+			}
+			break;
+		} case 2: {
+			const char *envname =
+#ifdef JEMALLOC_PREFIX
+			    JEMALLOC_CPREFIX"MALLOC_CONF"
+#else
+			    "MALLOC_CONF"
+#endif
+			    ;
+
+			if (issetugid() == 0 && (opts = getenv(envname)) !=
+			    NULL) {
+				/*
+				 * Do nothing; opts is already initialized to
+				 * the value of the MALLOC_CONF environment
+				 * variable.
+				 */
+			} else {
+				/* No configuration specified. */
+				buf[0] = '\0';
+				opts = buf;
+			}
+			break;
+		} default:
+			/* NOTREACHED */
+			assert(false);
+			buf[0] = '\0';
+			opts = buf;
+		}
+
+		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
+		    &vlen) == false) {
+#define	CONF_HANDLE_BOOL_HIT(o, n, hit)					\
+			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
+			    klen) == 0) {				\
+				if (strncmp("true", v, vlen) == 0 &&	\
+				    vlen == sizeof("true")-1)		\
+					o = true;			\
+				else if (strncmp("false", v, vlen) ==	\
+				    0 && vlen == sizeof("false")-1)	\
+					o = false;			\
+				else {					\
+					malloc_conf_error(		\
+					    "Invalid conf value",	\
+					    k, klen, v, vlen);		\
+				}					\
+				hit = true;				\
+			} else						\
+				hit = false;
+#define	CONF_HANDLE_BOOL(o, n) {					\
+			bool hit;					\
+			CONF_HANDLE_BOOL_HIT(o, n, hit);		\
+			if (hit)					\
+				continue;				\
+}
+#define	CONF_HANDLE_SIZE_T(o, n, min, max)				\
+			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
+			    klen) == 0) {				\
+				uintmax_t um;				\
+				char *end;				\
+									\
+				errno = 0;				\
+				um = malloc_strtoumax(v, &end, 0);	\
+				if (errno != 0 || (uintptr_t)end -	\
+				    (uintptr_t)v != vlen) {		\
+					malloc_conf_error(		\
+					    "Invalid conf value",	\
+					    k, klen, v, vlen);		\
+				} else if (um < min || um > max) {	\
+					malloc_conf_error(		\
+					    "Out-of-range conf value",	\
+					    k, klen, v, vlen);		\
+				} else					\
+					o = um;				\
+				continue;				\
+			}
+#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
+			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
+			    klen) == 0) {				\
+				long l;					\
+				char *end;				\
+									\
+				errno = 0;				\
+				l = strtol(v, &end, 0);			\
+				if (errno != 0 || (uintptr_t)end -	\
+				    (uintptr_t)v != vlen) {		\
+					malloc_conf_error(		\
+					    "Invalid conf value",	\
+					    k, klen, v, vlen);		\
+				} else if (l < (ssize_t)min || l >	\
+				    (ssize_t)max) {			\
+					malloc_conf_error(		\
+					    "Out-of-range conf value",	\
+					    k, klen, v, vlen);		\
+				} else					\
+					o = l;				\
+				continue;				\
+			}
+#define	CONF_HANDLE_CHAR_P(o, n, d)					\
+			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
+			    klen) == 0) {				\
+				size_t cpylen = (vlen <=		\
+				    sizeof(o)-1) ? vlen :		\
+				    sizeof(o)-1;			\
+				strncpy(o, v, cpylen);			\
+				o[cpylen] = '\0';			\
+				continue;				\
+			}
+
+			CONF_HANDLE_BOOL(opt_abort, abort)
+			/*
+			 * Chunks always require at least one header page, plus
+			 * one data page in the absence of redzones, or three
+			 * pages in the presence of redzones.  In order to
+			 * simplify options processing, fix the limit based on
+			 * config_fill.
+			 */
+			CONF_HANDLE_SIZE_T(opt_lg_chunk, lg_chunk, LG_PAGE +
+			    (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
+			CONF_HANDLE_SIZE_T(opt_narenas, narenas, 1, SIZE_T_MAX)
+			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, lg_dirty_mult,
+			    -1, (sizeof(size_t) << 3) - 1)
+			CONF_HANDLE_BOOL(opt_stats_print, stats_print)
+			if (config_fill) {
+				CONF_HANDLE_BOOL(opt_junk, junk)
+				CONF_HANDLE_SIZE_T(opt_quarantine, quarantine,
+				    0, SIZE_T_MAX)
+				CONF_HANDLE_BOOL(opt_redzone, redzone)
+				CONF_HANDLE_BOOL(opt_zero, zero)
+			}
+			if (config_utrace) {
+				CONF_HANDLE_BOOL(opt_utrace, utrace)
+			}
+			if (config_valgrind) {
+				bool hit;
+				CONF_HANDLE_BOOL_HIT(opt_valgrind,
+				    valgrind, hit)
+				if (config_fill && opt_valgrind && hit) {
+					opt_junk = false;
+					opt_zero = false;
+					if (opt_quarantine == 0) {
+						opt_quarantine =
+						    JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
+					}
+					opt_redzone = true;
+				}
+				if (hit)
+					continue;
+			}
+			if (config_xmalloc) {
+				CONF_HANDLE_BOOL(opt_xmalloc, xmalloc)
+			}
+			if (config_tcache) {
+				CONF_HANDLE_BOOL(opt_tcache, tcache)
+				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
+				    lg_tcache_max, -1,
+				    (sizeof(size_t) << 3) - 1)
+			}
+			if (config_prof) {
+				CONF_HANDLE_BOOL(opt_prof, prof)
+				CONF_HANDLE_CHAR_P(opt_prof_prefix, prof_prefix,
+				    "jeprof")
+				CONF_HANDLE_BOOL(opt_prof_active, prof_active)
+				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
+				    lg_prof_sample, 0,
+				    (sizeof(uint64_t) << 3) - 1)
+				CONF_HANDLE_BOOL(opt_prof_accum, prof_accum)
+				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
+				    lg_prof_interval, -1,
+				    (sizeof(uint64_t) << 3) - 1)
+				CONF_HANDLE_BOOL(opt_prof_gdump, prof_gdump)
+				CONF_HANDLE_BOOL(opt_prof_leak, prof_leak)
+			}
+			malloc_conf_error("Invalid conf pair", k, klen, v,
+			    vlen);
+#undef CONF_HANDLE_BOOL
+#undef CONF_HANDLE_SIZE_T
+#undef CONF_HANDLE_SSIZE_T
+#undef CONF_HANDLE_CHAR_P
+		}
+	}
+}
+
+static bool
+malloc_init_hard(void)
+{
+	arena_t *init_arenas[1];
+
+	malloc_mutex_lock(&init_lock);
+	if (malloc_initialized || IS_INITIALIZER) {
+		/*
+		 * Another thread initialized the allocator before this one
+		 * acquired init_lock, or this thread is the initializing
+		 * thread, and it is recursively allocating.
+		 */
+		malloc_mutex_unlock(&init_lock);
+		return (false);
+	}
+#ifdef JEMALLOC_THREADED_INIT
+	if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
+		/* Busy-wait until the initializing thread completes. */
+		do {
+			malloc_mutex_unlock(&init_lock);
+			CPU_SPINWAIT;
+			malloc_mutex_lock(&init_lock);
+		} while (malloc_initialized == false);
+		malloc_mutex_unlock(&init_lock);
+		return (false);
+	}
+#endif
+	malloc_initializer = INITIALIZER;
+
+	malloc_tsd_boot();
+	if (config_prof)
+		prof_boot0();
+
+	malloc_conf_init();
+
+#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE))
+	/* Register fork handlers. */
+	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
+	    jemalloc_postfork_child) != 0) {
+		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
+		if (opt_abort)
+			abort();
+	}
+#endif
+
+	if (opt_stats_print) {
+		/* Print statistics at exit. */
+		if (atexit(stats_print_atexit) != 0) {
+			malloc_write("<jemalloc>: Error in atexit()\n");
+			if (opt_abort)
+				abort();
+		}
+	}
+
+	if (base_boot()) {
+		malloc_mutex_unlock(&init_lock);
+		return (true);
+	}
+
+	if (chunk_boot0()) {
+		malloc_mutex_unlock(&init_lock);
+		return (true);
+	}
+
+	if (ctl_boot()) {
+		malloc_mutex_unlock(&init_lock);
+		return (true);
+	}
+
+	if (config_prof)
+		prof_boot1();
+
+	arena_boot();
+
+	if (config_tcache && tcache_boot0()) {
+		malloc_mutex_unlock(&init_lock);
+		return (true);
+	}
+
+	if (huge_boot()) {
+		malloc_mutex_unlock(&init_lock);
+		return (true);
+	}
+
+	if (malloc_mutex_init(&arenas_lock))
+		return (true);
+
+	/*
+	 * Create enough scaffolding to allow recursive allocation in
+	 * malloc_ncpus().
+	 */
+	narenas = 1;
+	arenas = init_arenas;
+	memset(arenas, 0, sizeof(arena_t *) * narenas);
+
+	/*
+	 * Initialize one arena here.  The rest are lazily created in
+	 * choose_arena_hard().
+	 */
+	arenas_extend(0);
+	if (arenas[0] == NULL) {
+		malloc_mutex_unlock(&init_lock);
+		return (true);
+	}
+
+	/* Initialize allocation counters before any allocations can occur. */
+	if (config_stats && thread_allocated_tsd_boot()) {
+		malloc_mutex_unlock(&init_lock);
+		return (true);
+	}
+
+	if (arenas_tsd_boot()) {
+		malloc_mutex_unlock(&init_lock);
+		return (true);
+	}
+
+	if (config_tcache && tcache_boot1()) {
+		malloc_mutex_unlock(&init_lock);
+		return (true);
+	}
+
+	if (config_fill && quarantine_boot()) {
+		malloc_mutex_unlock(&init_lock);
+		return (true);
+	}
+
+	if (config_prof && prof_boot2()) {
+		malloc_mutex_unlock(&init_lock);
+		return (true);
+	}
+
+	/* Get number of CPUs. */
+	malloc_mutex_unlock(&init_lock);
+	ncpus = malloc_ncpus();
+	malloc_mutex_lock(&init_lock);
+
+	if (chunk_boot1()) {
+		malloc_mutex_unlock(&init_lock);
+		return (true);
+	}
+
+	if (mutex_boot()) {
+		malloc_mutex_unlock(&init_lock);
+		return (true);
+	}
+
+	if (opt_narenas == 0) {
+		/*
+		 * For SMP systems, create more than one arena per CPU by
+		 * default.
+		 */
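+		/* E.g. 8 CPUs yields opt_narenas == 32 by default. */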
+		if (ncpus > 1)
+			opt_narenas = ncpus << 2;
+		else
+			opt_narenas = 1;
+	}
+	narenas = opt_narenas;
+	/*
+	 * Make sure that the arenas array can be allocated.  In practice, this
+	 * limit is enough to allow the allocator to function, but the ctl
+	 * machinery will fail to allocate memory at far lower limits.
+	 */
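+	/*
+	 * Illustrative: with a 4 MiB chunk (the usual default) and 8-byte
+	 * pointers, this caps narenas at 524288.
+	 */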
+	if (narenas > chunksize / sizeof(arena_t *)) {
+		narenas = chunksize / sizeof(arena_t *);
+		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
+		    narenas);
+	}
+
+	/* Allocate and initialize arenas. */
+	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
+	if (arenas == NULL) {
+		malloc_mutex_unlock(&init_lock);
+		return (true);
+	}
+	/*
+	 * Zero the array.  In practice, this should always be pre-zeroed,
+	 * since it was just mmap()ed, but let's be sure.
+	 */
+	memset(arenas, 0, sizeof(arena_t *) * narenas);
+	/* Copy the pointer to the one arena that was already initialized. */
+	arenas[0] = init_arenas[0];
+
+	malloc_initialized = true;
+	malloc_mutex_unlock(&init_lock);
+	return (false);
+}
+
+/*
+ * End initialization functions.
+ */
+/******************************************************************************/
+/*
+ * Begin malloc(3)-compatible functions.
+ */
+
+JEMALLOC_ATTR(malloc)
+JEMALLOC_ATTR(visibility("default"))
+void *
+je_malloc(size_t size)
+{
+	void *ret;
+	size_t usize;
+	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
+
+	if (malloc_init()) {
+		ret = NULL;
+		goto label_oom;
+	}
+
+	if (size == 0)
+		size = 1;
+
+	if (config_prof && opt_prof) {
+		usize = s2u(size);
+		PROF_ALLOC_PREP(1, usize, cnt);
+		if (cnt == NULL) {
+			ret = NULL;
+			goto label_oom;
+		}
+		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
+		    SMALL_MAXCLASS) {
+			ret = imalloc(SMALL_MAXCLASS+1);
+			if (ret != NULL)
+				arena_prof_promoted(ret, usize);
+		} else
+			ret = imalloc(size);
+	} else {
+		if (config_stats || (config_valgrind && opt_valgrind))
+			usize = s2u(size);
+		ret = imalloc(size);
+	}
+
+label_oom:
+	if (ret == NULL) {
+		if (config_xmalloc && opt_xmalloc) {
+			malloc_write("<jemalloc>: Error in malloc(): "
+			    "out of memory\n");
+			abort();
+		}
+		errno = ENOMEM;
+	}
+	if (config_prof && opt_prof && ret != NULL)
+		prof_malloc(ret, usize, cnt);
+	if (config_stats && ret != NULL) {
+		assert(usize == isalloc(ret, config_prof));
+		thread_allocated_tsd_get()->allocated += usize;
+	}
+	UTRACE(0, size, ret);
+	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
+	return (ret);
+}
+
+JEMALLOC_ATTR(nonnull(1))
+#ifdef JEMALLOC_PROF
+/*
+ * Avoid any uncertainty as to how many backtrace frames to ignore in
+ * PROF_ALLOC_PREP().
+ */
+JEMALLOC_ATTR(noinline)
+#endif
+static int
+imemalign(void **memptr, size_t alignment, size_t size,
+    size_t min_alignment)
+{
+	int ret;
+	size_t usize;
+	void *result;
+	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
+
+	assert(min_alignment != 0);
+
+	if (malloc_init())
+		result = NULL;
+	else {
+		if (size == 0)
+			size = 1;
+
+		/* Make sure that alignment is a large enough power of 2. */
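+		/* E.g. alignment == 48 is rejected: (48 - 1) & 48 == 32 != 0. */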
+		if (((alignment - 1) & alignment) != 0
+		    || (alignment < min_alignment)) {
+			if (config_xmalloc && opt_xmalloc) {
+				malloc_write("<jemalloc>: Error allocating "
+				    "aligned memory: invalid alignment\n");
+				abort();
+			}
+			result = NULL;
+			ret = EINVAL;
+			goto label_return;
+		}
+
+		usize = sa2u(size, alignment);
+		if (usize == 0) {
+			result = NULL;
+			ret = ENOMEM;
+			goto label_return;
+		}
+
+		if (config_prof && opt_prof) {
+			PROF_ALLOC_PREP(2, usize, cnt);
+			if (cnt == NULL) {
+				result = NULL;
+				ret = EINVAL;
+			} else {
+				if (prof_promote && (uintptr_t)cnt !=
+				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
+					assert(sa2u(SMALL_MAXCLASS+1,
+					    alignment) != 0);
+					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
+					    alignment), alignment, false);
+					if (result != NULL) {
+						arena_prof_promoted(result,
+						    usize);
+					}
+				} else {
+					result = ipalloc(usize, alignment,
+					    false);
+				}
+			}
+		} else
+			result = ipalloc(usize, alignment, false);
+	}
+
+	if (result == NULL) {
+		if (config_xmalloc && opt_xmalloc) {
+			malloc_write("<jemalloc>: Error allocating aligned "
+			    "memory: out of memory\n");
+			abort();
+		}
+		ret = ENOMEM;
+		goto label_return;
+	}
+
+	*memptr = result;
+	ret = 0;
+
+label_return:
+	if (config_stats && result != NULL) {
+		assert(usize == isalloc(result, config_prof));
+		thread_allocated_tsd_get()->allocated += usize;
+	}
+	if (config_prof && opt_prof && result != NULL)
+		prof_malloc(result, usize, cnt);
+	UTRACE(0, size, result);
+	return (ret);
+}
+
+JEMALLOC_ATTR(nonnull(1))
+JEMALLOC_ATTR(visibility("default"))
+int
+je_posix_memalign(void **memptr, size_t alignment, size_t size)
+{
+	int ret = imemalign(memptr, alignment, size, sizeof(void *));
+	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
+	    config_prof), false);
+	return (ret);
+}
+
+JEMALLOC_ATTR(malloc)
+JEMALLOC_ATTR(visibility("default"))
+void *
+je_aligned_alloc(size_t alignment, size_t size)
+{
+	void *ret;
+	int err;
+
+	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
+		ret = NULL;
+		errno = err;
+	}
+	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
+	    false);
+	return (ret);
+}
+
+JEMALLOC_ATTR(malloc)
+JEMALLOC_ATTR(visibility("default"))
+void *
+je_calloc(size_t num, size_t size)
+{
+	void *ret;
+	size_t num_size;
+	size_t usize;
+	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
+
+	if (malloc_init()) {
+		num_size = 0;
+		ret = NULL;
+		goto label_return;
+	}
+
+	num_size = num * size;
+	if (num_size == 0) {
+		if (num == 0 || size == 0)
+			num_size = 1;
+		else {
+			ret = NULL;
+			goto label_return;
+		}
+	/*
+	 * Try to avoid division here.  We know that it isn't possible to
+	 * overflow during multiplication if neither operand uses any of the
+	 * most significant half of the bits in a size_t.
+	 */
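+	/*
+	 * For example, on a 64-bit system the fast path applies whenever both
+	 * num and size fit in 32 bits, since their product then fits in a
+	 * size_t; otherwise the division below detects overflow.
+	 */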
+	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
+	    && (num_size / size != num)) {
+		/* size_t overflow. */
+		ret = NULL;
+		goto label_return;
+	}
+
+	if (config_prof && opt_prof) {
+		usize = s2u(num_size);
+		PROF_ALLOC_PREP(1, usize, cnt);
+		if (cnt == NULL) {
+			ret = NULL;
+			goto label_return;
+		}
+		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
+		    <= SMALL_MAXCLASS) {
+			ret = icalloc(SMALL_MAXCLASS+1);
+			if (ret != NULL)
+				arena_prof_promoted(ret, usize);
+		} else
+			ret = icalloc(num_size);
+	} else {
+		if (config_stats || (config_valgrind && opt_valgrind))
+			usize = s2u(num_size);
+		ret = icalloc(num_size);
+	}
+
+label_return:
+	if (ret == NULL) {
+		if (config_xmalloc && opt_xmalloc) {
+			malloc_write("<jemalloc>: Error in calloc(): out of "
+			    "memory\n");
+			abort();
+		}
+		errno = ENOMEM;
+	}
+
+	if (config_prof && opt_prof && ret != NULL)
+		prof_malloc(ret, usize, cnt);
+	if (config_stats && ret != NULL) {
+		assert(usize == isalloc(ret, config_prof));
+		thread_allocated_tsd_get()->allocated += usize;
+	}
+	UTRACE(0, num_size, ret);
+	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
+	return (ret);
+}
+
+JEMALLOC_ATTR(visibility("default"))
+void *
+je_realloc(void *ptr, size_t size)
+{
+	void *ret;
+	size_t usize;
+	size_t old_size = 0;
+	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
+	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
+	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);
+
+	if (size == 0) {
+		if (ptr != NULL) {
+			/* realloc(ptr, 0) is equivalent to free(ptr). */
+			if (config_prof) {
+				old_size = isalloc(ptr, true);
+				if (config_valgrind && opt_valgrind)
+					old_rzsize = p2rz(ptr);
+			} else if (config_stats) {
+				old_size = isalloc(ptr, false);
+				if (config_valgrind && opt_valgrind)
+					old_rzsize = u2rz(old_size);
+			} else if (config_valgrind && opt_valgrind) {
+				old_size = isalloc(ptr, false);
+				old_rzsize = u2rz(old_size);
+			}
+			if (config_prof && opt_prof) {
+				old_ctx = prof_ctx_get(ptr);
+				cnt = NULL;
+			}
+			iqalloc(ptr);
+			ret = NULL;
+			goto label_return;
+		} else
+			size = 1;
+	}
+
+	if (ptr != NULL) {
+		assert(malloc_initialized || IS_INITIALIZER);
+
+		if (config_prof) {
+			old_size = isalloc(ptr, true);
+			if (config_valgrind && opt_valgrind)
+				old_rzsize = p2rz(ptr);
+		} else if (config_stats) {
+			old_size = isalloc(ptr, false);
+			if (config_valgrind && opt_valgrind)
+				old_rzsize = u2rz(old_size);
+		} else if (config_valgrind && opt_valgrind) {
+			old_size = isalloc(ptr, false);
+			old_rzsize = u2rz(old_size);
+		}
+		if (config_prof && opt_prof) {
+			usize = s2u(size);
+			old_ctx = prof_ctx_get(ptr);
+			PROF_ALLOC_PREP(1, usize, cnt);
+			if (cnt == NULL) {
+				old_ctx = NULL;
+				ret = NULL;
+				goto label_oom;
+			}
+			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
+			    usize <= SMALL_MAXCLASS) {
+				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
+				    false, false);
+				if (ret != NULL)
+					arena_prof_promoted(ret, usize);
+				else
+					old_ctx = NULL;
+			} else {
+				ret = iralloc(ptr, size, 0, 0, false, false);
+				if (ret == NULL)
+					old_ctx = NULL;
+			}
+		} else {
+			if (config_stats || (config_valgrind && opt_valgrind))
+				usize = s2u(size);
+			ret = iralloc(ptr, size, 0, 0, false, false);
+		}
+
+label_oom:
+		if (ret == NULL) {
+			if (config_xmalloc && opt_xmalloc) {
+				malloc_write("<jemalloc>: Error in realloc(): "
+				    "out of memory\n");
+				abort();
+			}
+			errno = ENOMEM;
+		}
+	} else {
+		/* realloc(NULL, size) is equivalent to malloc(size). */
+		if (config_prof && opt_prof)
+			old_ctx = NULL;
+		if (malloc_init()) {
+			if (config_prof && opt_prof)
+				cnt = NULL;
+			ret = NULL;
+		} else {
+			if (config_prof && opt_prof) {
+				usize = s2u(size);
+				PROF_ALLOC_PREP(1, usize, cnt);
+				if (cnt == NULL)
+					ret = NULL;
+				else {
+					if (prof_promote && (uintptr_t)cnt !=
+					    (uintptr_t)1U && usize <=
+					    SMALL_MAXCLASS) {
+						ret = imalloc(SMALL_MAXCLASS+1);
+						if (ret != NULL) {
+							arena_prof_promoted(ret,
+							    usize);
+						}
+					} else
+						ret = imalloc(size);
+				}
+			} else {
+				if (config_stats || (config_valgrind &&
+				    opt_valgrind))
+					usize = s2u(size);
+				ret = imalloc(size);
+			}
+		}
+
+		if (ret == NULL) {
+			if (config_xmalloc && opt_xmalloc) {
+				malloc_write("<jemalloc>: Error in realloc(): "
+				    "out of memory\n");
+				abort();
+			}
+			errno = ENOMEM;
+		}
+	}
+
+label_return:
+	if (config_prof && opt_prof)
+		prof_realloc(ret, usize, cnt, old_size, old_ctx);
+	if (config_stats && ret != NULL) {
+		thread_allocated_t *ta;
+		assert(usize == isalloc(ret, config_prof));
+		ta = thread_allocated_tsd_get();
+		ta->allocated += usize;
+		ta->deallocated += old_size;
+	}
+	UTRACE(ptr, size, ret);
+	JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
+	return (ret);
+}
+
+JEMALLOC_ATTR(visibility("default"))
+void
+je_free(void *ptr)
+{
+
+	UTRACE(ptr, 0, 0);
+	if (ptr != NULL) {
+		size_t usize;
+		size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+
+		assert(malloc_initialized || IS_INITIALIZER);
+
+		if (config_prof && opt_prof) {
+			usize = isalloc(ptr, config_prof);
+			prof_free(ptr, usize);
+		} else if (config_stats || config_valgrind)
+			usize = isalloc(ptr, config_prof);
+		if (config_stats)
+			thread_allocated_tsd_get()->deallocated += usize;
+		if (config_valgrind && opt_valgrind)
+			rzsize = p2rz(ptr);
+		iqalloc(ptr);
+		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+	}
+}
+
+/*
+ * End malloc(3)-compatible functions.
+ */
+/******************************************************************************/
+/*
+ * Begin non-standard override functions.
+ */
+
+#ifdef JEMALLOC_OVERRIDE_MEMALIGN
+JEMALLOC_ATTR(malloc)
+JEMALLOC_ATTR(visibility("default"))
+void *
+je_memalign(size_t alignment, size_t size)
+{
+	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
+	imemalign(&ret, alignment, size, 1);
+	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
+	return (ret);
+}
+#endif
+
+#ifdef JEMALLOC_OVERRIDE_VALLOC
+JEMALLOC_ATTR(malloc)
+JEMALLOC_ATTR(visibility("default"))
+void *
+je_valloc(size_t size)
+{
+	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
+	imemalign(&ret, PAGE, size, 1);
+	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
+	return (ret);
+}
+#endif
+
+/*
+ * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
+ * #define je_malloc malloc
+ */
+#define	malloc_is_malloc 1
+#define	is_malloc_(a) malloc_is_ ## a
+#define	is_malloc(a) is_malloc_(a)
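+/*
+ * For example, when jemalloc_defs.h maps je_malloc to malloc, the argument
+ * expands to malloc before token pasting, so is_malloc(je_malloc) becomes
+ * malloc_is_malloc (defined to 1 above).  Otherwise it becomes the undefined
+ * identifier malloc_is_je_malloc, which evaluates to 0 in the #if below.
+ */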
+
+#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
+/*
+ * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
+ * to inconsistently reference libc's malloc(3)-compatible functions
+ * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
+ *
+ * These definitions interpose hooks in glibc.  The functions are actually
+ * passed an extra argument for the caller return address, which will be
+ * ignored.
+ */
+JEMALLOC_ATTR(visibility("default"))
+void (* const __free_hook)(void *ptr) = je_free;
+
+JEMALLOC_ATTR(visibility("default"))
+void *(* const __malloc_hook)(size_t size) = je_malloc;
+
+JEMALLOC_ATTR(visibility("default"))
+void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;
+
+JEMALLOC_ATTR(visibility("default"))
+void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
+#endif
+
+/*
+ * End non-standard override functions.
+ */
+/******************************************************************************/
+/*
+ * Begin non-standard functions.
+ */
+
+JEMALLOC_ATTR(visibility("default"))
+size_t
+je_malloc_usable_size(const void *ptr)
+{
+	size_t ret;
+
+	assert(malloc_initialized || IS_INITIALIZER);
+
+	if (config_ivsalloc)
+		ret = ivsalloc(ptr, config_prof);
+	else
+		ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
+
+	return (ret);
+}
+
+JEMALLOC_ATTR(visibility("default"))
+void
+je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
+    const char *opts)
+{
+
+	stats_print(write_cb, cbopaque, opts);
+}
+
+JEMALLOC_ATTR(visibility("default"))
+int
+je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
+    size_t newlen)
+{
+
+	if (malloc_init())
+		return (EAGAIN);
+
+	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
+}
+
+JEMALLOC_ATTR(visibility("default"))
+int
+je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
+{
+
+	if (malloc_init())
+		return (EAGAIN);
+
+	return (ctl_nametomib(name, mibp, miblenp));
+}
+
+JEMALLOC_ATTR(visibility("default"))
+int
+je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+  void *newp, size_t newlen)
+{
+
+	if (malloc_init())
+		return (EAGAIN);
+
+	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
+}
+
+/*
+ * End non-standard functions.
+ */
+/******************************************************************************/
+/*
+ * Begin experimental functions.
+ */
+#ifdef JEMALLOC_EXPERIMENTAL
+
+JEMALLOC_INLINE void *
+iallocm(size_t usize, size_t alignment, bool zero)
+{
+
+	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
+	    alignment)));
+
+	if (alignment != 0)
+		return (ipalloc(usize, alignment, zero));
+	else if (zero)
+		return (icalloc(usize));
+	else
+		return (imalloc(usize));
+}
+
+JEMALLOC_ATTR(nonnull(1))
+JEMALLOC_ATTR(visibility("default"))
+int
+je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
+{
+	void *p;
+	size_t usize;
+	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
+	    & (SIZE_T_MAX-1));
+	bool zero = flags & ALLOCM_ZERO;
+	prof_thr_cnt_t *cnt;
+
+	assert(ptr != NULL);
+	assert(size != 0);
+
+	if (malloc_init())
+		goto label_oom;
+
+	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
+	if (usize == 0)
+		goto label_oom;
+
+	if (config_prof && opt_prof) {
+		PROF_ALLOC_PREP(1, usize, cnt);
+		if (cnt == NULL)
+			goto label_oom;
+		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
+		    SMALL_MAXCLASS) {
+			size_t usize_promoted = (alignment == 0) ?
+			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
+			    alignment);
+			assert(usize_promoted != 0);
+			p = iallocm(usize_promoted, alignment, zero);
+			if (p == NULL)
+				goto label_oom;
+			arena_prof_promoted(p, usize);
+		} else {
+			p = iallocm(usize, alignment, zero);
+			if (p == NULL)
+				goto label_oom;
+		}
+		prof_malloc(p, usize, cnt);
+	} else {
+		p = iallocm(usize, alignment, zero);
+		if (p == NULL)
+			goto label_oom;
+	}
+	if (rsize != NULL)
+		*rsize = usize;
+
+	*ptr = p;
+	if (config_stats) {
+		assert(usize == isalloc(p, config_prof));
+		thread_allocated_tsd_get()->allocated += usize;
+	}
+	UTRACE(0, size, p);
+	JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
+	return (ALLOCM_SUCCESS);
+label_oom:
+	if (config_xmalloc && opt_xmalloc) {
+		malloc_write("<jemalloc>: Error in allocm(): "
+		    "out of memory\n");
+		abort();
+	}
+	*ptr = NULL;
+	UTRACE(0, size, 0);
+	return (ALLOCM_ERR_OOM);
+}
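+/*
+ * Illustrative usage sketch of this experimental API (assuming the public
+ * allocm() symbol maps to je_allocm()): the requested alignment is encoded
+ * as its base-2 logarithm in the low bits of flags (ALLOCM_LG_ALIGN_MASK),
+ * and ALLOCM_ZERO requests zeroed memory:
+ *
+ *	void *p;
+ *	size_t sz;
+ *	if (allocm(&p, &sz, 4096, 12 | ALLOCM_ZERO) == ALLOCM_SUCCESS)
+ *		... p is zeroed, 4096-byte aligned, and sz >= 4096 ...
+ */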
+
+JEMALLOC_ATTR(nonnull(1))
+JEMALLOC_ATTR(visibility("default"))
+int
+je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
+{
+	void *p, *q;
+	size_t usize;
+	size_t old_size;
+	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
+	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
+	    & (SIZE_T_MAX-1));
+	bool zero = flags & ALLOCM_ZERO;
+	bool no_move = flags & ALLOCM_NO_MOVE;
+	prof_thr_cnt_t *cnt;
+
+	assert(ptr != NULL);
+	assert(*ptr != NULL);
+	assert(size != 0);
+	assert(SIZE_T_MAX - size >= extra);
+	assert(malloc_initialized || IS_INITIALIZER);
+
+	p = *ptr;
+	if (config_prof && opt_prof) {
+		/*
+		 * usize isn't knowable before iralloc() returns when extra is
+		 * non-zero.  Therefore, compute its maximum possible value and
+		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
+		 * backtrace.  prof_realloc() will use the actual usize to
+		 * decide whether to sample.
+		 */
+		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
+		    sa2u(size+extra, alignment);
+		prof_ctx_t *old_ctx = prof_ctx_get(p);
+		old_size = isalloc(p, true);
+		if (config_valgrind && opt_valgrind)
+			old_rzsize = p2rz(p);
+		PROF_ALLOC_PREP(1, max_usize, cnt);
+		if (cnt == NULL)
+			goto label_oom;
+		/*
+		 * Use minimum usize to determine whether promotion may happen.
+		 */
+		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
+		    && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
+		    <= SMALL_MAXCLASS) {
+			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
+			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
+			    alignment, zero, no_move);
+			if (q == NULL)
+				goto label_err;
+			if (max_usize < PAGE) {
+				usize = max_usize;
+				arena_prof_promoted(q, usize);
+			} else
+				usize = isalloc(q, config_prof);
+		} else {
+			q = iralloc(p, size, extra, alignment, zero, no_move);
+			if (q == NULL)
+				goto label_err;
+			usize = isalloc(q, config_prof);
+		}
+		prof_realloc(q, usize, cnt, old_size, old_ctx);
+		if (rsize != NULL)
+			*rsize = usize;
+	} else {
+		if (config_stats) {
+			old_size = isalloc(p, false);
+			if (config_valgrind && opt_valgrind)
+				old_rzsize = u2rz(old_size);
+		} else if (config_valgrind && opt_valgrind) {
+			old_size = isalloc(p, false);
+			old_rzsize = u2rz(old_size);
+		}
+		q = iralloc(p, size, extra, alignment, zero, no_move);
+		if (q == NULL)
+			goto label_err;
+		if (config_stats)
+			usize = isalloc(q, config_prof);
+		if (rsize != NULL) {
+			if (config_stats == false)
+				usize = isalloc(q, config_prof);
+			*rsize = usize;
+		}
+	}
+
+	*ptr = q;
+	if (config_stats) {
+		thread_allocated_t *ta;
+		ta = thread_allocated_tsd_get();
+		ta->allocated += usize;
+		ta->deallocated += old_size;
+	}
+	UTRACE(p, size, q);
+	JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
+	return (ALLOCM_SUCCESS);
+label_err:
+	if (no_move) {
+		UTRACE(p, size, q);
+		return (ALLOCM_ERR_NOT_MOVED);
+	}
+label_oom:
+	if (config_xmalloc && opt_xmalloc) {
+		malloc_write("<jemalloc>: Error in rallocm(): "
+		    "out of memory\n");
+		abort();
+	}
+	UTRACE(p, size, 0);
+	return (ALLOCM_ERR_OOM);
+}
+
+JEMALLOC_ATTR(nonnull(1))
+JEMALLOC_ATTR(visibility("default"))
+int
+je_sallocm(const void *ptr, size_t *rsize, int flags)
+{
+	size_t sz;
+
+	assert(malloc_initialized || IS_INITIALIZER);
+
+	if (config_ivsalloc)
+		sz = ivsalloc(ptr, config_prof);
+	else {
+		assert(ptr != NULL);
+		sz = isalloc(ptr, config_prof);
+	}
+	assert(rsize != NULL);
+	*rsize = sz;
+
+	return (ALLOCM_SUCCESS);
+}
+
+JEMALLOC_ATTR(nonnull(1))
+JEMALLOC_ATTR(visibility("default"))
+int
+je_dallocm(void *ptr, int flags)
+{
+	size_t usize;
+	size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+
+	assert(ptr != NULL);
+	assert(malloc_initialized || IS_INITIALIZER);
+
+	UTRACE(ptr, 0, 0);
+	if (config_stats || config_valgrind)
+		usize = isalloc(ptr, config_prof);
+	if (config_prof && opt_prof) {
+		if (config_stats == false && config_valgrind == false)
+			usize = isalloc(ptr, config_prof);
+		prof_free(ptr, usize);
+	}
+	if (config_stats)
+		thread_allocated_tsd_get()->deallocated += usize;
+	if (config_valgrind && opt_valgrind)
+		rzsize = p2rz(ptr);
+	iqalloc(ptr);
+	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+
+	return (ALLOCM_SUCCESS);
+}
+
+JEMALLOC_ATTR(visibility("default"))
+int
+je_nallocm(size_t *rsize, size_t size, int flags)
+{
+	size_t usize;
+	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
+	    & (SIZE_T_MAX-1));
+
+	assert(size != 0);
+
+	if (malloc_init())
+		return (ALLOCM_ERR_OOM);
+
+	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
+	if (usize == 0)
+		return (ALLOCM_ERR_OOM);
+
+	if (rsize != NULL)
+		*rsize = usize;
+	return (ALLOCM_SUCCESS);
+}
+
+#endif
+/*
+ * End experimental functions.
+ */
+/******************************************************************************/
+/*
+ * The following functions are used by threading libraries for protection of
+ * malloc during fork().
+ */
+
+#ifndef JEMALLOC_MUTEX_INIT_CB
+void
+jemalloc_prefork(void)
+#else
+void
+_malloc_prefork(void)
+#endif
+{
+	unsigned i;
+
+	/* Acquire all mutexes in a safe order. */
+	malloc_mutex_prefork(&arenas_lock);
+	for (i = 0; i < narenas; i++) {
+		if (arenas[i] != NULL)
+			arena_prefork(arenas[i]);
+	}
+	base_prefork();
+	huge_prefork();
+	chunk_dss_prefork();
+}
+
+#ifndef JEMALLOC_MUTEX_INIT_CB
+void
+jemalloc_postfork_parent(void)
+#else
+void
+_malloc_postfork(void)
+#endif
+{
+	unsigned i;
+
+	/* Release all mutexes, now that fork() has completed. */
+	chunk_dss_postfork_parent();
+	huge_postfork_parent();
+	base_postfork_parent();
+	for (i = 0; i < narenas; i++) {
+		if (arenas[i] != NULL)
+			arena_postfork_parent(arenas[i]);
+	}
+	malloc_mutex_postfork_parent(&arenas_lock);
+}
+
+void
+jemalloc_postfork_child(void)
+{
+	unsigned i;
+
+	/* Release all mutexes, now that fork() has completed. */
+	chunk_dss_postfork_child();
+	huge_postfork_child();
+	base_postfork_child();
+	for (i = 0; i < narenas; i++) {
+		if (arenas[i] != NULL)
+			arena_postfork_child(arenas[i]);
+	}
+	malloc_mutex_postfork_child(&arenas_lock);
+}
+
+/******************************************************************************/
+/*
+ * The following functions are used for TLS allocation/deallocation in static
+ * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
+ * is that these avoid accessing TLS variables.
+ */
+
+static void *
+a0alloc(size_t size, bool zero)
+{
+
+	if (malloc_init())
+		return (NULL);
+
+	if (size == 0)
+		size = 1;
+
+	if (size <= arena_maxclass)
+		return (arena_malloc(arenas[0], size, zero, false));
+	else
+		return (huge_malloc(size, zero));
+}
+
+void *
+a0malloc(size_t size)
+{
+
+	return (a0alloc(size, false));
+}
+
+void *
+a0calloc(size_t num, size_t size)
+{
+
+	return (a0alloc(num * size, true));
+}
+
+void
+a0free(void *ptr)
+{
+	arena_chunk_t *chunk;
+
+	if (ptr == NULL)
+		return;
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	if (chunk != ptr)
+		arena_dalloc(chunk->arena, chunk, ptr, false);
+	else
+		huge_dalloc(ptr, true);
+}
+
+/******************************************************************************/
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/src/mb.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/src/mb.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,2 @@
+#define	JEMALLOC_MB_C_
+#include "jemalloc/internal/jemalloc_internal.h"
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/src/mutex.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/src/mutex.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,153 @@
+#define	JEMALLOC_MUTEX_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+#ifdef JEMALLOC_LAZY_LOCK
+#include <dlfcn.h>
+#endif
+
+/******************************************************************************/
+/* Data. */
+
+#ifdef JEMALLOC_LAZY_LOCK
+bool isthreaded = false;
+#endif
+#ifdef JEMALLOC_MUTEX_INIT_CB
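+/*
+ * Mutexes created before base_calloc() is usable cannot be initialized via
+ * _pthread_mutex_init_calloc_cb(); malloc_mutex_init() queues them on
+ * postponed_mutexes, and mutex_boot() initializes them once bootstrapping has
+ * progressed far enough.
+ */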
+static bool		postpone_init = true;
+static malloc_mutex_t	*postponed_mutexes = NULL;
+#endif
+
+#ifdef JEMALLOC_LAZY_LOCK
+static void	pthread_create_once(void);
+#endif
+
+/******************************************************************************/
+/*
+ * We intercept pthread_create() calls in order to toggle isthreaded if the
+ * process goes multi-threaded.
+ */
+
+#ifdef JEMALLOC_LAZY_LOCK
+static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
+    void *(*)(void *), void *__restrict);
+
+static void
+pthread_create_once(void)
+{
+
+	pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
+	if (pthread_create_fptr == NULL) {
+		malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
+		    "\"pthread_create\")\n");
+		abort();
+	}
+
+	isthreaded = true;
+}
+
+JEMALLOC_ATTR(visibility("default"))
+int
+pthread_create(pthread_t *__restrict thread,
+    const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
+    void *__restrict arg)
+{
+	static pthread_once_t once_control = PTHREAD_ONCE_INIT;
+
+	pthread_once(&once_control, pthread_create_once);
+
+	return (pthread_create_fptr(thread, attr, start_routine, arg));
+}
+#endif
+
+/******************************************************************************/
+
+#ifdef JEMALLOC_MUTEX_INIT_CB
+int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+    void *(calloc_cb)(size_t, size_t));
+
+__weak_reference(_pthread_mutex_init_calloc_cb_stub,
+    _pthread_mutex_init_calloc_cb);
+
+int
+_pthread_mutex_init_calloc_cb_stub(pthread_mutex_t *mutex,
+    void *(calloc_cb)(size_t, size_t))
+{
+
+	return (0);
+}
+#endif
+
+bool
+malloc_mutex_init(malloc_mutex_t *mutex)
+{
+#ifdef JEMALLOC_OSSPIN
+	mutex->lock = 0;
+#elif (defined(JEMALLOC_MUTEX_INIT_CB))
+	if (postpone_init) {
+		mutex->postponed_next = postponed_mutexes;
+		postponed_mutexes = mutex;
+	} else {
+		if (_pthread_mutex_init_calloc_cb(&mutex->lock, base_calloc) !=
+		    0)
+			return (true);
+	}
+#else
+	pthread_mutexattr_t attr;
+
+	if (pthread_mutexattr_init(&attr) != 0)
+		return (true);
+	pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
+	if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
+		pthread_mutexattr_destroy(&attr);
+		return (true);
+	}
+	pthread_mutexattr_destroy(&attr);
+
+#endif
+	return (false);
+}
+
+void
+malloc_mutex_prefork(malloc_mutex_t *mutex)
+{
+
+	malloc_mutex_lock(mutex);
+}
+
+void
+malloc_mutex_postfork_parent(malloc_mutex_t *mutex)
+{
+
+	malloc_mutex_unlock(mutex);
+}
+
+void
+malloc_mutex_postfork_child(malloc_mutex_t *mutex)
+{
+
+#ifdef JEMALLOC_MUTEX_INIT_CB
+	malloc_mutex_unlock(mutex);
+#else
+	if (malloc_mutex_init(mutex)) {
+		malloc_printf("<jemalloc>: Error re-initializing mutex in "
+		    "child\n");
+		if (opt_abort)
+			abort();
+	}
+#endif
+}
+
+bool
+mutex_boot(void)
+{
+
+#ifdef JEMALLOC_MUTEX_INIT_CB
+	postpone_init = false;
+	while (postponed_mutexes != NULL) {
+		if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
+		    base_calloc) != 0)
+			return (true);
+		postponed_mutexes = postponed_mutexes->postponed_next;
+	}
+#endif
+	return (false);
+}
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/src/prof.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/src/prof.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,1243 @@
+#define	JEMALLOC_PROF_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+/******************************************************************************/
+
+#ifdef JEMALLOC_PROF_LIBUNWIND
+#define	UNW_LOCAL_ONLY
+#include <libunwind.h>
+#endif
+
+#ifdef JEMALLOC_PROF_LIBGCC
+#include <unwind.h>
+#endif
+
+/******************************************************************************/
+/* Data. */
+
+malloc_tsd_data(, prof_tdata, prof_tdata_t *, NULL)
+
+bool		opt_prof = false;
+bool		opt_prof_active = true;
+size_t		opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
+ssize_t		opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
+bool		opt_prof_gdump = false;
+bool		opt_prof_leak = false;
+bool		opt_prof_accum = true;
+char		opt_prof_prefix[PATH_MAX + 1];
+
+uint64_t	prof_interval;
+bool		prof_promote;
+
+/*
+ * Table of mutexes that are shared among ctx's.  These are leaf locks, so
+ * there is no problem with using them for more than one ctx at the same time.
+ * The primary motivation for this sharing though is that ctx's are ephemeral,
+ * and destroying mutexes causes complications for systems that allocate when
+ * creating/destroying mutexes.
+ */
+static malloc_mutex_t	*ctx_locks;
+static unsigned		cum_ctxs; /* Atomic counter. */
+
+/*
+ * Global hash of (prof_bt_t *)-->(prof_ctx_t *).  This is the master data
+ * structure that knows about all backtraces currently captured.
+ */
+static ckh_t		bt2ctx;
+static malloc_mutex_t	bt2ctx_mtx;
+
+static malloc_mutex_t	prof_dump_seq_mtx;
+static uint64_t		prof_dump_seq;
+static uint64_t		prof_dump_iseq;
+static uint64_t		prof_dump_mseq;
+static uint64_t		prof_dump_useq;
+
+/*
+ * This buffer is rather large for stack allocation, so use a single buffer for
+ * all profile dumps.  The buffer is implicitly protected by bt2ctx_mtx, since
+ * it must be locked anyway during dumping.
+ */
+static char		prof_dump_buf[PROF_DUMP_BUFSIZE];
+static unsigned		prof_dump_buf_end;
+static int		prof_dump_fd;
+
+/* Do not dump any profiles until bootstrapping is complete. */
+static bool		prof_booted = false;
+
+static malloc_mutex_t	enq_mtx;
+static bool		enq;
+static bool		enq_idump;
+static bool		enq_gdump;
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static prof_bt_t	*bt_dup(prof_bt_t *bt);
+static void	bt_destroy(prof_bt_t *bt);
+#ifdef JEMALLOC_PROF_LIBGCC
+static _Unwind_Reason_Code	prof_unwind_init_callback(
+    struct _Unwind_Context *context, void *arg);
+static _Unwind_Reason_Code	prof_unwind_callback(
+    struct _Unwind_Context *context, void *arg);
+#endif
+static bool	prof_flush(bool propagate_err);
+static bool	prof_write(bool propagate_err, const char *s);
+static bool	prof_printf(bool propagate_err, const char *format, ...)
+    JEMALLOC_ATTR(format(printf, 2, 3));
+static void	prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all,
+    size_t *leak_nctx);
+static void	prof_ctx_destroy(prof_ctx_t *ctx);
+static void	prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt);
+static bool	prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx,
+    prof_bt_t *bt);
+static bool	prof_dump_maps(bool propagate_err);
+static bool	prof_dump(bool propagate_err, const char *filename,
+    bool leakcheck);
+static void	prof_dump_filename(char *filename, char v, int64_t vseq);
+static void	prof_fdump(void);
+static void	prof_bt_hash(const void *key, unsigned minbits, size_t *hash1,
+    size_t *hash2);
+static bool	prof_bt_keycomp(const void *k1, const void *k2);
+static malloc_mutex_t	*prof_ctx_mutex_choose(void);
+
+/******************************************************************************/
+
+void
+bt_init(prof_bt_t *bt, void **vec)
+{
+
+	cassert(config_prof);
+
+	bt->vec = vec;
+	bt->len = 0;
+}
+
+static void
+bt_destroy(prof_bt_t *bt)
+{
+
+	cassert(config_prof);
+
+	idalloc(bt);
+}
+
+static prof_bt_t *
+bt_dup(prof_bt_t *bt)
+{
+	prof_bt_t *ret;
+
+	cassert(config_prof);
+
+	/*
+	 * Create a single allocation that has space for vec immediately
+	 * following the prof_bt_t structure.  The backtraces that get
+	 * stored in the backtrace caches are copied from stack-allocated
+	 * temporary variables, so size is known at creation time.  Making this
+	 * a contiguous object improves cache locality.
+	 */
+	ret = (prof_bt_t *)imalloc(QUANTUM_CEILING(sizeof(prof_bt_t)) +
+	    (bt->len * sizeof(void *)));
+	if (ret == NULL)
+		return (NULL);
+	ret->vec = (void **)((uintptr_t)ret +
+	    QUANTUM_CEILING(sizeof(prof_bt_t)));
+	memcpy(ret->vec, bt->vec, bt->len * sizeof(void *));
+	ret->len = bt->len;
+
+	return (ret);
+}
+
+static inline void
+prof_enter(void)
+{
+
+	cassert(config_prof);
+
+	malloc_mutex_lock(&enq_mtx);
+	enq = true;
+	malloc_mutex_unlock(&enq_mtx);
+
+	malloc_mutex_lock(&bt2ctx_mtx);
+}
+
+static inline void
+prof_leave(void)
+{
+	bool idump, gdump;
+
+	cassert(config_prof);
+
+	malloc_mutex_unlock(&bt2ctx_mtx);
+
+	malloc_mutex_lock(&enq_mtx);
+	enq = false;
+	idump = enq_idump;
+	enq_idump = false;
+	gdump = enq_gdump;
+	enq_gdump = false;
+	malloc_mutex_unlock(&enq_mtx);
+
+	if (idump)
+		prof_idump();
+	if (gdump)
+		prof_gdump();
+}
+
+#ifdef JEMALLOC_PROF_LIBUNWIND
+void
+prof_backtrace(prof_bt_t *bt, unsigned nignore)
+{
+	unw_context_t uc;
+	unw_cursor_t cursor;
+	unsigned i;
+	int err;
+
+	cassert(config_prof);
+	assert(bt->len == 0);
+	assert(bt->vec != NULL);
+
+	unw_getcontext(&uc);
+	unw_init_local(&cursor, &uc);
+
+	/* Throw away (nignore+1) stack frames, if that many exist. */
+	for (i = 0; i < nignore + 1; i++) {
+		err = unw_step(&cursor);
+		if (err <= 0)
+			return;
+	}
+
+	/*
+	 * Iterate over stack frames until there are no more, or until no space
+	 * remains in bt.
+	 */
+	for (i = 0; i < PROF_BT_MAX; i++) {
+		unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]);
+		bt->len++;
+		err = unw_step(&cursor);
+		if (err <= 0)
+			break;
+	}
+}
+#elif (defined(JEMALLOC_PROF_LIBGCC))
+static _Unwind_Reason_Code
+prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
+{
+
+	cassert(config_prof);
+
+	return (_URC_NO_REASON);
+}
+
+static _Unwind_Reason_Code
+prof_unwind_callback(struct _Unwind_Context *context, void *arg)
+{
+	prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
+
+	cassert(config_prof);
+
+	if (data->nignore > 0)
+		data->nignore--;
+	else {
+		data->bt->vec[data->bt->len] = (void *)_Unwind_GetIP(context);
+		data->bt->len++;
+		if (data->bt->len == data->max)
+			return (_URC_END_OF_STACK);
+	}
+
+	return (_URC_NO_REASON);
+}
+
+void
+prof_backtrace(prof_bt_t *bt, unsigned nignore)
+{
+	prof_unwind_data_t data = {bt, nignore, PROF_BT_MAX};
+
+	cassert(config_prof);
+
+	_Unwind_Backtrace(prof_unwind_callback, &data);
+}
+#elif (defined(JEMALLOC_PROF_GCC))
+void
+prof_backtrace(prof_bt_t *bt, unsigned nignore)
+{
+#define	BT_FRAME(i)							\
+	if ((i) < nignore + PROF_BT_MAX) {				\
+		void *p;						\
+		if (__builtin_frame_address(i) == 0)			\
+			return;						\
+		p = __builtin_return_address(i);			\
+		if (p == NULL)						\
+			return;						\
+		if (i >= nignore) {					\
+			bt->vec[(i) - nignore] = p;			\
+			bt->len = (i) - nignore + 1;			\
+		}							\
+	} else								\
+		return;
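+	/*
+	 * BT_FRAME() is expanded manually below because
+	 * __builtin_frame_address() and __builtin_return_address() only
+	 * accept constant integer arguments.
+	 */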
+
+	cassert(config_prof);
+	assert(nignore <= 3);
+
+	BT_FRAME(0)
+	BT_FRAME(1)
+	BT_FRAME(2)
+	BT_FRAME(3)
+	BT_FRAME(4)
+	BT_FRAME(5)
+	BT_FRAME(6)
+	BT_FRAME(7)
+	BT_FRAME(8)
+	BT_FRAME(9)
+
+	BT_FRAME(10)
+	BT_FRAME(11)
+	BT_FRAME(12)
+	BT_FRAME(13)
+	BT_FRAME(14)
+	BT_FRAME(15)
+	BT_FRAME(16)
+	BT_FRAME(17)
+	BT_FRAME(18)
+	BT_FRAME(19)
+
+	BT_FRAME(20)
+	BT_FRAME(21)
+	BT_FRAME(22)
+	BT_FRAME(23)
+	BT_FRAME(24)
+	BT_FRAME(25)
+	BT_FRAME(26)
+	BT_FRAME(27)
+	BT_FRAME(28)
+	BT_FRAME(29)
+
+	BT_FRAME(30)
+	BT_FRAME(31)
+	BT_FRAME(32)
+	BT_FRAME(33)
+	BT_FRAME(34)
+	BT_FRAME(35)
+	BT_FRAME(36)
+	BT_FRAME(37)
+	BT_FRAME(38)
+	BT_FRAME(39)
+
+	BT_FRAME(40)
+	BT_FRAME(41)
+	BT_FRAME(42)
+	BT_FRAME(43)
+	BT_FRAME(44)
+	BT_FRAME(45)
+	BT_FRAME(46)
+	BT_FRAME(47)
+	BT_FRAME(48)
+	BT_FRAME(49)
+
+	BT_FRAME(50)
+	BT_FRAME(51)
+	BT_FRAME(52)
+	BT_FRAME(53)
+	BT_FRAME(54)
+	BT_FRAME(55)
+	BT_FRAME(56)
+	BT_FRAME(57)
+	BT_FRAME(58)
+	BT_FRAME(59)
+
+	BT_FRAME(60)
+	BT_FRAME(61)
+	BT_FRAME(62)
+	BT_FRAME(63)
+	BT_FRAME(64)
+	BT_FRAME(65)
+	BT_FRAME(66)
+	BT_FRAME(67)
+	BT_FRAME(68)
+	BT_FRAME(69)
+
+	BT_FRAME(70)
+	BT_FRAME(71)
+	BT_FRAME(72)
+	BT_FRAME(73)
+	BT_FRAME(74)
+	BT_FRAME(75)
+	BT_FRAME(76)
+	BT_FRAME(77)
+	BT_FRAME(78)
+	BT_FRAME(79)
+
+	BT_FRAME(80)
+	BT_FRAME(81)
+	BT_FRAME(82)
+	BT_FRAME(83)
+	BT_FRAME(84)
+	BT_FRAME(85)
+	BT_FRAME(86)
+	BT_FRAME(87)
+	BT_FRAME(88)
+	BT_FRAME(89)
+
+	BT_FRAME(90)
+	BT_FRAME(91)
+	BT_FRAME(92)
+	BT_FRAME(93)
+	BT_FRAME(94)
+	BT_FRAME(95)
+	BT_FRAME(96)
+	BT_FRAME(97)
+	BT_FRAME(98)
+	BT_FRAME(99)
+
+	BT_FRAME(100)
+	BT_FRAME(101)
+	BT_FRAME(102)
+	BT_FRAME(103)
+	BT_FRAME(104)
+	BT_FRAME(105)
+	BT_FRAME(106)
+	BT_FRAME(107)
+	BT_FRAME(108)
+	BT_FRAME(109)
+
+	BT_FRAME(110)
+	BT_FRAME(111)
+	BT_FRAME(112)
+	BT_FRAME(113)
+	BT_FRAME(114)
+	BT_FRAME(115)
+	BT_FRAME(116)
+	BT_FRAME(117)
+	BT_FRAME(118)
+	BT_FRAME(119)
+
+	BT_FRAME(120)
+	BT_FRAME(121)
+	BT_FRAME(122)
+	BT_FRAME(123)
+	BT_FRAME(124)
+	BT_FRAME(125)
+	BT_FRAME(126)
+	BT_FRAME(127)
+
+	/* Extras to compensate for nignore. */
+	BT_FRAME(128)
+	BT_FRAME(129)
+	BT_FRAME(130)
+#undef BT_FRAME
+}
+#else
+void
+prof_backtrace(prof_bt_t *bt, unsigned nignore)
+{
+
+	cassert(config_prof);
+	assert(false);
+}
+#endif
+
+prof_thr_cnt_t *
+prof_lookup(prof_bt_t *bt)
+{
+	union {
+		prof_thr_cnt_t	*p;
+		void		*v;
+	} ret;
+	prof_tdata_t *prof_tdata;
+
+	cassert(config_prof);
+
+	prof_tdata = *prof_tdata_tsd_get();
+	if (prof_tdata == NULL) {
+		prof_tdata = prof_tdata_init();
+		if (prof_tdata == NULL)
+			return (NULL);
+	}
+
+	if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) {
+		union {
+			prof_bt_t	*p;
+			void		*v;
+		} btkey;
+		union {
+			prof_ctx_t	*p;
+			void		*v;
+		} ctx;
+		bool new_ctx;
+
+		/*
+		 * This thread's cache lacks bt.  Look for it in the global
+		 * cache.
+		 */
+		prof_enter();
+		if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) {
+			/* bt has never been seen before.  Insert it. */
+			ctx.v = imalloc(sizeof(prof_ctx_t));
+			if (ctx.v == NULL) {
+				prof_leave();
+				return (NULL);
+			}
+			btkey.p = bt_dup(bt);
+			if (btkey.v == NULL) {
+				prof_leave();
+				idalloc(ctx.v);
+				return (NULL);
+			}
+			ctx.p->bt = btkey.p;
+			ctx.p->lock = prof_ctx_mutex_choose();
+			memset(&ctx.p->cnt_merged, 0, sizeof(prof_cnt_t));
+			ql_new(&ctx.p->cnts_ql);
+			if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) {
+				/* OOM. */
+				prof_leave();
+				idalloc(btkey.v);
+				idalloc(ctx.v);
+				return (NULL);
+			}
+			/*
+			 * Artificially raise curobjs, in order to avoid a race
+			 * condition with prof_ctx_merge()/prof_ctx_destroy().
+			 *
+			 * No locking is necessary for ctx here because no other
+			 * threads have had the opportunity to fetch it from
+			 * bt2ctx yet.
+			 */
+			ctx.p->cnt_merged.curobjs++;
+			new_ctx = true;
+		} else {
+			/*
+			 * Artificially raise curobjs, in order to avoid a race
+			 * condition with prof_ctx_merge()/prof_ctx_destroy().
+			 */
+			malloc_mutex_lock(ctx.p->lock);
+			ctx.p->cnt_merged.curobjs++;
+			malloc_mutex_unlock(ctx.p->lock);
+			new_ctx = false;
+		}
+		prof_leave();
+
+		/* Link a prof_thr_cnt_t into ctx for this thread. */
+		if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) {
+			assert(ckh_count(&prof_tdata->bt2cnt) > 0);
+			/*
+			 * Flush the least recently used cnt in order to keep
+			 * bt2cnt from becoming too large.
+			 */
+			ret.p = ql_last(&prof_tdata->lru_ql, lru_link);
+			assert(ret.v != NULL);
+			if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt,
+			    NULL, NULL))
+				assert(false);
+			ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
+			prof_ctx_merge(ret.p->ctx, ret.p);
+			/* ret can now be re-used. */
+		} else {
+			assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX);
+			/* Allocate and partially initialize a new cnt. */
+			ret.v = imalloc(sizeof(prof_thr_cnt_t));
+			if (ret.p == NULL) {
+				if (new_ctx)
+					prof_ctx_destroy(ctx.p);
+				return (NULL);
+			}
+			ql_elm_new(ret.p, cnts_link);
+			ql_elm_new(ret.p, lru_link);
+		}
+		/* Finish initializing ret. */
+		ret.p->ctx = ctx.p;
+		ret.p->epoch = 0;
+		memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
+		if (ckh_insert(&prof_tdata->bt2cnt, btkey.v, ret.v)) {
+			if (new_ctx)
+				prof_ctx_destroy(ctx.p);
+			idalloc(ret.v);
+			return (NULL);
+		}
+		ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
+		malloc_mutex_lock(ctx.p->lock);
+		ql_tail_insert(&ctx.p->cnts_ql, ret.p, cnts_link);
+		ctx.p->cnt_merged.curobjs--;
+		malloc_mutex_unlock(ctx.p->lock);
+	} else {
+		/* Move ret to the front of the LRU. */
+		ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
+		ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
+	}
+
+	return (ret.p);
+}
+
+static bool
+prof_flush(bool propagate_err)
+{
+	bool ret = false;
+	ssize_t err;
+
+	cassert(config_prof);
+
+	err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
+	if (err == -1) {
+		if (propagate_err == false) {
+			malloc_write("<jemalloc>: write() failed during heap "
+			    "profile flush\n");
+			if (opt_abort)
+				abort();
+		}
+		ret = true;
+	}
+	prof_dump_buf_end = 0;
+
+	return (ret);
+}
+
+static bool
+prof_write(bool propagate_err, const char *s)
+{
+	unsigned i, slen, n;
+
+	cassert(config_prof);
+
+	i = 0;
+	slen = strlen(s);
+	while (i < slen) {
+		/* Flush the buffer if it is full. */
+		if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
+			if (prof_flush(propagate_err) && propagate_err)
+				return (true);
+
+		if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
+			/* Finish writing. */
+			n = slen - i;
+		} else {
+			/* Write as much of s as will fit. */
+			n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
+		}
+		memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
+		prof_dump_buf_end += n;
+		i += n;
+	}
+
+	return (false);
+}
+
+JEMALLOC_ATTR(format(printf, 2, 3))
+static bool
+prof_printf(bool propagate_err, const char *format, ...)
+{
+	bool ret;
+	va_list ap;
+	char buf[PROF_PRINTF_BUFSIZE];
+
+	va_start(ap, format);
+	malloc_vsnprintf(buf, sizeof(buf), format, ap);
+	va_end(ap);
+	ret = prof_write(propagate_err, buf);
+
+	return (ret);
+}
+
+static void
+prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
+{
+	prof_thr_cnt_t *thr_cnt;
+	prof_cnt_t tcnt;
+
+	cassert(config_prof);
+
+	malloc_mutex_lock(ctx->lock);
+
+	memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
+	ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) {
+		volatile unsigned *epoch = &thr_cnt->epoch;
+
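+		/*
+		 * Snapshot cnts without holding a per-thread lock: the owning
+		 * thread leaves epoch odd while an update is in progress, so
+		 * retry until the same even epoch is observed before and after
+		 * the copy.
+		 */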
+		while (true) {
+			unsigned epoch0 = *epoch;
+
+			/* Make sure epoch is even. */
+			if (epoch0 & 1U)
+				continue;
+
+			memcpy(&tcnt, &thr_cnt->cnts, sizeof(prof_cnt_t));
+
+			/* Terminate if epoch didn't change while reading. */
+			if (*epoch == epoch0)
+				break;
+		}
+
+		ctx->cnt_summed.curobjs += tcnt.curobjs;
+		ctx->cnt_summed.curbytes += tcnt.curbytes;
+		if (opt_prof_accum) {
+			ctx->cnt_summed.accumobjs += tcnt.accumobjs;
+			ctx->cnt_summed.accumbytes += tcnt.accumbytes;
+		}
+	}
+
+	if (ctx->cnt_summed.curobjs != 0)
+		(*leak_nctx)++;
+
+	/* Add to cnt_all. */
+	cnt_all->curobjs += ctx->cnt_summed.curobjs;
+	cnt_all->curbytes += ctx->cnt_summed.curbytes;
+	if (opt_prof_accum) {
+		cnt_all->accumobjs += ctx->cnt_summed.accumobjs;
+		cnt_all->accumbytes += ctx->cnt_summed.accumbytes;
+	}
+
+	malloc_mutex_unlock(ctx->lock);
+}
+
+static void
+prof_ctx_destroy(prof_ctx_t *ctx)
+{
+
+	cassert(config_prof);
+
+	/*
+	 * Check that ctx is still unused by any thread cache before destroying
+	 * it.  prof_lookup() artificially raises ctx->cnt_merged.curobjs in
+	 * order to avoid a race condition with this function, as does
+	 * prof_ctx_merge() in order to avoid a race between the main body of
+	 * prof_ctx_merge() and entry into this function.
+	 */
+	prof_enter();
+	malloc_mutex_lock(ctx->lock);
+	if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 1) {
+		assert(ctx->cnt_merged.curbytes == 0);
+		assert(ctx->cnt_merged.accumobjs == 0);
+		assert(ctx->cnt_merged.accumbytes == 0);
+		/* Remove ctx from bt2ctx. */
+		if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL))
+			assert(false);
+		prof_leave();
+		/* Destroy ctx. */
+		malloc_mutex_unlock(ctx->lock);
+		bt_destroy(ctx->bt);
+		idalloc(ctx);
+	} else {
+		/*
+		 * Compensate for increment in prof_ctx_merge() or
+		 * prof_lookup().
+		 */
+		ctx->cnt_merged.curobjs--;
+		malloc_mutex_unlock(ctx->lock);
+		prof_leave();
+	}
+}
+
+static void
+prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
+{
+	bool destroy;
+
+	cassert(config_prof);
+
+	/* Merge cnt stats and detach from ctx. */
+	malloc_mutex_lock(ctx->lock);
+	ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
+	ctx->cnt_merged.curbytes += cnt->cnts.curbytes;
+	ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs;
+	ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes;
+	ql_remove(&ctx->cnts_ql, cnt, cnts_link);
+	if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL &&
+	    ctx->cnt_merged.curobjs == 0) {
+		/*
+		 * Artificially raise ctx->cnt_merged.curobjs in order to keep
+		 * another thread from winning the race to destroy ctx while
+		 * this one has ctx->lock dropped.  Without this, it would be
+		 * possible for another thread to:
+		 *
+		 * 1) Sample an allocation associated with ctx.
+		 * 2) Deallocate the sampled object.
+		 * 3) Successfully prof_ctx_destroy(ctx).
+		 *
+		 * The result would be that ctx no longer exists by the time
+		 * this thread accesses it in prof_ctx_destroy().
+		 */
+		ctx->cnt_merged.curobjs++;
+		destroy = true;
+	} else
+		destroy = false;
+	malloc_mutex_unlock(ctx->lock);
+	if (destroy)
+		prof_ctx_destroy(ctx);
+}
+
+static bool
+prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, prof_bt_t *bt)
+{
+	unsigned i;
+
+	cassert(config_prof);
+
+	if (opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) {
+		assert(ctx->cnt_summed.curbytes == 0);
+		assert(ctx->cnt_summed.accumobjs == 0);
+		assert(ctx->cnt_summed.accumbytes == 0);
+		return (false);
+	}
+
+	if (prof_printf(propagate_err, "%"PRId64": %"PRId64
+	    " [%"PRIu64": %"PRIu64"] @",
+	    ctx->cnt_summed.curobjs, ctx->cnt_summed.curbytes,
+	    ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes))
+		return (true);
+
+	for (i = 0; i < bt->len; i++) {
+		if (prof_printf(propagate_err, " %#"PRIxPTR,
+		    (uintptr_t)bt->vec[i]))
+			return (true);
+	}
+
+	if (prof_write(propagate_err, "\n"))
+		return (true);
+
+	return (false);
+}
+
+static bool
+prof_dump_maps(bool propagate_err)
+{
+	int mfd;
+	char filename[PATH_MAX + 1];
+
+	cassert(config_prof);
+
+	malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps",
+	    (int)getpid());
+	mfd = open(filename, O_RDONLY);
+	if (mfd != -1) {
+		ssize_t nread;
+
+		if (prof_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
+		    propagate_err)
+			return (true);
+		nread = 0;
+		do {
+			prof_dump_buf_end += nread;
+			if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
+				/* Make space in prof_dump_buf before read(). */
+				if (prof_flush(propagate_err) && propagate_err)
+					return (true);
+			}
+			nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
+			    PROF_DUMP_BUFSIZE - prof_dump_buf_end);
+		} while (nread > 0);
+		close(mfd);
+	} else
+		return (true);
+
+	return (false);
+}
+
+static bool
+prof_dump(bool propagate_err, const char *filename, bool leakcheck)
+{
+	prof_cnt_t cnt_all;
+	size_t tabind;
+	union {
+		prof_bt_t	*p;
+		void		*v;
+	} bt;
+	union {
+		prof_ctx_t	*p;
+		void		*v;
+	} ctx;
+	size_t leak_nctx;
+
+	cassert(config_prof);
+
+	prof_enter();
+	prof_dump_fd = creat(filename, 0644);
+	if (prof_dump_fd == -1) {
+		if (propagate_err == false) {
+			malloc_printf(
+			    "<jemalloc>: creat(\"%s\", 0644) failed\n",
+			    filename);
+			if (opt_abort)
+				abort();
+		}
+		goto label_error;
+	}
+
+	/* Merge per thread profile stats, and sum them in cnt_all. */
+	memset(&cnt_all, 0, sizeof(prof_cnt_t));
+	leak_nctx = 0;
+	for (tabind = 0; ckh_iter(&bt2ctx, &tabind, NULL, &ctx.v) == false;)
+		prof_ctx_sum(ctx.p, &cnt_all, &leak_nctx);
+
+	/* Dump profile header. */
+	if (opt_lg_prof_sample == 0) {
+		if (prof_printf(propagate_err,
+		    "heap profile: %"PRId64": %"PRId64
+		    " [%"PRIu64": %"PRIu64"] @ heapprofile\n",
+		    cnt_all.curobjs, cnt_all.curbytes,
+		    cnt_all.accumobjs, cnt_all.accumbytes))
+			goto label_error;
+	} else {
+		if (prof_printf(propagate_err,
+		    "heap profile: %"PRId64": %"PRId64
+		    " [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n",
+		    cnt_all.curobjs, cnt_all.curbytes,
+		    cnt_all.accumobjs, cnt_all.accumbytes,
+		    ((uint64_t)1U << opt_lg_prof_sample)))
+			goto label_error;
+	}
+
+	/* Dump per-ctx profile stats. */
+	for (tabind = 0; ckh_iter(&bt2ctx, &tabind, &bt.v, &ctx.v)
+	    == false;) {
+		if (prof_dump_ctx(propagate_err, ctx.p, bt.p))
+			goto label_error;
+	}
+
+	/* Dump /proc/<pid>/maps if possible. */
+	if (prof_dump_maps(propagate_err))
+		goto label_error;
+
+	if (prof_flush(propagate_err))
+		goto label_error;
+	close(prof_dump_fd);
+	prof_leave();
+
+	if (leakcheck && cnt_all.curbytes != 0) {
+		malloc_printf("<jemalloc>: Leak summary: %"PRId64" byte%s, %"
+		    PRId64" object%s, %zu context%s\n",
+		    cnt_all.curbytes, (cnt_all.curbytes != 1) ? "s" : "",
+		    cnt_all.curobjs, (cnt_all.curobjs != 1) ? "s" : "",
+		    leak_nctx, (leak_nctx != 1) ? "s" : "");
+		malloc_printf(
+		    "<jemalloc>: Run pprof on \"%s\" for leak detail\n",
+		    filename);
+	}
+
+	return (false);
+label_error:
+	prof_leave();
+	return (true);
+}
+
+#define	DUMP_FILENAME_BUFSIZE	(PATH_MAX + 1)
+static void
+prof_dump_filename(char *filename, char v, int64_t vseq)
+{
+
+	cassert(config_prof);
+
+	if (vseq != UINT64_C(0xffffffffffffffff)) {
+	        /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
+		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
+		    "%s.%d.%"PRIu64".%c%"PRId64".heap",
+		    opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq);
+	} else {
+	        /* "<prefix>.<pid>.<seq>.<v>.heap" */
+		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
+		    "%s.%d.%"PRIu64".%c.heap",
+		    opt_prof_prefix, (int)getpid(), prof_dump_seq, v);
+	}
+}
+
+static void
+prof_fdump(void)
+{
+	char filename[DUMP_FILENAME_BUFSIZE];
+
+	cassert(config_prof);
+
+	if (prof_booted == false)
+		return;
+
+	if (opt_prof_prefix[0] != '\0') {
+		malloc_mutex_lock(&prof_dump_seq_mtx);
+		prof_dump_filename(filename, 'f', UINT64_C(0xffffffffffffffff));
+		malloc_mutex_unlock(&prof_dump_seq_mtx);
+		prof_dump(false, filename, opt_prof_leak);
+	}
+}
+
+void
+prof_idump(void)
+{
+	char filename[PATH_MAX + 1];
+
+	cassert(config_prof);
+
+	if (prof_booted == false)
+		return;
+	malloc_mutex_lock(&enq_mtx);
+	if (enq) {
+		enq_idump = true;
+		malloc_mutex_unlock(&enq_mtx);
+		return;
+	}
+	malloc_mutex_unlock(&enq_mtx);
+
+	if (opt_prof_prefix[0] != '\0') {
+		malloc_mutex_lock(&prof_dump_seq_mtx);
+		prof_dump_filename(filename, 'i', prof_dump_iseq);
+		prof_dump_iseq++;
+		malloc_mutex_unlock(&prof_dump_seq_mtx);
+		prof_dump(false, filename, false);
+	}
+}
+
+bool
+prof_mdump(const char *filename)
+{
+	char filename_buf[DUMP_FILENAME_BUFSIZE];
+
+	cassert(config_prof);
+
+	if (opt_prof == false || prof_booted == false)
+		return (true);
+
+	if (filename == NULL) {
+		/* No filename specified, so automatically generate one. */
+		if (opt_prof_prefix[0] == '\0')
+			return (true);
+		malloc_mutex_lock(&prof_dump_seq_mtx);
+		prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
+		prof_dump_mseq++;
+		malloc_mutex_unlock(&prof_dump_seq_mtx);
+		filename = filename_buf;
+	}
+	return (prof_dump(true, filename, false));
+}
+
+void
+prof_gdump(void)
+{
+	char filename[DUMP_FILENAME_BUFSIZE];
+
+	cassert(config_prof);
+
+	if (prof_booted == false)
+		return;
+	malloc_mutex_lock(&enq_mtx);
+	if (enq) {
+		enq_gdump = true;
+		malloc_mutex_unlock(&enq_mtx);
+		return;
+	}
+	malloc_mutex_unlock(&enq_mtx);
+
+	if (opt_prof_prefix[0] != '\0') {
+		malloc_mutex_lock(&prof_dump_seq_mtx);
+		prof_dump_filename(filename, 'u', prof_dump_useq);
+		prof_dump_useq++;
+		malloc_mutex_unlock(&prof_dump_seq_mtx);
+		prof_dump(false, filename, false);
+	}
+}
+
+static void
+prof_bt_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
+{
+	size_t ret1, ret2;
+	uint64_t h;
+	prof_bt_t *bt = (prof_bt_t *)key;
+
+	cassert(config_prof);
+	assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
+	assert(hash1 != NULL);
+	assert(hash2 != NULL);
+
+	h = hash(bt->vec, bt->len * sizeof(void *),
+	    UINT64_C(0x94122f335b332aea));
+	if (minbits <= 32) {
+		/*
+		 * Avoid doing multiple hashes, since a single hash provides
+		 * enough bits.
+		 */
+		ret1 = h & ZU(0xffffffffU);
+		ret2 = h >> 32;
+	} else {
+		ret1 = h;
+		ret2 = hash(bt->vec, bt->len * sizeof(void *),
+		    UINT64_C(0x8432a476666bbc13));
+	}
+
+	*hash1 = ret1;
+	*hash2 = ret2;
+}
+
+static bool
+prof_bt_keycomp(const void *k1, const void *k2)
+{
+	const prof_bt_t *bt1 = (prof_bt_t *)k1;
+	const prof_bt_t *bt2 = (prof_bt_t *)k2;
+
+	cassert(config_prof);
+
+	if (bt1->len != bt2->len)
+		return (false);
+	return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
+}
+
+static malloc_mutex_t *
+prof_ctx_mutex_choose(void)
+{
+	unsigned nctxs = atomic_add_u(&cum_ctxs, 1);
+
+	return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]);
+}
+
+prof_tdata_t *
+prof_tdata_init(void)
+{
+	prof_tdata_t *prof_tdata;
+
+	cassert(config_prof);
+
+	/* Initialize an empty cache for this thread. */
+	prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t));
+	if (prof_tdata == NULL)
+		return (NULL);
+
+	if (ckh_new(&prof_tdata->bt2cnt, PROF_CKH_MINITEMS,
+	    prof_bt_hash, prof_bt_keycomp)) {
+		idalloc(prof_tdata);
+		return (NULL);
+	}
+	ql_new(&prof_tdata->lru_ql);
+
+	prof_tdata->vec = imalloc(sizeof(void *) * PROF_BT_MAX);
+	if (prof_tdata->vec == NULL) {
+		ckh_delete(&prof_tdata->bt2cnt);
+		idalloc(prof_tdata);
+		return (NULL);
+	}
+
+	prof_tdata->prng_state = 0;
+	prof_tdata->threshold = 0;
+	prof_tdata->accum = 0;
+
+	prof_tdata_tsd_set(&prof_tdata);
+
+	return (prof_tdata);
+}
+
+void
+prof_tdata_cleanup(void *arg)
+{
+	prof_thr_cnt_t *cnt;
+	prof_tdata_t *prof_tdata = *(prof_tdata_t **)arg;
+
+	cassert(config_prof);
+
+	/*
+	 * Delete the hash table.  All of its contents can still be iterated
+	 * over via the LRU.
+	 */
+	ckh_delete(&prof_tdata->bt2cnt);
+
+	/* Iteratively merge cnts into the global stats and delete them. */
+	while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) {
+		ql_remove(&prof_tdata->lru_ql, cnt, lru_link);
+		prof_ctx_merge(cnt->ctx, cnt);
+		idalloc(cnt);
+	}
+
+	idalloc(prof_tdata->vec);
+
+	idalloc(prof_tdata);
+	prof_tdata = NULL;
+	prof_tdata_tsd_set(&prof_tdata);
+}
+
+void
+prof_boot0(void)
+{
+
+	cassert(config_prof);
+
+	memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
+	    sizeof(PROF_PREFIX_DEFAULT));
+}
+
+void
+prof_boot1(void)
+{
+
+	cassert(config_prof);
+
+	/*
+	 * opt_prof and prof_promote must be in their final state before any
+	 * arenas are initialized, so this function must be executed early.
+	 */
+
+	if (opt_prof_leak && opt_prof == false) {
+		/*
+		 * Enable opt_prof, but in such a way that profiles are never
+		 * automatically dumped.
+		 */
+		opt_prof = true;
+		opt_prof_gdump = false;
+		prof_interval = 0;
+	} else if (opt_prof) {
+		if (opt_lg_prof_interval >= 0) {
+			prof_interval = (((uint64_t)1U) <<
+			    opt_lg_prof_interval);
+		} else
+			prof_interval = 0;
+	}
+
+	prof_promote = (opt_prof && opt_lg_prof_sample > LG_PAGE);
+}
+
+bool
+prof_boot2(void)
+{
+
+	cassert(config_prof);
+
+	if (opt_prof) {
+		unsigned i;
+
+		if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash,
+		    prof_bt_keycomp))
+			return (true);
+		if (malloc_mutex_init(&bt2ctx_mtx))
+			return (true);
+		if (prof_tdata_tsd_boot()) {
+			malloc_write(
+			    "<jemalloc>: Error in pthread_key_create()\n");
+			abort();
+		}
+
+		if (malloc_mutex_init(&prof_dump_seq_mtx))
+			return (true);
+
+		if (malloc_mutex_init(&enq_mtx))
+			return (true);
+		enq = false;
+		enq_idump = false;
+		enq_gdump = false;
+
+		if (atexit(prof_fdump) != 0) {
+			malloc_write("<jemalloc>: Error in atexit()\n");
+			if (opt_abort)
+				abort();
+		}
+
+		ctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
+		    sizeof(malloc_mutex_t));
+		if (ctx_locks == NULL)
+			return (true);
+		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
+			if (malloc_mutex_init(&ctx_locks[i]))
+				return (true);
+		}
+	}
+
+#ifdef JEMALLOC_PROF_LIBGCC
+	/*
+	 * Cause the backtracing machinery to allocate its internal state
+	 * before enabling profiling.
+	 */
+	_Unwind_Backtrace(prof_unwind_init_callback, NULL);
+#endif
+
+	prof_booted = true;
+
+	return (false);
+}
+
+/******************************************************************************/
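
A note on the dump file names produced above: prof_dump_filename() emits
"<prefix>.<pid>.<seq>.<v><vseq>.heap" (and drops the <vseq> part for the final
atexit dump).  A minimal standalone sketch of the same formatting, using plain
snprintf() in place of jemalloc's internal malloc_snprintf() purely to keep the
example self-contained ("jeprof" is just a sample prefix):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char filename[256];
	uint64_t seq = 0;	/* stands in for prof_dump_seq */
	int64_t vseq = 3;	/* e.g. the fourth interval-triggered dump */

	/* Same pattern as the non-sentinel branch of prof_dump_filename(). */
	snprintf(filename, sizeof(filename),
	    "%s.%d.%" PRIu64 ".%c%" PRId64 ".heap",
	    "jeprof", (int)getpid(), seq, 'i', vseq);
	printf("%s\n", filename);	/* e.g. jeprof.1234.0.i3.heap */
	return (0);
}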
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/src/quarantine.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/src/quarantine.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,163 @@
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+typedef struct quarantine_s quarantine_t;
+
+struct quarantine_s {
+	size_t	curbytes;
+	size_t	curobjs;
+	size_t	first;
+#define	LG_MAXOBJS_INIT 10
+	size_t	lg_maxobjs;
+	void	*objs[1]; /* Dynamically sized ring buffer. */
+};
+
+static void	quarantine_cleanup(void *arg);
+
+malloc_tsd_data(static, quarantine, quarantine_t *, NULL)
+malloc_tsd_funcs(JEMALLOC_INLINE, quarantine, quarantine_t *, NULL,
+    quarantine_cleanup)
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static quarantine_t	*quarantine_init(size_t lg_maxobjs);
+static quarantine_t	*quarantine_grow(quarantine_t *quarantine);
+static void	quarantine_drain(quarantine_t *quarantine, size_t upper_bound);
+
+/******************************************************************************/
+
+static quarantine_t *
+quarantine_init(size_t lg_maxobjs)
+{
+	quarantine_t *quarantine;
+
+	quarantine = (quarantine_t *)imalloc(offsetof(quarantine_t, objs) +
+	    ((ZU(1) << lg_maxobjs) * sizeof(void *)));
+	if (quarantine == NULL)
+		return (NULL);
+	quarantine->curbytes = 0;
+	quarantine->curobjs = 0;
+	quarantine->first = 0;
+	quarantine->lg_maxobjs = lg_maxobjs;
+
+	quarantine_tsd_set(&quarantine);
+
+	return (quarantine);
+}
+
+static quarantine_t *
+quarantine_grow(quarantine_t *quarantine)
+{
+	quarantine_t *ret;
+
+	ret = quarantine_init(quarantine->lg_maxobjs + 1);
+	if (ret == NULL)
+		return (quarantine);
+
+	ret->curbytes = quarantine->curbytes;
+	if (quarantine->first + quarantine->curobjs < (ZU(1) <<
+	    quarantine->lg_maxobjs)) {
+		/* objs ring buffer data are contiguous. */
+		memcpy(ret->objs, &quarantine->objs[quarantine->first],
+		    quarantine->curobjs * sizeof(void *));
+		ret->curobjs = quarantine->curobjs;
+	} else {
+		/* objs ring buffer data wrap around. */
+		size_t ncopy = (ZU(1) << quarantine->lg_maxobjs) -
+		    quarantine->first;
+		memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy *
+		    sizeof(void *));
+		ret->curobjs = ncopy;
+		if (quarantine->curobjs != ncopy) {
+			memcpy(&ret->objs[ncopy], quarantine->objs,
+			    (quarantine->curobjs - ncopy) * sizeof(void *));
+			ret->curobjs = quarantine->curobjs;
+		}
+	}
+
+	return (ret);
+}
+
+static void
+quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
+{
+
+	while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) {
+		void *ptr = quarantine->objs[quarantine->first];
+		size_t usize = isalloc(ptr, config_prof);
+		idalloc(ptr);
+		quarantine->curbytes -= usize;
+		quarantine->curobjs--;
+		quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
+		    quarantine->lg_maxobjs) - 1);
+	}
+}
+
+void
+quarantine(void *ptr)
+{
+	quarantine_t *quarantine;
+	size_t usize = isalloc(ptr, config_prof);
+
+	assert(config_fill);
+	assert(opt_quarantine);
+
+	quarantine = *quarantine_tsd_get();
+	if (quarantine == NULL && (quarantine =
+	    quarantine_init(LG_MAXOBJS_INIT)) == NULL) {
+		idalloc(ptr);
+		return;
+	}
+	/*
+	 * Drain one or more objects if the quarantine size limit would be
+	 * exceeded by appending ptr.
+	 */
+	if (quarantine->curbytes + usize > opt_quarantine) {
+		size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
+		    - usize : 0;
+		quarantine_drain(quarantine, upper_bound);
+	}
+	/* Grow the quarantine ring buffer if it's full. */
+	if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
+		quarantine = quarantine_grow(quarantine);
+	/* quarantine_grow() must free a slot if it fails to grow. */
+	assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
+	/* Append ptr if its size doesn't exceed the quarantine size. */
+	if (quarantine->curbytes + usize <= opt_quarantine) {
+		size_t offset = (quarantine->first + quarantine->curobjs) &
+		    ((ZU(1) << quarantine->lg_maxobjs) - 1);
+		quarantine->objs[offset] = ptr;
+		quarantine->curbytes += usize;
+		quarantine->curobjs++;
+		if (opt_junk)
+			memset(ptr, 0x5a, usize);
+	} else {
+		assert(quarantine->curbytes == 0);
+		idalloc(ptr);
+	}
+}
+
+static void
+quarantine_cleanup(void *arg)
+{
+	quarantine_t *quarantine = *(quarantine_t **)arg;
+
+	if (quarantine != NULL) {
+		quarantine_drain(quarantine, 0);
+		idalloc(quarantine);
+	}
+}
+
+bool
+quarantine_boot(void)
+{
+
+	assert(config_fill);
+
+	if (quarantine_tsd_boot())
+		return (true);
+
+	return (false);
+}
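
The quarantine above keeps its pointers in a power-of-two ring buffer, so
quarantine() and quarantine_drain() compute slots with a mask rather than a
modulo, exactly as in the offset and first calculations above.  A small
self-contained model of that index arithmetic (the 8-slot size and the
append/drain counts are made up for illustration):

#include <stddef.h>
#include <stdio.h>

#define	LG_MAXOBJS	3	/* toy 8-slot ring */

int
main(void)
{
	size_t mask = ((size_t)1 << LG_MAXOBJS) - 1;
	size_t first = 6, curobjs = 0, i;

	/* Append five objects; the tail slot wraps via the mask. */
	for (i = 0; i < 5; i++) {
		size_t offset = (first + curobjs) & mask;
		printf("append %zu -> slot %zu\n", i, offset);
		curobjs++;
	}
	/* Drain two objects from the head, advancing first the same way. */
	for (i = 0; i < 2; i++) {
		printf("drain  <- slot %zu\n", first);
		first = (first + 1) & mask;
		curobjs--;
	}
	return (0);
}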
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/src/rtree.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/src/rtree.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,46 @@
+#define	JEMALLOC_RTREE_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+rtree_t *
+rtree_new(unsigned bits)
+{
+	rtree_t *ret;
+	unsigned bits_per_level, height, i;
+
+	bits_per_level = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1;
+	height = bits / bits_per_level;
+	if (height * bits_per_level != bits)
+		height++;
+	assert(height * bits_per_level >= bits);
+
+	ret = (rtree_t*)base_alloc(offsetof(rtree_t, level2bits) +
+	    (sizeof(unsigned) * height));
+	if (ret == NULL)
+		return (NULL);
+	memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) *
+	    height));
+
+	if (malloc_mutex_init(&ret->mutex)) {
+		/* Leak the rtree. */
+		return (NULL);
+	}
+	ret->height = height;
+	if (bits_per_level * height > bits)
+		ret->level2bits[0] = bits % bits_per_level;
+	else
+		ret->level2bits[0] = bits_per_level;
+	for (i = 1; i < height; i++)
+		ret->level2bits[i] = bits_per_level;
+
+	ret->root = (void**)base_alloc(sizeof(void *) << ret->level2bits[0]);
+	if (ret->root == NULL) {
+		/*
+		 * We leak the rtree here, since there's no generic base
+		 * deallocation.
+		 */
+		return (NULL);
+	}
+	memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]);
+
+	return (ret);
+}
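
rtree_new() above splits a key of `bits` bits into the smallest number of
levels such that every level except possibly the root indexes bits_per_level
bits, where bits_per_level is derived from RTREE_NODESIZE.  A standalone
sketch of that sizing computation; the RTREE_NODESIZE value below is only an
assumption made for the example:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define	RTREE_NODESIZE	(1U << 14)	/* assumed node size */

/* Round up to the next power of two (same idea as jemalloc's pow2_ceil()). */
static unsigned
pow2_ceil(unsigned x)
{

	x--;
	x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16;
	x++;
	return (x);
}

int
main(void)
{
	unsigned bits = 20;	/* sample key width */
	unsigned bits_per_level =
	    ffs(pow2_ceil(RTREE_NODESIZE / sizeof(void *))) - 1;
	unsigned height = bits / bits_per_level;

	if (height * bits_per_level != bits)
		height++;
	printf("bits_per_level=%u height=%u root bits=%u\n", bits_per_level,
	    height, (bits_per_level * height > bits) ? bits % bits_per_level :
	    bits_per_level);
	return (0);
}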
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/src/stats.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/src/stats.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,550 @@
+#define	JEMALLOC_STATS_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+#define	CTL_GET(n, v, t) do {						\
+	size_t sz = sizeof(t);						\
+	xmallctl(n, v, &sz, NULL, 0);					\
+} while (0)
+
+#define	CTL_I_GET(n, v, t) do {						\
+	size_t mib[6];							\
+	size_t miblen = sizeof(mib) / sizeof(size_t);			\
+	size_t sz = sizeof(t);						\
+	xmallctlnametomib(n, mib, &miblen);				\
+	mib[2] = i;							\
+	xmallctlbymib(mib, miblen, v, &sz, NULL, 0);			\
+} while (0)
+
+#define	CTL_J_GET(n, v, t) do {						\
+	size_t mib[6];							\
+	size_t miblen = sizeof(mib) / sizeof(size_t);			\
+	size_t sz = sizeof(t);						\
+	xmallctlnametomib(n, mib, &miblen);				\
+	mib[2] = j;							\
+	xmallctlbymib(mib, miblen, v, &sz, NULL, 0);			\
+} while (0)
+
+#define	CTL_IJ_GET(n, v, t) do {					\
+	size_t mib[6];							\
+	size_t miblen = sizeof(mib) / sizeof(size_t);			\
+	size_t sz = sizeof(t);						\
+	xmallctlnametomib(n, mib, &miblen);				\
+	mib[2] = i;							\
+	mib[4] = j;							\
+	xmallctlbymib(mib, miblen, v, &sz, NULL, 0);			\
+} while (0)
+
+/******************************************************************************/
+/* Data. */
+
+bool	opt_stats_print = false;
+
+size_t	stats_cactive = 0;
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static void	stats_arena_bins_print(void (*write_cb)(void *, const char *),
+    void *cbopaque, unsigned i);
+static void	stats_arena_lruns_print(void (*write_cb)(void *, const char *),
+    void *cbopaque, unsigned i);
+static void	stats_arena_print(void (*write_cb)(void *, const char *),
+    void *cbopaque, unsigned i, bool bins, bool large);
+
+/******************************************************************************/
+
+static void
+stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
+    unsigned i)
+{
+	size_t page;
+	bool config_tcache;
+	unsigned nbins, j, gap_start;
+
+	CTL_GET("arenas.page", &page, size_t);
+
+	CTL_GET("config.tcache", &config_tcache, bool);
+	if (config_tcache) {
+		malloc_cprintf(write_cb, cbopaque,
+		    "bins:     bin  size regs pgs    allocated      nmalloc"
+		    "      ndalloc    nrequests       nfills     nflushes"
+		    "      newruns       reruns      curruns\n");
+	} else {
+		malloc_cprintf(write_cb, cbopaque,
+		    "bins:     bin  size regs pgs    allocated      nmalloc"
+		    "      ndalloc      newruns       reruns      curruns\n");
+	}
+	CTL_GET("arenas.nbins", &nbins, unsigned);
+	for (j = 0, gap_start = UINT_MAX; j < nbins; j++) {
+		uint64_t nruns;
+
+		CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t);
+		if (nruns == 0) {
+			if (gap_start == UINT_MAX)
+				gap_start = j;
+		} else {
+			size_t reg_size, run_size, allocated;
+			uint32_t nregs;
+			uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
+			uint64_t reruns;
+			size_t curruns;
+
+			if (gap_start != UINT_MAX) {
+				if (j > gap_start + 1) {
+					/* Gap of more than one size class. */
+					malloc_cprintf(write_cb, cbopaque,
+					    "[%u..%u]\n", gap_start,
+					    j - 1);
+				} else {
+					/* Gap of one size class. */
+					malloc_cprintf(write_cb, cbopaque,
+					    "[%u]\n", gap_start);
+				}
+				gap_start = UINT_MAX;
+			}
+			CTL_J_GET("arenas.bin.0.size", &reg_size, size_t);
+			CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t);
+			CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t);
+			CTL_IJ_GET("stats.arenas.0.bins.0.allocated",
+			    &allocated, size_t);
+			CTL_IJ_GET("stats.arenas.0.bins.0.nmalloc",
+			    &nmalloc, uint64_t);
+			CTL_IJ_GET("stats.arenas.0.bins.0.ndalloc",
+			    &ndalloc, uint64_t);
+			if (config_tcache) {
+				CTL_IJ_GET("stats.arenas.0.bins.0.nrequests",
+				    &nrequests, uint64_t);
+				CTL_IJ_GET("stats.arenas.0.bins.0.nfills",
+				    &nfills, uint64_t);
+				CTL_IJ_GET("stats.arenas.0.bins.0.nflushes",
+				    &nflushes, uint64_t);
+			}
+			CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns,
+			    uint64_t);
+			CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns,
+			    size_t);
+			if (config_tcache) {
+				malloc_cprintf(write_cb, cbopaque,
+				    "%13u %5zu %4u %3zu %12zu %12"PRIu64
+				    " %12"PRIu64" %12"PRIu64" %12"PRIu64
+				    " %12"PRIu64" %12"PRIu64" %12"PRIu64
+				    " %12zu\n",
+				    j, reg_size, nregs, run_size / page,
+				    allocated, nmalloc, ndalloc, nrequests,
+				    nfills, nflushes, nruns, reruns, curruns);
+			} else {
+				malloc_cprintf(write_cb, cbopaque,
+				    "%13u %5zu %4u %3zu %12zu %12"PRIu64
+				    " %12"PRIu64" %12"PRIu64" %12"PRIu64
+				    " %12zu\n",
+				    j, reg_size, nregs, run_size / page,
+				    allocated, nmalloc, ndalloc, nruns, reruns,
+				    curruns);
+			}
+		}
+	}
+	if (gap_start != UINT_MAX) {
+		if (j > gap_start + 1) {
+			/* Gap of more than one size class. */
+			malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n",
+			    gap_start, j - 1);
+		} else {
+			/* Gap of one size class. */
+			malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start);
+		}
+	}
+}
+
+static void
+stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
+    unsigned i)
+{
+	size_t page, nlruns, j;
+	ssize_t gap_start;
+
+	CTL_GET("arenas.page", &page, size_t);
+
+	malloc_cprintf(write_cb, cbopaque,
+	    "large:   size pages      nmalloc      ndalloc    nrequests"
+	    "      curruns\n");
+	CTL_GET("arenas.nlruns", &nlruns, size_t);
+	for (j = 0, gap_start = -1; j < nlruns; j++) {
+		uint64_t nmalloc, ndalloc, nrequests;
+		size_t run_size, curruns;
+
+		CTL_IJ_GET("stats.arenas.0.lruns.0.nmalloc", &nmalloc,
+		    uint64_t);
+		CTL_IJ_GET("stats.arenas.0.lruns.0.ndalloc", &ndalloc,
+		    uint64_t);
+		CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests,
+		    uint64_t);
+		if (nrequests == 0) {
+			if (gap_start == -1)
+				gap_start = j;
+		} else {
+			CTL_J_GET("arenas.lrun.0.size", &run_size, size_t);
+			CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns,
+			    size_t);
+			if (gap_start != -1) {
+				malloc_cprintf(write_cb, cbopaque, "[%zu]\n",
+				    j - gap_start);
+				gap_start = -1;
+			}
+			malloc_cprintf(write_cb, cbopaque,
+			    "%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64
+			    " %12zu\n",
+			    run_size, run_size / page, nmalloc, ndalloc,
+			    nrequests, curruns);
+		}
+	}
+	if (gap_start != -1)
+		malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start);
+}
+
+static void
+stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
+    unsigned i, bool bins, bool large)
+{
+	unsigned nthreads;
+	size_t page, pactive, pdirty, mapped;
+	uint64_t npurge, nmadvise, purged;
+	size_t small_allocated;
+	uint64_t small_nmalloc, small_ndalloc, small_nrequests;
+	size_t large_allocated;
+	uint64_t large_nmalloc, large_ndalloc, large_nrequests;
+
+	CTL_GET("arenas.page", &page, size_t);
+
+	CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned);
+	malloc_cprintf(write_cb, cbopaque,
+	    "assigned threads: %u\n", nthreads);
+	CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t);
+	CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t);
+	CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t);
+	CTL_I_GET("stats.arenas.0.nmadvise", &nmadvise, uint64_t);
+	CTL_I_GET("stats.arenas.0.purged", &purged, uint64_t);
+	malloc_cprintf(write_cb, cbopaque,
+	    "dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s,"
+	    " %"PRIu64" madvise%s, %"PRIu64" purged\n",
+	    pactive, pdirty, npurge, npurge == 1 ? "" : "s",
+	    nmadvise, nmadvise == 1 ? "" : "s", purged);
+
+	malloc_cprintf(write_cb, cbopaque,
+	    "            allocated      nmalloc      ndalloc    nrequests\n");
+	CTL_I_GET("stats.arenas.0.small.allocated", &small_allocated, size_t);
+	CTL_I_GET("stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t);
+	CTL_I_GET("stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t);
+	CTL_I_GET("stats.arenas.0.small.nrequests", &small_nrequests, uint64_t);
+	malloc_cprintf(write_cb, cbopaque,
+	    "small:   %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
+	    small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
+	CTL_I_GET("stats.arenas.0.large.allocated", &large_allocated, size_t);
+	CTL_I_GET("stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t);
+	CTL_I_GET("stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t);
+	CTL_I_GET("stats.arenas.0.large.nrequests", &large_nrequests, uint64_t);
+	malloc_cprintf(write_cb, cbopaque,
+	    "large:   %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
+	    large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
+	malloc_cprintf(write_cb, cbopaque,
+	    "total:   %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
+	    small_allocated + large_allocated,
+	    small_nmalloc + large_nmalloc,
+	    small_ndalloc + large_ndalloc,
+	    small_nrequests + large_nrequests);
+	malloc_cprintf(write_cb, cbopaque, "active:  %12zu\n", pactive * page);
+	CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t);
+	malloc_cprintf(write_cb, cbopaque, "mapped:  %12zu\n", mapped);
+
+	if (bins)
+		stats_arena_bins_print(write_cb, cbopaque, i);
+	if (large)
+		stats_arena_lruns_print(write_cb, cbopaque, i);
+}
+
+void
+stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
+    const char *opts)
+{
+	int err;
+	uint64_t epoch;
+	size_t u64sz;
+	bool general = true;
+	bool merged = true;
+	bool unmerged = true;
+	bool bins = true;
+	bool large = true;
+
+	/*
+	 * Refresh stats, in case mallctl() was called by the application.
+	 *
+	 * Check for OOM here, since refreshing the ctl cache can trigger
+	 * allocation.  In practice, none of the subsequent mallctl()-related
+	 * calls in this function will cause OOM if this one succeeds.
+	 */
+	epoch = 1;
+	u64sz = sizeof(uint64_t);
+	err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
+	if (err != 0) {
+		if (err == EAGAIN) {
+			malloc_write("<jemalloc>: Memory allocation failure in "
+			    "mallctl(\"epoch\", ...)\n");
+			return;
+		}
+		malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
+		    "...)\n");
+		abort();
+	}
+
+	if (write_cb == NULL) {
+		/*
+		 * The caller did not provide an alternate write_cb callback
+		 * function, so use the default one.  malloc_write() is an
+		 * inline function, so use malloc_message() directly here.
+		 */
+		write_cb = je_malloc_message;
+		cbopaque = NULL;
+	}
+
+	if (opts != NULL) {
+		unsigned i;
+
+		for (i = 0; opts[i] != '\0'; i++) {
+			switch (opts[i]) {
+			case 'g':
+				general = false;
+				break;
+			case 'm':
+				merged = false;
+				break;
+			case 'a':
+				unmerged = false;
+				break;
+			case 'b':
+				bins = false;
+				break;
+			case 'l':
+				large = false;
+				break;
+			default:;
+			}
+		}
+	}
+
+	write_cb(cbopaque, "___ Begin jemalloc statistics ___\n");
+	if (general) {
+		int err;
+		const char *cpv;
+		bool bv;
+		unsigned uv;
+		ssize_t ssv;
+		size_t sv, bsz, ssz, sssz, cpsz;
+
+		bsz = sizeof(bool);
+		ssz = sizeof(size_t);
+		sssz = sizeof(ssize_t);
+		cpsz = sizeof(const char *);
+
+		CTL_GET("version", &cpv, const char *);
+		malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
+		CTL_GET("config.debug", &bv, bool);
+		malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
+		    bv ? "enabled" : "disabled");
+
+#define OPT_WRITE_BOOL(n)						\
+		if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0))	\
+		    == 0) {						\
+			malloc_cprintf(write_cb, cbopaque,		\
+			    "  opt."#n": %s\n", bv ? "true" : "false");	\
+		}
+#define OPT_WRITE_SIZE_T(n)						\
+		if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0))	\
+		    == 0) {						\
+			malloc_cprintf(write_cb, cbopaque,		\
+			"  opt."#n": %zu\n", sv);			\
+		}
+#define OPT_WRITE_SSIZE_T(n)						\
+		if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0))	\
+		    == 0) {						\
+			malloc_cprintf(write_cb, cbopaque,		\
+			    "  opt."#n": %zd\n", ssv);			\
+		}
+#define OPT_WRITE_CHAR_P(n)						\
+		if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0))	\
+		    == 0) {						\
+			malloc_cprintf(write_cb, cbopaque,		\
+			    "  opt."#n": \"%s\"\n", cpv);		\
+		}
+
+		write_cb(cbopaque, "Run-time option settings:\n");
+		OPT_WRITE_BOOL(abort)
+		OPT_WRITE_SIZE_T(lg_chunk)
+		OPT_WRITE_SIZE_T(narenas)
+		OPT_WRITE_SSIZE_T(lg_dirty_mult)
+		OPT_WRITE_BOOL(stats_print)
+		OPT_WRITE_BOOL(junk)
+		OPT_WRITE_SIZE_T(quarantine)
+		OPT_WRITE_BOOL(redzone)
+		OPT_WRITE_BOOL(zero)
+		OPT_WRITE_BOOL(utrace)
+		OPT_WRITE_BOOL(valgrind)
+		OPT_WRITE_BOOL(xmalloc)
+		OPT_WRITE_BOOL(tcache)
+		OPT_WRITE_SSIZE_T(lg_tcache_max)
+		OPT_WRITE_BOOL(prof)
+		OPT_WRITE_CHAR_P(prof_prefix)
+		OPT_WRITE_BOOL(prof_active)
+		OPT_WRITE_SSIZE_T(lg_prof_sample)
+		OPT_WRITE_BOOL(prof_accum)
+		OPT_WRITE_SSIZE_T(lg_prof_interval)
+		OPT_WRITE_BOOL(prof_gdump)
+		OPT_WRITE_BOOL(prof_leak)
+
+#undef OPT_WRITE_BOOL
+#undef OPT_WRITE_SIZE_T
+#undef OPT_WRITE_SSIZE_T
+#undef OPT_WRITE_CHAR_P
+
+		malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);
+
+		CTL_GET("arenas.narenas", &uv, unsigned);
+		malloc_cprintf(write_cb, cbopaque, "Max arenas: %u\n", uv);
+
+		malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
+		    sizeof(void *));
+
+		CTL_GET("arenas.quantum", &sv, size_t);
+		malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
+
+		CTL_GET("arenas.page", &sv, size_t);
+		malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
+
+		CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t);
+		if (ssv >= 0) {
+			malloc_cprintf(write_cb, cbopaque,
+			    "Min active:dirty page ratio per arena: %u:1\n",
+			    (1U << ssv));
+		} else {
+			write_cb(cbopaque,
+			    "Min active:dirty page ratio per arena: N/A\n");
+		}
+		if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0))
+		    == 0) {
+			malloc_cprintf(write_cb, cbopaque,
+			    "Maximum thread-cached size class: %zu\n", sv);
+		}
+		if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 &&
+		    bv) {
+			CTL_GET("opt.lg_prof_sample", &sv, size_t);
+			malloc_cprintf(write_cb, cbopaque,
+			    "Average profile sample interval: %"PRIu64
+			    " (2^%zu)\n", (((uint64_t)1U) << sv), sv);
+
+			CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
+			if (ssv >= 0) {
+				malloc_cprintf(write_cb, cbopaque,
+				    "Average profile dump interval: %"PRIu64
+				    " (2^%zd)\n",
+				    (((uint64_t)1U) << ssv), ssv);
+			} else {
+				write_cb(cbopaque,
+				    "Average profile dump interval: N/A\n");
+			}
+		}
+		CTL_GET("opt.lg_chunk", &sv, size_t);
+		malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n",
+		    (ZU(1) << sv), sv);
+	}
+
+	if (config_stats) {
+		size_t *cactive;
+		size_t allocated, active, mapped;
+		size_t chunks_current, chunks_high;
+		uint64_t chunks_total;
+		size_t huge_allocated;
+		uint64_t huge_nmalloc, huge_ndalloc;
+
+		CTL_GET("stats.cactive", &cactive, size_t *);
+		CTL_GET("stats.allocated", &allocated, size_t);
+		CTL_GET("stats.active", &active, size_t);
+		CTL_GET("stats.mapped", &mapped, size_t);
+		malloc_cprintf(write_cb, cbopaque,
+		    "Allocated: %zu, active: %zu, mapped: %zu\n",
+		    allocated, active, mapped);
+		malloc_cprintf(write_cb, cbopaque,
+		    "Current active ceiling: %zu\n", atomic_read_z(cactive));
+
+		/* Print chunk stats. */
+		CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
+		CTL_GET("stats.chunks.high", &chunks_high, size_t);
+		CTL_GET("stats.chunks.current", &chunks_current, size_t);
+		malloc_cprintf(write_cb, cbopaque, "chunks: nchunks   "
+		    "highchunks    curchunks\n");
+		malloc_cprintf(write_cb, cbopaque, "  %13"PRIu64"%13zu%13zu\n",
+		    chunks_total, chunks_high, chunks_current);
+
+		/* Print huge stats. */
+		CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);
+		CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t);
+		CTL_GET("stats.huge.allocated", &huge_allocated, size_t);
+		malloc_cprintf(write_cb, cbopaque,
+		    "huge: nmalloc      ndalloc    allocated\n");
+		malloc_cprintf(write_cb, cbopaque,
+		    " %12"PRIu64" %12"PRIu64" %12zu\n",
+		    huge_nmalloc, huge_ndalloc, huge_allocated);
+
+		if (merged) {
+			unsigned narenas;
+
+			CTL_GET("arenas.narenas", &narenas, unsigned);
+			{
+				bool initialized[narenas];
+				size_t isz;
+				unsigned i, ninitialized;
+
+				isz = sizeof(initialized);
+				xmallctl("arenas.initialized", initialized,
+				    &isz, NULL, 0);
+				for (i = ninitialized = 0; i < narenas; i++) {
+					if (initialized[i])
+						ninitialized++;
+				}
+
+				if (ninitialized > 1 || unmerged == false) {
+					/* Print merged arena stats. */
+					malloc_cprintf(write_cb, cbopaque,
+					    "\nMerged arenas stats:\n");
+					stats_arena_print(write_cb, cbopaque,
+					    narenas, bins, large);
+				}
+			}
+		}
+
+		if (unmerged) {
+			unsigned narenas;
+
+			/* Print stats for each arena. */
+
+			CTL_GET("arenas.narenas", &narenas, unsigned);
+			{
+				bool initialized[narenas];
+				size_t isz;
+				unsigned i;
+
+				isz = sizeof(initialized);
+				xmallctl("arenas.initialized", initialized,
+				    &isz, NULL, 0);
+
+				for (i = 0; i < narenas; i++) {
+					if (initialized[i]) {
+						malloc_cprintf(write_cb,
+						    cbopaque,
+						    "\narenas[%u]:\n", i);
+						stats_arena_print(write_cb,
+						    cbopaque, i, bins, large);
+					}
+				}
+			}
+		}
+	}
+	write_cb(cbopaque, "--- End jemalloc statistics ---\n");
+}
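
stats_print() above backs the public malloc_stats_print(3) interface, and the
opts string it parses only switches report sections off: 'g' general, 'm'
merged arenas, 'a' per-arena, 'b' bins, 'l' large objects.  A minimal caller,
assuming the prototype is picked up from FreeBSD's <malloc_np.h> (other
jemalloc builds export it via <jemalloc/jemalloc.h>):

#include <stdlib.h>
#include <malloc_np.h>

int
main(void)
{
	void *p = malloc(123);

	/*
	 * Suppress the per-arena ('a') and bin ('b') sections; a NULL
	 * write_cb sends the report to the default output (stderr).
	 */
	malloc_stats_print(NULL, NULL, "ab");
	free(p);
	return (0);
}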
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/src/tcache.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/src/tcache.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,435 @@
+#define	JEMALLOC_TCACHE_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+malloc_tsd_data(, tcache, tcache_t *, NULL)
+malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)
+
+bool	opt_tcache = true;
+ssize_t	opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
+
+tcache_bin_info_t	*tcache_bin_info;
+static unsigned		stack_nelms; /* Total stack elms per tcache. */
+
+size_t			nhbins;
+size_t			tcache_maxclass;
+
+/******************************************************************************/
+
+void *
+tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
+{
+	void *ret;
+
+	arena_tcache_fill_small(tcache->arena, tbin, binind,
+	    config_prof ? tcache->prof_accumbytes : 0);
+	if (config_prof)
+		tcache->prof_accumbytes = 0;
+	ret = tcache_alloc_easy(tbin);
+
+	return (ret);
+}
+
+void
+tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
+    tcache_t *tcache)
+{
+	void *ptr;
+	unsigned i, nflush, ndeferred;
+	bool merged_stats = false;
+
+	assert(binind < NBINS);
+	assert(rem <= tbin->ncached);
+
+	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
+		/* Lock the arena bin associated with the first object. */
+		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
+		    tbin->avail[0]);
+		arena_t *arena = chunk->arena;
+		arena_bin_t *bin = &arena->bins[binind];
+
+		if (config_prof && arena == tcache->arena) {
+			malloc_mutex_lock(&arena->lock);
+			arena_prof_accum(arena, tcache->prof_accumbytes);
+			malloc_mutex_unlock(&arena->lock);
+			tcache->prof_accumbytes = 0;
+		}
+
+		malloc_mutex_lock(&bin->lock);
+		if (config_stats && arena == tcache->arena) {
+			assert(merged_stats == false);
+			merged_stats = true;
+			bin->stats.nflushes++;
+			bin->stats.nrequests += tbin->tstats.nrequests;
+			tbin->tstats.nrequests = 0;
+		}
+		ndeferred = 0;
+		for (i = 0; i < nflush; i++) {
+			ptr = tbin->avail[i];
+			assert(ptr != NULL);
+			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+			if (chunk->arena == arena) {
+				size_t pageind = ((uintptr_t)ptr -
+				    (uintptr_t)chunk) >> LG_PAGE;
+				arena_chunk_map_t *mapelm =
+				    &chunk->map[pageind-map_bias];
+				if (config_fill && opt_junk) {
+					arena_alloc_junk_small(ptr,
+					    &arena_bin_info[binind], true);
+				}
+				arena_dalloc_bin(arena, chunk, ptr, mapelm);
+			} else {
+				/*
+				 * This object was allocated via a different
+				 * arena bin than the one that is currently
+				 * locked.  Stash the object, so that it can be
+				 * handled in a future pass.
+				 */
+				tbin->avail[ndeferred] = ptr;
+				ndeferred++;
+			}
+		}
+		malloc_mutex_unlock(&bin->lock);
+	}
+	if (config_stats && merged_stats == false) {
+		/*
+		 * The flush loop didn't happen to flush to this thread's
+		 * arena, so the stats didn't get merged.  Manually do so now.
+		 */
+		arena_bin_t *bin = &tcache->arena->bins[binind];
+		malloc_mutex_lock(&bin->lock);
+		bin->stats.nflushes++;
+		bin->stats.nrequests += tbin->tstats.nrequests;
+		tbin->tstats.nrequests = 0;
+		malloc_mutex_unlock(&bin->lock);
+	}
+
+	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
+	    rem * sizeof(void *));
+	tbin->ncached = rem;
+	if ((int)tbin->ncached < tbin->low_water)
+		tbin->low_water = tbin->ncached;
+}
+
+void
+tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
+    tcache_t *tcache)
+{
+	void *ptr;
+	unsigned i, nflush, ndeferred;
+	bool merged_stats = false;
+
+	assert(binind < nhbins);
+	assert(rem <= tbin->ncached);
+
+	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
+		/* Lock the arena associated with the first object. */
+		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
+		    tbin->avail[0]);
+		arena_t *arena = chunk->arena;
+
+		malloc_mutex_lock(&arena->lock);
+		if ((config_prof || config_stats) && arena == tcache->arena) {
+			if (config_prof) {
+				arena_prof_accum(arena,
+				    tcache->prof_accumbytes);
+				tcache->prof_accumbytes = 0;
+			}
+			if (config_stats) {
+				merged_stats = true;
+				arena->stats.nrequests_large +=
+				    tbin->tstats.nrequests;
+				arena->stats.lstats[binind - NBINS].nrequests +=
+				    tbin->tstats.nrequests;
+				tbin->tstats.nrequests = 0;
+			}
+		}
+		ndeferred = 0;
+		for (i = 0; i < nflush; i++) {
+			ptr = tbin->avail[i];
+			assert(ptr != NULL);
+			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+			if (chunk->arena == arena)
+				arena_dalloc_large(arena, chunk, ptr);
+			else {
+				/*
+				 * This object was allocated via a different
+				 * arena than the one that is currently locked.
+				 * Stash the object, so that it can be handled
+				 * in a future pass.
+				 */
+				tbin->avail[ndeferred] = ptr;
+				ndeferred++;
+			}
+		}
+		malloc_mutex_unlock(&arena->lock);
+	}
+	if (config_stats && merged_stats == false) {
+		/*
+		 * The flush loop didn't happen to flush to this thread's
+		 * arena, so the stats didn't get merged.  Manually do so now.
+		 */
+		arena_t *arena = tcache->arena;
+		malloc_mutex_lock(&arena->lock);
+		arena->stats.nrequests_large += tbin->tstats.nrequests;
+		arena->stats.lstats[binind - NBINS].nrequests +=
+		    tbin->tstats.nrequests;
+		tbin->tstats.nrequests = 0;
+		malloc_mutex_unlock(&arena->lock);
+	}
+
+	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
+	    rem * sizeof(void *));
+	tbin->ncached = rem;
+	if ((int)tbin->ncached < tbin->low_water)
+		tbin->low_water = tbin->ncached;
+}
+
+void
+tcache_arena_associate(tcache_t *tcache, arena_t *arena)
+{
+
+	if (config_stats) {
+		/* Link into list of extant tcaches. */
+		malloc_mutex_lock(&arena->lock);
+		ql_elm_new(tcache, link);
+		ql_tail_insert(&arena->tcache_ql, tcache, link);
+		malloc_mutex_unlock(&arena->lock);
+	}
+	tcache->arena = arena;
+}
+
+void
+tcache_arena_dissociate(tcache_t *tcache)
+{
+
+	if (config_stats) {
+		/* Unlink from list of extant tcaches. */
+		malloc_mutex_lock(&tcache->arena->lock);
+		ql_remove(&tcache->arena->tcache_ql, tcache, link);
+		malloc_mutex_unlock(&tcache->arena->lock);
+		tcache_stats_merge(tcache, tcache->arena);
+	}
+}
+
+tcache_t *
+tcache_create(arena_t *arena)
+{
+	tcache_t *tcache;
+	size_t size, stack_offset;
+	unsigned i;
+
+	size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
+	/* Naturally align the pointer stacks. */
+	size = PTR_CEILING(size);
+	stack_offset = size;
+	size += stack_nelms * sizeof(void *);
+	/*
+	 * Round up to the nearest multiple of the cacheline size, in order to
+	 * avoid the possibility of false cacheline sharing.
+	 *
+	 * This relies on the same logic as in ipalloc(); ipalloc() itself
+	 * cannot be called here because of tcache bootstrapping issues.
+	 */
+	size = (size + CACHELINE_MASK) & (-CACHELINE);
+
+	if (size <= SMALL_MAXCLASS)
+		tcache = (tcache_t *)arena_malloc_small(arena, size, true);
+	else if (size <= tcache_maxclass)
+		tcache = (tcache_t *)arena_malloc_large(arena, size, true);
+	else
+		tcache = (tcache_t *)icalloc(size);
+
+	if (tcache == NULL)
+		return (NULL);
+
+	tcache_arena_associate(tcache, arena);
+
+	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
+	for (i = 0; i < nhbins; i++) {
+		tcache->tbins[i].lg_fill_div = 1;
+		tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
+		    (uintptr_t)stack_offset);
+		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
+	}
+
+	tcache_tsd_set(&tcache);
+
+	return (tcache);
+}
+
+void
+tcache_destroy(tcache_t *tcache)
+{
+	unsigned i;
+	size_t tcache_size;
+
+	tcache_arena_dissociate(tcache);
+
+	for (i = 0; i < NBINS; i++) {
+		tcache_bin_t *tbin = &tcache->tbins[i];
+		tcache_bin_flush_small(tbin, i, 0, tcache);
+
+		if (config_stats && tbin->tstats.nrequests != 0) {
+			arena_t *arena = tcache->arena;
+			arena_bin_t *bin = &arena->bins[i];
+			malloc_mutex_lock(&bin->lock);
+			bin->stats.nrequests += tbin->tstats.nrequests;
+			malloc_mutex_unlock(&bin->lock);
+		}
+	}
+
+	for (; i < nhbins; i++) {
+		tcache_bin_t *tbin = &tcache->tbins[i];
+		tcache_bin_flush_large(tbin, i, 0, tcache);
+
+		if (config_stats && tbin->tstats.nrequests != 0) {
+			arena_t *arena = tcache->arena;
+			malloc_mutex_lock(&arena->lock);
+			arena->stats.nrequests_large += tbin->tstats.nrequests;
+			arena->stats.lstats[i - NBINS].nrequests +=
+			    tbin->tstats.nrequests;
+			malloc_mutex_unlock(&arena->lock);
+		}
+	}
+
+	if (config_prof && tcache->prof_accumbytes > 0) {
+		malloc_mutex_lock(&tcache->arena->lock);
+		arena_prof_accum(tcache->arena, tcache->prof_accumbytes);
+		malloc_mutex_unlock(&tcache->arena->lock);
+	}
+
+	tcache_size = arena_salloc(tcache, false);
+	if (tcache_size <= SMALL_MAXCLASS) {
+		arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
+		arena_t *arena = chunk->arena;
+		size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
+		    LG_PAGE;
+		arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];
+		arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
+		    (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) <<
+		    LG_PAGE));
+		arena_bin_t *bin = run->bin;
+
+		malloc_mutex_lock(&bin->lock);
+		arena_dalloc_bin(arena, chunk, tcache, mapelm);
+		malloc_mutex_unlock(&bin->lock);
+	} else if (tcache_size <= tcache_maxclass) {
+		arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
+		arena_t *arena = chunk->arena;
+
+		malloc_mutex_lock(&arena->lock);
+		arena_dalloc_large(arena, chunk, tcache);
+		malloc_mutex_unlock(&arena->lock);
+	} else
+		idalloc(tcache);
+}
+
+void
+tcache_thread_cleanup(void *arg)
+{
+	tcache_t *tcache = *(tcache_t **)arg;
+
+	if (tcache == TCACHE_STATE_DISABLED) {
+		/* Do nothing. */
+	} else if (tcache == TCACHE_STATE_REINCARNATED) {
+		/*
+		 * Another destructor called an allocator function after this
+		 * destructor was called.  Reset tcache to
+		 * TCACHE_STATE_PURGATORY in order to receive another callback.
+		 */
+		tcache = TCACHE_STATE_PURGATORY;
+		tcache_tsd_set(&tcache);
+	} else if (tcache == TCACHE_STATE_PURGATORY) {
+		/*
+		 * The previous time this destructor was called, we set the key
+		 * to TCACHE_STATE_PURGATORY so that other destructors wouldn't
+		 * cause re-creation of the tcache.  This time, do nothing, so
+		 * that the destructor will not be called again.
+		 */
+	} else if (tcache != NULL) {
+		assert(tcache != TCACHE_STATE_PURGATORY);
+		tcache_destroy(tcache);
+		tcache = TCACHE_STATE_PURGATORY;
+		tcache_tsd_set(&tcache);
+	}
+}
+
+void
+tcache_stats_merge(tcache_t *tcache, arena_t *arena)
+{
+	unsigned i;
+
+	/* Merge and reset tcache stats. */
+	for (i = 0; i < NBINS; i++) {
+		arena_bin_t *bin = &arena->bins[i];
+		tcache_bin_t *tbin = &tcache->tbins[i];
+		malloc_mutex_lock(&bin->lock);
+		bin->stats.nrequests += tbin->tstats.nrequests;
+		malloc_mutex_unlock(&bin->lock);
+		tbin->tstats.nrequests = 0;
+	}
+
+	for (; i < nhbins; i++) {
+		malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
+		tcache_bin_t *tbin = &tcache->tbins[i];
+		arena->stats.nrequests_large += tbin->tstats.nrequests;
+		lstats->nrequests += tbin->tstats.nrequests;
+		tbin->tstats.nrequests = 0;
+	}
+}
+
+bool
+tcache_boot0(void)
+{
+	unsigned i;
+
+	/*
+	 * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
+	 * known.
+	 */
+	if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
+		tcache_maxclass = SMALL_MAXCLASS;
+	else if ((1U << opt_lg_tcache_max) > arena_maxclass)
+		tcache_maxclass = arena_maxclass;
+	else
+		tcache_maxclass = (1U << opt_lg_tcache_max);
+
+	nhbins = NBINS + (tcache_maxclass >> LG_PAGE);
+
+	/* Initialize tcache_bin_info. */
+	tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
+	    sizeof(tcache_bin_info_t));
+	if (tcache_bin_info == NULL)
+		return (true);
+	stack_nelms = 0;
+	for (i = 0; i < NBINS; i++) {
+		if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
+			tcache_bin_info[i].ncached_max =
+			    (arena_bin_info[i].nregs << 1);
+		} else {
+			tcache_bin_info[i].ncached_max =
+			    TCACHE_NSLOTS_SMALL_MAX;
+		}
+		stack_nelms += tcache_bin_info[i].ncached_max;
+	}
+	for (; i < nhbins; i++) {
+		tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
+		stack_nelms += tcache_bin_info[i].ncached_max;
+	}
+
+	return (false);
+}
+
+bool
+tcache_boot1(void)
+{
+
+	if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
+		return (true);
+
+	return (false);
+}
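
tcache_boot0() above clamps opt_lg_tcache_max into [SMALL_MAXCLASS,
arena_maxclass] and then derives nhbins from the page count of
tcache_maxclass.  A toy version of just that clamping arithmetic, with
stand-in constants (the real values come from jemalloc's size-class machinery
at build time):

#include <stddef.h>
#include <stdio.h>

#define	LG_PAGE		12
#define	SMALL_MAXCLASS	((size_t)3584)		/* stand-in value */
#define	ARENA_MAXCLASS	((size_t)1 << 21)	/* stand-in value */
#define	NBINS		28			/* stand-in value */

int
main(void)
{
	long opt_lg_tcache_max = 15;	/* LG_TCACHE_MAXCLASS_DEFAULT analogue */
	size_t tcache_maxclass, nhbins;

	if (opt_lg_tcache_max < 0 ||
	    ((size_t)1 << opt_lg_tcache_max) < SMALL_MAXCLASS)
		tcache_maxclass = SMALL_MAXCLASS;
	else if (((size_t)1 << opt_lg_tcache_max) > ARENA_MAXCLASS)
		tcache_maxclass = ARENA_MAXCLASS;
	else
		tcache_maxclass = (size_t)1 << opt_lg_tcache_max;

	nhbins = NBINS + (tcache_maxclass >> LG_PAGE);
	printf("tcache_maxclass=%zu nhbins=%zu\n", tcache_maxclass, nhbins);
	return (0);
}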
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/src/tsd.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/src/tsd.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,72 @@
+#define	JEMALLOC_TSD_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+static unsigned ncleanups;
+static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
+
+/******************************************************************************/
+
+void *
+malloc_tsd_malloc(size_t size)
+{
+
+	/* Avoid choose_arena() in order to dodge bootstrapping issues. */
+	return arena_malloc(arenas[0], size, false, false);
+}
+
+void
+malloc_tsd_dalloc(void *wrapper)
+{
+
+	idalloc(wrapper);
+}
+
+void
+malloc_tsd_no_cleanup(void *arg)
+{
+
+	not_reached();
+}
+
+#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
+void
+_malloc_thread_cleanup(void)
+{
+	bool pending[ncleanups], again;
+	unsigned i;
+
+	for (i = 0; i < ncleanups; i++)
+		pending[i] = true;
+
+	do {
+		again = false;
+		for (i = 0; i < ncleanups; i++) {
+			if (pending[i]) {
+				pending[i] = cleanups[i].f(cleanups[i].arg);
+				if (pending[i])
+					again = true;
+			}
+		}
+	} while (again);
+}
+#endif
+
+void
+malloc_tsd_cleanup_register(bool (*f)(void *), void *arg)
+{
+
+	assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
+	cleanups[ncleanups].f = f;
+	cleanups[ncleanups].arg = arg;
+	ncleanups++;
+}
+
+void
+malloc_tsd_boot(void)
+{
+
+	ncleanups = 0;
+}
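
The _malloc_thread_cleanup() loop above keeps re-running the registered
cleanups until none of them reports pending work, so a destructor that
recreates another subsystem's state still gets that state torn down.  A small
self-contained model of the retry pattern; the two sample cleanups are
invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define	NCLEANUPS	2

/*
 * A cleanup returns true while it still has pending work, matching the
 * convention of the cleanups[] table above.
 */
static bool
cleanup_a(void *arg)
{
	static int calls;

	printf("cleanup_a, call %d\n", ++calls);
	return (calls < 2);	/* pretend one more pass is needed */
}

static bool
cleanup_b(void *arg)
{

	printf("cleanup_b\n");
	return (false);
}

int
main(void)
{
	bool (*cleanups[NCLEANUPS])(void *) = { cleanup_a, cleanup_b };
	bool pending[NCLEANUPS] = { true, true };
	bool again;
	unsigned i;

	do {
		again = false;
		for (i = 0; i < NCLEANUPS; i++) {
			if (pending[i]) {
				pending[i] = cleanups[i](NULL);
				if (pending[i])
					again = true;
			}
		}
	} while (again);
	return (0);
}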
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/jemalloc/src/util.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/jemalloc/src/util.c	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,635 @@
+#define	assert(e) do {							\
+	if (config_debug && !(e)) {					\
+		malloc_write("<jemalloc>: Failed assertion\n");		\
+		abort();						\
+	}								\
+} while (0)
+
+#define	not_reached() do {						\
+	if (config_debug) {						\
+		malloc_write("<jemalloc>: Unreachable code reached\n");	\
+		abort();						\
+	}								\
+} while (0)
+
+#define	not_implemented() do {						\
+	if (config_debug) {						\
+		malloc_write("<jemalloc>: Not implemented\n");		\
+		abort();						\
+	}								\
+} while (0)
+
+#define	JEMALLOC_UTIL_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static void	wrtmessage(void *cbopaque, const char *s);
+#define	U2S_BUFSIZE	((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
+static char	*u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
+    size_t *slen_p);
+#define	D2S_BUFSIZE	(1 + U2S_BUFSIZE)
+static char	*d2s(intmax_t x, char sign, char *s, size_t *slen_p);
+#define	O2S_BUFSIZE	(1 + U2S_BUFSIZE)
+static char	*o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
+#define	X2S_BUFSIZE	(2 + U2S_BUFSIZE)
+static char	*x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
+    size_t *slen_p);
+
+/******************************************************************************/
+
+/* malloc_message() setup. */
+JEMALLOC_CATTR(visibility("hidden"), static)
+void
+wrtmessage(void *cbopaque, const char *s)
+{
+
+#ifdef SYS_write
+	/*
+	 * Use syscall(2) rather than write(2) when possible in order to avoid
+	 * the possibility of memory allocation within libc.  This is necessary
+	 * on FreeBSD; most operating systems do not have this problem though.
+	 */
+	UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
+#else
+	UNUSED int result = write(STDERR_FILENO, s, strlen(s));
+#endif
+}
+
+void	(*je_malloc_message)(void *, const char *s)
+    JEMALLOC_ATTR(visibility("default")) = wrtmessage;
+
+JEMALLOC_CATTR(visibility("hidden"), static)
+void
+wrtmessage_1_0(const char *s1, const char *s2, const char *s3,
+    const char *s4)
+{
+
+	wrtmessage(NULL, s1);
+	wrtmessage(NULL, s2);
+	wrtmessage(NULL, s3);
+	wrtmessage(NULL, s4);
+}
+
+void	(*__malloc_message_1_0)(const char *s1, const char *s2, const char *s3,
+    const char *s4) = wrtmessage_1_0;
+__sym_compat(_malloc_message, __malloc_message_1_0, FBSD_1.0);
+
+/*
+ * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
+ * provide a wrapper.
+ */
+int
+buferror(int errnum, char *buf, size_t buflen)
+{
+#ifdef _GNU_SOURCE
+	char *b = strerror_r(errno, buf, buflen);
+	if (b != buf) {
+		strncpy(buf, b, buflen);
+		buf[buflen-1] = '\0';
+	}
+	return (0);
+#else
+	return (strerror_r(errno, buf, buflen));
+#endif
+}
+
+uintmax_t
+malloc_strtoumax(const char *nptr, char **endptr, int base)
+{
+	uintmax_t ret, digit;
+	int b;
+	bool neg;
+	const char *p, *ns;
+
+	if (base < 0 || base == 1 || base > 36) {
+		errno = EINVAL;
+		return (UINTMAX_MAX);
+	}
+	b = base;
+
+	/* Swallow leading whitespace and get sign, if any. */
+	neg = false;
+	p = nptr;
+	while (true) {
+		switch (*p) {
+		case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
+			p++;
+			break;
+		case '-':
+			neg = true;
+			/* Fall through. */
+		case '+':
+			p++;
+			/* Fall through. */
+		default:
+			goto label_prefix;
+		}
+	}
+
+	/* Get prefix, if any. */
+	label_prefix:
+	/*
+	 * Note where the first non-whitespace/sign character is so that it is
+	 * possible to tell whether any digits are consumed (e.g., "  0" vs.
+	 * "  -x").
+	 */
+	ns = p;
+	if (*p == '0') {
+		switch (p[1]) {
+		case '0': case '1': case '2': case '3': case '4': case '5':
+		case '6': case '7':
+			if (b == 0)
+				b = 8;
+			if (b == 8)
+				p++;
+			break;
+		case 'x':
+			switch (p[2]) {
+			case '0': case '1': case '2': case '3': case '4':
+			case '5': case '6': case '7': case '8': case '9':
+			case 'A': case 'B': case 'C': case 'D': case 'E':
+			case 'F':
+			case 'a': case 'b': case 'c': case 'd': case 'e':
+			case 'f':
+				if (b == 0)
+					b = 16;
+				if (b == 16)
+					p += 2;
+				break;
+			default:
+				break;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+	if (b == 0)
+		b = 10;
+
+	/* Convert. */
+	ret = 0;
+	while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b)
+	    || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b)
+	    || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) {
+		uintmax_t pret = ret;
+		ret *= b;
+		ret += digit;
+		if (ret < pret) {
+			/* Overflow. */
+			errno = ERANGE;
+			return (UINTMAX_MAX);
+		}
+		p++;
+	}
+	if (neg)
+		ret = -ret;
+
+	if (endptr != NULL) {
+		if (p == ns) {
+			/* No characters were converted. */
+			*endptr = (char *)nptr;
+		} else
+			*endptr = (char *)p;
+	}
+
+	return (ret);
+}
+
+static char *
+u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p)
+{
+	unsigned i;
+
+	i = U2S_BUFSIZE - 1;
+	s[i] = '\0';
+	switch (base) {
+	case 10:
+		do {
+			i--;
+			s[i] = "0123456789"[x % (uint64_t)10];
+			x /= (uint64_t)10;
+		} while (x > 0);
+		break;
+	case 16: {
+		const char *digits = (uppercase)
+		    ? "0123456789ABCDEF"
+		    : "0123456789abcdef";
+
+		do {
+			i--;
+			s[i] = digits[x & 0xf];
+			x >>= 4;
+		} while (x > 0);
+		break;
+	} default: {
+		const char *digits = (uppercase)
+		    ? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+		    : "0123456789abcdefghijklmnopqrstuvwxyz";
+
+		assert(base >= 2 && base <= 36);
+		do {
+			i--;
+			s[i] = digits[x % (uint64_t)base];
+			x /= (uint64_t)base;
+		} while (x > 0);
+	}}
+
+	*slen_p = U2S_BUFSIZE - 1 - i;
+	return (&s[i]);
+}
+
+static char *
+d2s(intmax_t x, char sign, char *s, size_t *slen_p)
+{
+	bool neg;
+
+	if ((neg = (x < 0)))
+		x = -x;
+	s = u2s(x, 10, false, s, slen_p);
+	if (neg)
+		sign = '-';
+	switch (sign) {
+	case '-':
+		if (neg == false)
+			break;
+		/* Fall through. */
+	case ' ':
+	case '+':
+		s--;
+		(*slen_p)++;
+		*s = sign;
+		break;
+	default: not_reached();
+	}
+	return (s);
+}
+
+static char *
+o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p)
+{
+
+	s = u2s(x, 8, false, s, slen_p);
+	if (alt_form && *s != '0') {
+		s--;
+		(*slen_p)++;
+		*s = '0';
+	}
+	return (s);
+}
+
+static char *
+x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
+{
+
+	s = u2s(x, 16, uppercase, s, slen_p);
+	if (alt_form) {
+		s -= 2;
+		(*slen_p) += 2;
+		memcpy(s, uppercase ? "0X" : "0x", 2);
+	}
+	return (s);
+}
+
+int
+malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
+{
+	int ret;
+	size_t i;
+	const char *f;
+	va_list tap;
+
+#define	APPEND_C(c) do {						\
+	if (i < size)							\
+		str[i] = (c);						\
+	i++;								\
+} while (0)
+#define	APPEND_S(s, slen) do {						\
+	if (i < size) {							\
+		size_t cpylen = (slen <= size - i) ? slen : size - i;	\
+		memcpy(&str[i], s, cpylen);				\
+	}								\
+	i += slen;							\
+} while (0)
+#define	APPEND_PADDED_S(s, slen, width, left_justify) do {		\
+	/* Left padding. */						\
+	size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ?	\
+	    (size_t)width - slen : 0);					\
+	if (left_justify == false && pad_len != 0) {			\
+		size_t j;						\
+		for (j = 0; j < pad_len; j++)				\
+			APPEND_C(' ');					\
+	}								\
+	/* Value. */							\
+	APPEND_S(s, slen);						\
+	/* Right padding. */						\
+	if (left_justify && pad_len != 0) {				\
+		size_t j;						\
+		for (j = 0; j < pad_len; j++)				\
+			APPEND_C(' ');					\
+	}								\
+} while (0)
+#define GET_ARG_NUMERIC(val, len) do {					\
+	switch (len) {							\
+	case '?':							\
+		val = va_arg(ap, int);					\
+		break;							\
+	case 'l':							\
+		val = va_arg(ap, long);					\
+		break;							\
+	case 'q':							\
+		val = va_arg(ap, long long);				\
+		break;							\
+	case 'j':							\
+		val = va_arg(ap, intmax_t);				\
+		break;							\
+	case 't':							\
+		val = va_arg(ap, ptrdiff_t);				\
+		break;							\
+	case 'z':							\
+		val = va_arg(ap, ssize_t);				\
+		break;							\
+	case 'p': /* Synthetic; used for %p. */				\
+		val = va_arg(ap, uintptr_t);				\
+		break;							\
+	default: not_reached();						\
+	}								\
+} while (0)
+
+	if (config_debug)
+		va_copy(tap, ap);
+
+	i = 0;
+	f = format;
+	while (true) {
+		switch (*f) {
+		case '\0': goto label_out;
+		case '%': {
+			bool alt_form = false;
+			bool zero_pad = false;
+			bool left_justify = false;
+			bool plus_space = false;
+			bool plus_plus = false;
+			int prec = -1;
+			int width = -1;
+			char len = '?';
+
+			f++;
+			if (*f == '%') {
+				/* %% */
+				APPEND_C(*f);
+				break;
+			}
+			/* Flags. */
+			while (true) {
+				switch (*f) {
+				case '#':
+					assert(alt_form == false);
+					alt_form = true;
+					break;
+				case '0':
+					assert(zero_pad == false);
+					zero_pad = true;
+					break;
+				case '-':
+					assert(left_justify == false);
+					left_justify = true;
+					break;
+				case ' ':
+					assert(plus_space == false);
+					plus_space = true;
+					break;
+				case '+':
+					assert(plus_plus == false);
+					plus_plus = true;
+					break;
+				default: goto label_width;
+				}
+				f++;
+			}
+			/* Width. */
+			label_width:
+			switch (*f) {
+			case '*':
+				width = va_arg(ap, int);
+				f++;
+				break;
+			case '0': case '1': case '2': case '3': case '4':
+			case '5': case '6': case '7': case '8': case '9': {
+				uintmax_t uwidth;
+				errno = 0;
+				uwidth = malloc_strtoumax(f, (char **)&f, 10);
+				assert(uwidth != UINTMAX_MAX || errno !=
+				    ERANGE);
+				width = (int)uwidth;
+				if (*f == '.') {
+					f++;
+					goto label_precision;
+				} else
+					goto label_length;
+				break;
+			} case '.':
+				f++;
+				goto label_precision;
+			default: goto label_length;
+			}
+			/* Precision. */
+			label_precision:
+			switch (*f) {
+			case '*':
+				prec = va_arg(ap, int);
+				f++;
+				break;
+			case '0': case '1': case '2': case '3': case '4':
+			case '5': case '6': case '7': case '8': case '9': {
+				uintmax_t uprec;
+				errno = 0;
+				uprec = malloc_strtoumax(f, (char **)&f, 10);
+				assert(uprec != UINTMAX_MAX || errno != ERANGE);
+				prec = (int)uprec;
+				break;
+			}
+			default: break;
+			}
+			/* Length. */
+			label_length:
+			switch (*f) {
+			case 'l':
+				f++;
+				if (*f == 'l') {
+					len = 'q';
+					f++;
+				} else
+					len = 'l';
+				break;
+			case 'j':
+				len = 'j';
+				f++;
+				break;
+			case 't':
+				len = 't';
+				f++;
+				break;
+			case 'z':
+				len = 'z';
+				f++;
+				break;
+			default: break;
+			}
+			/* Conversion specifier. */
+			switch (*f) {
+				char *s;
+				size_t slen;
+			case 'd': case 'i': {
+				intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+				char buf[D2S_BUFSIZE];
+
+				GET_ARG_NUMERIC(val, len);
+				s = d2s(val, (plus_plus ? '+' : (plus_space ?
+				    ' ' : '-')), buf, &slen);
+				APPEND_PADDED_S(s, slen, width, left_justify);
+				f++;
+				break;
+			} case 'o': {
+				uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+				char buf[O2S_BUFSIZE];
+
+				GET_ARG_NUMERIC(val, len);
+				s = o2s(val, alt_form, buf, &slen);
+				APPEND_PADDED_S(s, slen, width, left_justify);
+				f++;
+				break;
+			} case 'u': {
+				uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+				char buf[U2S_BUFSIZE];
+
+				GET_ARG_NUMERIC(val, len);
+				s = u2s(val, 10, false, buf, &slen);
+				APPEND_PADDED_S(s, slen, width, left_justify);
+				f++;
+				break;
+			} case 'x': case 'X': {
+				uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+				char buf[X2S_BUFSIZE];
+
+				GET_ARG_NUMERIC(val, len);
+				s = x2s(val, alt_form, *f == 'X', buf, &slen);
+				APPEND_PADDED_S(s, slen, width, left_justify);
+				f++;
+				break;
+			} case 'c': {
+				unsigned char val;
+				char buf[2];
+
+				assert(len == '?' || len == 'l');
+				assert_not_implemented(len != 'l');
+				val = va_arg(ap, int);
+				buf[0] = val;
+				buf[1] = '\0';
+				APPEND_PADDED_S(buf, 1, width, left_justify);
+				f++;
+				break;
+			} case 's':
+				assert(len == '?' || len == 'l');
+				assert_not_implemented(len != 'l');
+				s = va_arg(ap, char *);
+				slen = (prec == -1) ? strlen(s) : prec;
+				APPEND_PADDED_S(s, slen, width, left_justify);
+				f++;
+				break;
+			case 'p': {
+				uintmax_t val;
+				char buf[X2S_BUFSIZE];
+
+				GET_ARG_NUMERIC(val, 'p');
+				s = x2s(val, true, false, buf, &slen);
+				APPEND_PADDED_S(s, slen, width, left_justify);
+				f++;
+				break;
+			}
+			default: not_implemented();
+			}
+			break;
+		} default: {
+			APPEND_C(*f);
+			f++;
+			break;
+		}}
+	}
+	label_out:
+	if (i < size)
+		str[i] = '\0';
+	else
+		str[size - 1] = '\0';
+	ret = i;
+
+#undef APPEND_C
+#undef APPEND_S
+#undef APPEND_PADDED_S
+#undef GET_ARG_NUMERIC
+	return (ret);
+}
+
+JEMALLOC_ATTR(format(printf, 3, 4))
+int
+malloc_snprintf(char *str, size_t size, const char *format, ...)
+{
+	int ret;
+	va_list ap;
+
+	va_start(ap, format);
+	ret = malloc_vsnprintf(str, size, format, ap);
+	va_end(ap);
+
+	return (ret);
+}
+
+void
+malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
+    const char *format, va_list ap)
+{
+	char buf[MALLOC_PRINTF_BUFSIZE];
+
+	if (write_cb == NULL) {
+		/*
+		 * The caller did not provide an alternate write_cb callback
+		 * function, so use the default one.  malloc_write() is an
+		 * inline function, so use malloc_message() directly here.
+		 */
+		write_cb = je_malloc_message;
+		cbopaque = NULL;
+	}
+
+	malloc_vsnprintf(buf, sizeof(buf), format, ap);
+	write_cb(cbopaque, buf);
+}
+
+/*
+ * Print to a callback function in such a way as to (hopefully) avoid memory
+ * allocation.
+ */
+JEMALLOC_ATTR(format(printf, 3, 4))
+void
+malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
+    const char *format, ...)
+{
+	va_list ap;
+
+	va_start(ap, format);
+	malloc_vcprintf(write_cb, cbopaque, format, ap);
+	va_end(ap);
+}
+
+/* Print to stderr in such a way as to avoid memory allocation. */
+JEMALLOC_ATTR(format(printf, 1, 2))
+void
+malloc_printf(const char *format, ...)
+{
+	va_list ap;
+
+	va_start(ap, format);
+	malloc_vcprintf(NULL, NULL, format, ap);
+	va_end(ap);
+}
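The routines above give jemalloc a printf family that formats into a caller-supplied, fixed-size buffer and reports through je_malloc_message(), so nothing on the formatting path has to allocate. A minimal usage sketch follows (not part of the changeset; it assumes the internal prototypes for malloc_snprintf() and malloc_printf() are visible, e.g. via jemalloc/internal/jemalloc_internal.h, and the statistics values are illustrative only):

static void
report_usage(size_t allocated, size_t active)
{
	char buf[128];

	/* Format into a stack buffer; no heap allocation takes place. */
	malloc_snprintf(buf, sizeof(buf),
	    "allocated: %zu bytes, active: %zu bytes\n", allocated, active);
	/* Emit through je_malloc_message() (stderr by default). */
	malloc_printf("%s", buf);
}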
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/llvm/include/llvm-c/TargetMachine.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/llvm/include/llvm-c/TargetMachine.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,142 @@
+/*===-- llvm-c/TargetMachine.h - Target Machine Library C Interface - C++ -*-=*\
+|*                                                                            *|
+|*                     The LLVM Compiler Infrastructure                       *|
+|*                                                                            *|
+|* This file is distributed under the University of Illinois Open Source      *|
+|* License. See LICENSE.TXT for details.                                      *|
+|*                                                                            *|
+|*===----------------------------------------------------------------------===*|
+|*                                                                            *|
+|* This header declares the C interface to the Target and TargetMachine       *|
+|* classes, which can be used to generate assembly or object files.           *|
+|*                                                                            *|
+|* Many exotic languages can interoperate with C code but have a harder time  *|
+|* with C++ due to name mangling. So in addition to C, this interface enables *|
+|* tools written in such languages.                                           *|
+|*                                                                            *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_TARGETMACHINE_H
+#define LLVM_C_TARGETMACHINE_H
+
+#include "llvm-c/Core.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+typedef struct LLVMTargetMachine *LLVMTargetMachineRef;
+typedef struct LLVMTarget *LLVMTargetRef;
+
+typedef enum {
+    LLVMCodeGenLevelNone,
+    LLVMCodeGenLevelLess,
+    LLVMCodeGenLevelDefault,
+    LLVMCodeGenLevelAggressive
+} LLVMCodeGenOptLevel;
+
+typedef enum {
+    LLVMRelocDefault,
+    LLVMRelocStatic,
+    LLVMRelocPIC,
+    LLVMRelocDynamicNoPic
+} LLVMRelocMode;
+
+typedef enum {
+    LLVMCodeModelDefault,
+    LLVMCodeModelJITDefault,
+    LLVMCodeModelSmall,
+    LLVMCodeModelKernel,
+    LLVMCodeModelMedium,
+    LLVMCodeModelLarge
+} LLVMCodeModel;
+
+typedef enum {
+    LLVMAssemblyFile,
+    LLVMObjectFile
+} LLVMCodeGenFileType;
+
+/** Returns the first llvm::Target in the registered targets list. */
+LLVMTargetRef LLVMGetFirstTarget();
+/** Returns the next llvm::Target given a previous one (or null if there's none) */
+LLVMTargetRef LLVMGetNextTarget(LLVMTargetRef T);
+
+/*===-- Target ------------------------------------------------------------===*/
+/** Returns the name of a target. See llvm::Target::getName */
+const char *LLVMGetTargetName(LLVMTargetRef T);
+
+/** Returns the description of a target. See llvm::Target::getDescription */
+const char *LLVMGetTargetDescription(LLVMTargetRef T);
+
+/** Returns if the target has a JIT */
+LLVMBool LLVMTargetHasJIT(LLVMTargetRef T);
+
+/** Returns if the target has a TargetMachine associated */
+LLVMBool LLVMTargetHasTargetMachine(LLVMTargetRef T);
+
+/** Returns if the target has an ASM backend (required for emitting output) */
+LLVMBool LLVMTargetHasAsmBackend(LLVMTargetRef T);
+
+/*===-- Target Machine ----------------------------------------------------===*/
+/** Creates a new llvm::TargetMachine. See llvm::Target::createTargetMachine */
+LLVMTargetMachineRef LLVMCreateTargetMachine(LLVMTargetRef T, char *Triple,
+  char *CPU, char *Features, LLVMCodeGenOptLevel Level, LLVMRelocMode Reloc,
+  LLVMCodeModel CodeModel);
+
+/** Dispose the LLVMTargetMachineRef instance generated by
+  LLVMCreateTargetMachine. */
+void LLVMDisposeTargetMachine(LLVMTargetMachineRef T);
+
+/** Returns the Target used in a TargetMachine */
+LLVMTargetRef LLVMGetTargetMachineTarget(LLVMTargetMachineRef T);
+
+/** Returns the triple used when creating this target machine. See
+  llvm::TargetMachine::getTriple. The result needs to be disposed with
+  LLVMDisposeMessage. */
+char *LLVMGetTargetMachineTriple(LLVMTargetMachineRef T);
+
+/** Returns the cpu used when creating this target machine. See
+  llvm::TargetMachine::getCPU. The result needs to be disposed with
+  LLVMDisposeMessage. */
+char *LLVMGetTargetMachineCPU(LLVMTargetMachineRef T);
+
+/** Returns the feature string used when creating this target machine. See
+  llvm::TargetMachine::getFeatureString. The result needs to be disposed with
+  LLVMDisposeMessage. */
+char *LLVMGetTargetMachineFeatureString(LLVMTargetMachineRef T);
+
+/** Returns the llvm::TargetData used for this llvm::TargetMachine. */
+LLVMTargetDataRef LLVMGetTargetMachineData(LLVMTargetMachineRef T);
+
+/** Emits an asm or object file for the given module to the filename. This
+  wraps several C++-only classes (among them a file stream). Returns any
+  error in ErrorMessage. Use LLVMDisposeMessage to dispose the message. */
+LLVMBool LLVMTargetMachineEmitToFile(LLVMTargetMachineRef T, LLVMModuleRef M,
+  char *Filename, LLVMCodeGenFileType codegen, char **ErrorMessage);
+
+
+
+
+#ifdef __cplusplus
+}
+
+namespace llvm {
+  class TargetMachine;
+  class Target;
+
+  inline TargetMachine *unwrap(LLVMTargetMachineRef P) {
+    return reinterpret_cast<TargetMachine*>(P);
+  }
+  inline Target *unwrap(LLVMTargetRef P) {
+    return reinterpret_cast<Target*>(P);
+  }
+  inline LLVMTargetMachineRef wrap(const TargetMachine *P) {
+    return reinterpret_cast<LLVMTargetMachineRef>(
+      const_cast<TargetMachine*>(P));
+  }
+  inline LLVMTargetRef wrap(const Target * P) {
+    return reinterpret_cast<LLVMTargetRef>(const_cast<Target*>(P));
+  }
+}
+#endif
+
+#endif
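Taken together, the declarations above are enough to drive code generation from C: enumerate the registered targets, build a TargetMachine, and ask it to emit an object file. A minimal sketch under those assumptions (not part of the changeset; module creation, target initialization, the "generic" CPU string, and the output path are illustrative only):

#include "llvm-c/Core.h"
#include "llvm-c/TargetMachine.h"

static LLVMBool emit_object(LLVMModuleRef module, char *triple) {
  char cpu[] = "generic", features[] = "", path[] = "out.o";
  char *error = 0;

  /* For illustration, just take the first registered target; a real
     caller would select the target matching the triple. */
  LLVMTargetRef target = LLVMGetFirstTarget();
  if (!target || !LLVMTargetHasTargetMachine(target))
    return 1;

  LLVMTargetMachineRef tm = LLVMCreateTargetMachine(target, triple, cpu,
      features, LLVMCodeGenLevelDefault, LLVMRelocDefault,
      LLVMCodeModelDefault);

  LLVMBool failed = LLVMTargetMachineEmitToFile(tm, module, path,
      LLVMObjectFile, &error);
  if (failed)
    LLVMDisposeMessage(error);   /* dispose the message, as the header asks */
  LLVMDisposeTargetMachine(tm);
  return failed;
}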
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/llvm/include/llvm-c/Transforms/Vectorize.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/llvm/include/llvm-c/Transforms/Vectorize.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,48 @@
+/*===---------------------------Vectorize.h --------------------- -*- C -*-===*\
+|*===----------- Vectorization Transformation Library C Interface ---------===*|
+|*                                                                            *|
+|*                     The LLVM Compiler Infrastructure                       *|
+|*                                                                            *|
+|* This file is distributed under the University of Illinois Open Source      *|
+|* License. See LICENSE.TXT for details.                                      *|
+|*                                                                            *|
+|*===----------------------------------------------------------------------===*|
+|*                                                                            *|
+|* This header declares the C interface to libLLVMVectorize.a, which          *|
+|* implements various vectorization transformations of the LLVM IR.           *|
+|*                                                                            *|
+|* Many exotic languages can interoperate with C code but have a harder time  *|
+|* with C++ due to name mangling. So in addition to C, this interface enables *|
+|* tools written in such languages.                                           *|
+|*                                                                            *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_TRANSFORMS_VECTORIZE_H
+#define LLVM_C_TRANSFORMS_VECTORIZE_H
+
+#include "llvm-c/Core.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup LLVMCTransformsVectorize Vectorization transformations
+ * @ingroup LLVMCTransforms
+ *
+ * @{
+ */
+
+/** See llvm::createBBVectorizePass function. */
+void LLVMAddBBVectorizePass(LLVMPassManagerRef PM);
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif /* defined(__cplusplus) */
+
+#endif
+
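The single entry point in this header hooks the new basic-block vectorizer into the C pass-manager API. A minimal sketch of how it would be used (not part of the changeset; creation of the module is assumed to happen elsewhere):

#include "llvm-c/Core.h"
#include "llvm-c/Transforms/Vectorize.h"

static void run_bb_vectorize(LLVMModuleRef module) {
  LLVMPassManagerRef pm = LLVMCreatePassManager();
  LLVMAddBBVectorizePass(pm);      /* see llvm::createBBVectorizePass */
  LLVMRunPassManager(pm, module);  /* run the pass over the module */
  LLVMDisposePassManager(pm);
}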
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/llvm/include/llvm/ADT/Hashing.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/llvm/include/llvm/ADT/Hashing.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,770 @@
+//===-- llvm/ADT/Hashing.h - Utilities for hashing --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the newly proposed standard C++ interfaces for hashing
+// arbitrary data and building hash functions for user-defined types. This
+// interface was originally proposed in N3333[1] and is currently under review
+// for inclusion in a future TR and/or standard.
+//
+// The primary interfaces provided comprise one type and three functions:
+//
+//  -- 'hash_code' class is an opaque type representing the hash code for some
+//     data. It is the intended product of hashing, and can be used to implement
+//     hash tables, checksumming, and other common uses of hashes. It is not an
+//     integer type (although it can be converted to one) because it is risky
+//     to assume much about the internals of a hash_code. In particular, each
+//     execution of the program has a high probability of producing a different
+//     hash_code for a given input. Thus hash_code values are not stable to
+//     save or persist, and should only be used during a single execution to
+//     build hashing data structures.
+//
+//  -- 'hash_value' is a function designed to be overloaded for each
+//     user-defined type which wishes to be used within a hashing context. It
+//     should be overloaded within the user-defined type's namespace and found
+//     via ADL. Overloads for primitive types are provided by this library.
+//
+//  -- 'hash_combine' and 'hash_combine_range' are functions designed to aid
+//      programmers in easily and intuitively combining a set of data into
+//      a single hash_code for their object. They should only logically be used
+//      within the implementation of a 'hash_value' routine or similar context.
+//
+// Note that 'hash_combine_range' contains very special logic for hashing
+// a contiguous array of integers or pointers. This logic is *extremely* fast:
+// on a modern Intel "Gainestown" Xeon (Nehalem uarch) @2.2 GHz, it was
+// benchmarked at over 6.5 GiB/s for large keys and <20 cycles/hash for keys
+// under 32 bytes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_HASHING_H
+#define LLVM_ADT_HASHING_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/SwapByteOrder.h"
+#include "llvm/Support/type_traits.h"
+#include <algorithm>
+#include <cassert>
+#include <cstring>
+#include <iterator>
+#include <utility>
+
+// Allow detecting C++11 feature availability when building with Clang without
+// breaking other compilers.
+#ifndef __has_feature
+# define __has_feature(x) 0
+#endif
+
+namespace llvm {
+
+/// \brief An opaque object representing a hash code.
+///
+/// This object represents the result of hashing some entity. It is intended to
+/// be used to implement hashtables or other hashing-based data structures.
+/// While it wraps and exposes a numeric value, this value should not be
+/// trusted to be stable or predictable across processes or executions.
+///
+/// In order to obtain the hash_code for an object 'x':
+/// \code
+///   using llvm::hash_value;
+///   llvm::hash_code code = hash_value(x);
+/// \endcode
+///
+/// Also note that there are two numerical values which are reserved and which
+/// the implementation ensures will never be produced for real hash_codes. These
+/// can be used as sentinels within hashing data structures.
+class hash_code {
+  size_t value;
+
+public:
+  /// \brief Default construct a hash_code.
+  /// Note that this leaves the value uninitialized.
+  hash_code() {}
+
+  /// \brief Form a hash code directly from a numerical value.
+  hash_code(size_t value) : value(value) {}
+
+  /// \brief Convert the hash code to its numerical value for use.
+  /*explicit*/ operator size_t() const { return value; }
+
+  friend bool operator==(const hash_code &lhs, const hash_code &rhs) {
+    return lhs.value == rhs.value;
+  }
+  friend bool operator!=(const hash_code &lhs, const hash_code &rhs) {
+    return lhs.value != rhs.value;
+  }
+
+  /// \brief Allow a hash_code to be directly run through hash_value.
+  friend size_t hash_value(const hash_code &code) { return code.value; }
+};
+
+/// \brief Compute a hash_code for any integer value.
+///
+/// Note that this function is intended to compute the same hash_code for
+/// a particular value without regard to the pre-promotion type. This is in
+/// contrast to hash_combine which may produce different hash_codes for
+/// differing argument types even if they would implicitly promote to a common
+/// type without changing the value.
+template <typename T>
+typename enable_if<is_integral_or_enum<T>, hash_code>::type hash_value(T value);
+
+/// \brief Compute a hash_code for a pointer's address.
+///
+/// N.B.: This hashes the *address*. Not the value and not the type.
+template <typename T> hash_code hash_value(const T *ptr);
+
+/// \brief Compute a hash_code for a pair of objects.
+template <typename T, typename U>
+hash_code hash_value(const std::pair<T, U> &arg);
+
+/// \brief Compute a hash_code for a standard string.
+template <typename T>
+hash_code hash_value(const std::basic_string<T> &arg);
+
+
+/// \brief Override the execution seed with a fixed value.
+///
+/// This hashing library uses a per-execution seed designed to change on each
+/// run with high probability in order to ensure that the hash codes are not
+/// attackable and to ensure that output which is intended to be stable does
+/// not rely on the particulars of the hash codes produced.
+///
+/// That said, there are use cases where it is important to be able to
+/// reproduce *exactly* a specific behavior. To that end, we provide a function
+/// which will forcibly set the seed to a fixed value. This must be done at the
+/// start of the program, before any hashes are computed. Also, it cannot be
+/// undone. This makes it thread-hostile and very hard to use outside of the
+/// immediate start-up of a simple program designed for reproducible
+/// behavior.
+void set_fixed_execution_hash_seed(size_t fixed_value);
+
+
+// All of the implementation details of actually computing the various hash
+// code values are held within this namespace. These routines are included in
+// the header file mainly to allow inlining and constant propagation.
+namespace hashing {
+namespace detail {
+
+inline uint64_t fetch64(const char *p) {
+  uint64_t result;
+  memcpy(&result, p, sizeof(result));
+  if (sys::isBigEndianHost())
+    return sys::SwapByteOrder(result);
+  return result;
+}
+
+inline uint32_t fetch32(const char *p) {
+  uint32_t result;
+  memcpy(&result, p, sizeof(result));
+  if (sys::isBigEndianHost())
+    return sys::SwapByteOrder(result);
+  return result;
+}
+
+/// Some primes between 2^63 and 2^64 for various uses.
+static const uint64_t k0 = 0xc3a5c85c97cb3127ULL;
+static const uint64_t k1 = 0xb492b66fbe98f273ULL;
+static const uint64_t k2 = 0x9ae16a3b2f90404fULL;
+static const uint64_t k3 = 0xc949d7c7509e6557ULL;
+
+/// \brief Bitwise right rotate.
+/// Normally this will compile to a single instruction, especially if the
+/// shift is a manifest constant.
+inline uint64_t rotate(uint64_t val, size_t shift) {
+  // Avoid shifting by 64: doing so yields an undefined result.
+  return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
+}
+
+inline uint64_t shift_mix(uint64_t val) {
+  return val ^ (val >> 47);
+}
+
+inline uint64_t hash_16_bytes(uint64_t low, uint64_t high) {
+  // Murmur-inspired hashing.
+  const uint64_t kMul = 0x9ddfea08eb382d69ULL;
+  uint64_t a = (low ^ high) * kMul;
+  a ^= (a >> 47);
+  uint64_t b = (high ^ a) * kMul;
+  b ^= (b >> 47);
+  b *= kMul;
+  return b;
+}
+
+inline uint64_t hash_1to3_bytes(const char *s, size_t len, uint64_t seed) {
+  uint8_t a = s[0];
+  uint8_t b = s[len >> 1];
+  uint8_t c = s[len - 1];
+  uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8);
+  uint32_t z = len + (static_cast<uint32_t>(c) << 2);
+  return shift_mix(y * k2 ^ z * k3 ^ seed) * k2;
+}
+
+inline uint64_t hash_4to8_bytes(const char *s, size_t len, uint64_t seed) {
+  uint64_t a = fetch32(s);
+  return hash_16_bytes(len + (a << 3), seed ^ fetch32(s + len - 4));
+}
+
+inline uint64_t hash_9to16_bytes(const char *s, size_t len, uint64_t seed) {
+  uint64_t a = fetch64(s);
+  uint64_t b = fetch64(s + len - 8);
+  return hash_16_bytes(seed ^ a, rotate(b + len, len)) ^ b;
+}
+
+inline uint64_t hash_17to32_bytes(const char *s, size_t len, uint64_t seed) {
+  uint64_t a = fetch64(s) * k1;
+  uint64_t b = fetch64(s + 8);
+  uint64_t c = fetch64(s + len - 8) * k2;
+  uint64_t d = fetch64(s + len - 16) * k0;
+  return hash_16_bytes(rotate(a - b, 43) + rotate(c ^ seed, 30) + d,
+                       a + rotate(b ^ k3, 20) - c + len + seed);
+}
+
+inline uint64_t hash_33to64_bytes(const char *s, size_t len, uint64_t seed) {
+  uint64_t z = fetch64(s + 24);
+  uint64_t a = fetch64(s) + (len + fetch64(s + len - 16)) * k0;
+  uint64_t b = rotate(a + z, 52);
+  uint64_t c = rotate(a, 37);
+  a += fetch64(s + 8);
+  c += rotate(a, 7);
+  a += fetch64(s + 16);
+  uint64_t vf = a + z;
+  uint64_t vs = b + rotate(a, 31) + c;
+  a = fetch64(s + 16) + fetch64(s + len - 32);
+  z = fetch64(s + len - 8);
+  b = rotate(a + z, 52);
+  c = rotate(a, 37);
+  a += fetch64(s + len - 24);
+  c += rotate(a, 7);
+  a += fetch64(s + len - 16);
+  uint64_t wf = a + z;
+  uint64_t ws = b + rotate(a, 31) + c;
+  uint64_t r = shift_mix((vf + ws) * k2 + (wf + vs) * k0);
+  return shift_mix((seed ^ (r * k0)) + vs) * k2;
+}
+
+inline uint64_t hash_short(const char *s, size_t length, uint64_t seed) {
+  if (length >= 4 && length <= 8)
+    return hash_4to8_bytes(s, length, seed);
+  if (length > 8 && length <= 16)
+    return hash_9to16_bytes(s, length, seed);
+  if (length > 16 && length <= 32)
+    return hash_17to32_bytes(s, length, seed);
+  if (length > 32)
+    return hash_33to64_bytes(s, length, seed);
+  if (length != 0)
+    return hash_1to3_bytes(s, length, seed);
+
+  return k2 ^ seed;
+}
+
+/// \brief The intermediate state used during hashing.
+/// Currently, the algorithm for computing hash codes is based on CityHash and
+/// keeps 56 bytes of arbitrary state.
+struct hash_state {
+  uint64_t h0, h1, h2, h3, h4, h5, h6;
+  uint64_t seed;
+
+  /// \brief Create a new hash_state structure and initialize it based on the
+  /// seed and the first 64-byte chunk.
+  /// This effectively performs the initial mix.
+  static hash_state create(const char *s, uint64_t seed) {
+    hash_state state = {
+      0, seed, hash_16_bytes(seed, k1), rotate(seed ^ k1, 49),
+      seed * k1, shift_mix(seed), 0, seed };
+    state.h6 = hash_16_bytes(state.h4, state.h5);
+    state.mix(s);
+    return state;
+  }
+
+  /// \brief Mix 32-bytes from the input sequence into the 16-bytes of 'a'
+  /// and 'b', including whatever is already in 'a' and 'b'.
+  static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) {
+    a += fetch64(s);
+    uint64_t c = fetch64(s + 24);
+    b = rotate(b + a + c, 21);
+    uint64_t d = a;
+    a += fetch64(s + 8) + fetch64(s + 16);
+    b += rotate(a, 44) + d;
+    a += c;
+  }
+
+  /// \brief Mix in a 64-byte buffer of data.
+  /// We mix all 64 bytes even when the chunk length is smaller, but we
+  /// record the actual length.
+  void mix(const char *s) {
+    h0 = rotate(h0 + h1 + h3 + fetch64(s + 8), 37) * k1;
+    h1 = rotate(h1 + h4 + fetch64(s + 48), 42) * k1;
+    h0 ^= h6;
+    h1 += h3 + fetch64(s + 40);
+    h2 = rotate(h2 + h5, 33) * k1;
+    h3 = h4 * k1;
+    h4 = h0 + h5;
+    mix_32_bytes(s, h3, h4);
+    h5 = h2 + h6;
+    h6 = h1 + fetch64(s + 16);
+    mix_32_bytes(s + 32, h5, h6);
+    std::swap(h2, h0);
+  }
+
+  /// \brief Compute the final 64-bit hash code value based on the current
+  /// state and the length of bytes hashed.
+  uint64_t finalize(size_t length) {
+    return hash_16_bytes(hash_16_bytes(h3, h5) + shift_mix(h1) * k1 + h2,
+                         hash_16_bytes(h4, h6) + shift_mix(length) * k1 + h0);
+  }
+};
+
+
+/// \brief A global, fixed seed-override variable.
+///
+/// This variable can be set using the \see llvm::set_fixed_execution_seed
+/// function. See that function for details. Do not, under any circumstances,
+/// set or read this variable.
+extern size_t fixed_seed_override;
+
+inline size_t get_execution_seed() {
+  // FIXME: This needs to be a per-execution seed. This is just a placeholder
+  // implementation. Switching to a per-execution seed is likely to flush out
+  // instability bugs and so will happen as its own commit.
+  //
+  // However, if there is a fixed seed override set the first time this is
+  // called, return that instead of the per-execution seed.
+  const uint64_t seed_prime = 0xff51afd7ed558ccdULL;
+  static size_t seed = fixed_seed_override ? fixed_seed_override
+                                           : (size_t)seed_prime;
+  return seed;
+}
+
+
+/// \brief Trait to indicate whether a type's bits can be hashed directly.
+///
+/// A type trait which is true if we want to combine values for hashing by
+/// reading the underlying data. It is false if values of this type must
+/// first be passed to hash_value, and the resulting hash_codes combined.
+//
+// FIXME: We want to replace is_integral_or_enum and is_pointer here with
+// a predicate which asserts that comparing the underlying storage of two
+// values of the type for equality is equivalent to comparing the two values
+// for equality. For all the platforms we care about, this holds for integers
+// and pointers, but there are platforms where it doesn't and we would like to
+// support user-defined types which happen to satisfy this property.
+template <typename T> struct is_hashable_data
+  : integral_constant<bool, ((is_integral_or_enum<T>::value ||
+                              is_pointer<T>::value) &&
+                             64 % sizeof(T) == 0)> {};
+
+// Special case std::pair to detect when both types are viable and when there
+// is no alignment-derived padding in the pair. This is a bit of a lie because
+// std::pair isn't truly POD, but it's close enough in all reasonable
+// implementations for our use case of hashing the underlying data.
+template <typename T, typename U> struct is_hashable_data<std::pair<T, U> >
+  : integral_constant<bool, (is_hashable_data<T>::value &&
+                             is_hashable_data<U>::value &&
+                             (sizeof(T) + sizeof(U)) ==
+                              sizeof(std::pair<T, U>))> {};
+
+/// \brief Helper to get the hashable data representation for a type.
+/// This variant is enabled when the type itself can be used.
+template <typename T>
+typename enable_if<is_hashable_data<T>, T>::type
+get_hashable_data(const T &value) {
+  return value;
+}
+/// \brief Helper to get the hashable data representation for a type.
+/// This variant is enabled when we must first call hash_value and use the
+/// result as our data.
+template <typename T>
+typename enable_if_c<!is_hashable_data<T>::value, size_t>::type
+get_hashable_data(const T &value) {
+  using ::llvm::hash_value;
+  return hash_value(value);
+}
+
+/// \brief Helper to store data from a value into a buffer and advance the
+/// pointer into that buffer.
+///
+/// This routine first checks whether there is enough space in the provided
+/// buffer, and if not immediately returns false. If there is space, it
+/// copies the underlying bytes of value into the buffer, advances the
+/// buffer_ptr past the copied bytes, and returns true.
+template <typename T>
+bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value,
+                       size_t offset = 0) {
+  size_t store_size = sizeof(value) - offset;
+  if (buffer_ptr + store_size > buffer_end)
+    return false;
+  const char *value_data = reinterpret_cast<const char *>(&value);
+  memcpy(buffer_ptr, value_data + offset, store_size);
+  buffer_ptr += store_size;
+  return true;
+}
+
+/// \brief Implement the combining of integral values into a hash_code.
+///
+/// This overload is selected when the value type of the iterator is
+/// integral. Rather than computing a hash_code for each object and then
+/// combining them, this (as an optimization) directly combines the integers.
+template <typename InputIteratorT>
+hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
+  typedef typename std::iterator_traits<InputIteratorT>::value_type ValueT;
+  const size_t seed = get_execution_seed();
+  char buffer[64], *buffer_ptr = buffer;
+  char *const buffer_end = buffer_ptr + array_lengthof(buffer);
+  while (first != last && store_and_advance(buffer_ptr, buffer_end,
+                                            get_hashable_data(*first)))
+    ++first;
+  if (first == last)
+    return hash_short(buffer, buffer_ptr - buffer, seed);
+  assert(buffer_ptr == buffer_end);
+
+  hash_state state = hash_state::create(buffer, seed);
+  size_t length = 64;
+  while (first != last) {
+    // Fill up the buffer. We don't clear it, which re-mixes the last round
+    // when only a partial 64-byte chunk is left.
+    buffer_ptr = buffer;
+    while (first != last && store_and_advance(buffer_ptr, buffer_end,
+                                              get_hashable_data(*first)))
+      ++first;
+
+    // Rotate the buffer if we did a partial fill in order to simulate doing
+    // a mix of the last 64-bytes. That is how the algorithm works when we
+    // have a contiguous byte sequence, and we want to emulate that here.
+    std::rotate(buffer, buffer_ptr, buffer_end);
+
+    // Mix this chunk into the current state.
+    state.mix(buffer);
+    length += buffer_ptr - buffer;
+  };
+
+  return state.finalize(length);
+}
+
+/// \brief Implement the combining of integral values into a hash_code.
+///
+/// This overload is selected when the value type of the iterator is integral
+/// and when the input iterator is actually a pointer. Rather than computing
+/// a hash_code for each object and then combining them, this (as an
+/// optimization) directly combines the integers. Also, because the integers
+/// are stored in contiguous memory, this routine avoids copying each value
+/// and directly reads from the underlying memory.
+template <typename ValueT>
+typename enable_if<is_hashable_data<ValueT>, hash_code>::type
+hash_combine_range_impl(ValueT *first, ValueT *last) {
+  const size_t seed = get_execution_seed();
+  const char *s_begin = reinterpret_cast<const char *>(first);
+  const char *s_end = reinterpret_cast<const char *>(last);
+  const size_t length = std::distance(s_begin, s_end);
+  if (length <= 64)
+    return hash_short(s_begin, length, seed);
+
+  const char *s_aligned_end = s_begin + (length & ~63);
+  hash_state state = hash_state::create(s_begin, seed);
+  s_begin += 64;
+  while (s_begin != s_aligned_end) {
+    state.mix(s_begin);
+    s_begin += 64;
+  }
+  if (length & 63)
+    state.mix(s_end - 64);
+
+  return state.finalize(length);
+}
+
+} // namespace detail
+} // namespace hashing
+
+
+/// \brief Compute a hash_code for a sequence of values.
+///
+/// This hashes a sequence of values. It produces the same hash_code as
+/// 'hash_combine(a, b, c, ...)', but can run over arbitrary sized sequences
+/// and is significantly faster given pointers and types which can be hashed as
+/// a sequence of bytes.
+template <typename InputIteratorT>
+hash_code hash_combine_range(InputIteratorT first, InputIteratorT last) {
+  return ::llvm::hashing::detail::hash_combine_range_impl(first, last);
+}
+
+
+// Implementation details for hash_combine.
+namespace hashing {
+namespace detail {
+
+/// \brief Helper class to manage the recursive combining of hash_combine
+/// arguments.
+///
+/// This class exists to manage the state and various calls involved in the
+/// recursive combining of arguments used in hash_combine. It is particularly
+/// useful at minimizing the code in the recursive calls to ease the pain
+/// caused by a lack of variadic functions.
+struct hash_combine_recursive_helper {
+  char buffer[64];
+  hash_state state;
+  const size_t seed;
+
+public:
+  /// \brief Construct a recursive hash combining helper.
+  ///
+  /// This sets up the state for a recursive hash combine, including getting
+  /// the seed and buffer setup.
+  hash_combine_recursive_helper()
+    : seed(get_execution_seed()) {}
+
+  /// \brief Combine one chunk of data into the current in-flight hash.
+  ///
+  /// This merges one chunk of data into the hash. First it tries to buffer
+  /// the data. If the buffer is full, it hashes the buffer into its
+  /// hash_state, empties it, and then merges the new chunk in. This also
+  /// handles cases where the data straddles the end of the buffer.
+  template <typename T>
+  char *combine_data(size_t &length, char *buffer_ptr, char *buffer_end, T data) {
+    if (!store_and_advance(buffer_ptr, buffer_end, data)) {
+      // Check for skew which prevents the buffer from being packed, and do
+      // a partial store into the buffer to fill it. This is only a concern
+      // with the variadic combine because that formation can have varying
+      // argument types.
+      size_t partial_store_size = buffer_end - buffer_ptr;
+      memcpy(buffer_ptr, &data, partial_store_size);
+
+      // If the store fails, our buffer is full and ready to hash. We have to
+      // either initialize the hash state (on the first full buffer) or mix
+      // this buffer into the existing hash state. Length tracks the *hashed*
+      // length, not the buffered length.
+      if (length == 0) {
+        state = hash_state::create(buffer, seed);
+        length = 64;
+      } else {
+        // Mix this chunk into the current state and bump length up by 64.
+        state.mix(buffer);
+        length += 64;
+      }
+      // Reset the buffer_ptr to the head of the buffer for the next chunk of
+      // data.
+      buffer_ptr = buffer;
+
+      // Try again to store into the buffer -- this cannot fail as we only
+      // store types smaller than the buffer.
+      if (!store_and_advance(buffer_ptr, buffer_end, data,
+                             partial_store_size))
+        abort();
+    }
+    return buffer_ptr;
+  }
+
+#if defined(__has_feature) && __has_feature(__cxx_variadic_templates__)
+
+  /// \brief Recursive, variadic combining method.
+  ///
+  /// This function recurses through each argument, combining that argument
+  /// into a single hash.
+  template <typename T, typename ...Ts>
+  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
+                    const T &arg, const Ts &...args) {
+    buffer_ptr = combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg));
+
+    // Recurse to the next argument.
+    return combine(length, buffer_ptr, buffer_end, args...);
+  }
+
+#else
+  // Manually expanded recursive combining methods. See variadic above for
+  // documentation.
+
+  template <typename T1, typename T2, typename T3, typename T4, typename T5,
+            typename T6>
+  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
+                    const T1 &arg1, const T2 &arg2, const T3 &arg3,
+                    const T4 &arg4, const T5 &arg5, const T6 &arg6) {
+    buffer_ptr = combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg1));
+    return combine(length, buffer_ptr, buffer_end, arg2, arg3, arg4, arg5, arg6);
+  }
+  template <typename T1, typename T2, typename T3, typename T4, typename T5>
+  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
+                    const T1 &arg1, const T2 &arg2, const T3 &arg3,
+                    const T4 &arg4, const T5 &arg5) {
+    buffer_ptr = combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg1));
+    return combine(length, buffer_ptr, buffer_end, arg2, arg3, arg4, arg5);
+  }
+  template <typename T1, typename T2, typename T3, typename T4>
+  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
+                    const T1 &arg1, const T2 &arg2, const T3 &arg3,
+                    const T4 &arg4) {
+    buffer_ptr = combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg1));
+    return combine(length, buffer_ptr, buffer_end, arg2, arg3, arg4);
+  }
+  template <typename T1, typename T2, typename T3>
+  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
+                    const T1 &arg1, const T2 &arg2, const T3 &arg3) {
+    buffer_ptr = combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg1));
+    return combine(length, buffer_ptr, buffer_end, arg2, arg3);
+  }
+  template <typename T1, typename T2>
+  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
+                    const T1 &arg1, const T2 &arg2) {
+    buffer_ptr = combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg1));
+    return combine(length, buffer_ptr, buffer_end, arg2);
+  }
+  template <typename T1>
+  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
+                    const T1 &arg1) {
+    buffer_ptr = combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg1));
+    return combine(length, buffer_ptr, buffer_end);
+  }
+
+#endif
+
+  /// \brief Base case for recursive, variadic combining.
+  ///
+  /// The base case when combining arguments recursively is reached when all
+  /// arguments have been handled. It flushes the remaining buffer and
+  /// constructs a hash_code.
+  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end) {
+    // Check whether the entire set of values fits in the buffer. If so, we'll
+    // use the optimized short hashing routine and skip state entirely.
+    if (length == 0)
+      return hash_short(buffer, buffer_ptr - buffer, seed);
+
+    // Mix the final buffer, rotating it if we did a partial fill in order to
+    // simulate doing a mix of the last 64-bytes. That is how the algorithm
+    // works when we have a contiguous byte sequence, and we want to emulate
+    // that here.
+    std::rotate(buffer, buffer_ptr, buffer_end);
+
+    // Mix this chunk into the current state.
+    state.mix(buffer);
+    length += buffer_ptr - buffer;
+
+    return state.finalize(length);
+  }
+};
+
+} // namespace detail
+} // namespace hashing
+
+
+#if __has_feature(__cxx_variadic_templates__)
+
+/// \brief Combine values into a single hash_code.
+///
+/// This routine accepts a varying number of arguments of any type. It will
+/// attempt to combine them into a single hash_code. For user-defined types it
+/// attempts to call a \see hash_value overload (via ADL) for the type. For
+/// integer and pointer types it directly combines their data into the
+/// resulting hash_code.
+///
+/// The result is suitable for returning from a user's hash_value
+/// *implementation* for their user-defined type. Consumers of a type should
+/// *not* call this routine, they should instead call 'hash_value'.
+template <typename ...Ts> hash_code hash_combine(const Ts &...args) {
+  // Recursively hash each argument using a helper class.
+  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+  return helper.combine(0, helper.buffer, helper.buffer + 64, args...);
+}
+
+#else
+
+// What follows are manually exploded overloads for each argument width. See
+// the above variadic definition for documentation and specification.
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+          typename T6>
+hash_code hash_combine(const T1 &arg1, const T2 &arg2, const T3 &arg3,
+                       const T4 &arg4, const T5 &arg5, const T6 &arg6) {
+  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+  return helper.combine(0, helper.buffer, helper.buffer + 64,
+                        arg1, arg2, arg3, arg4, arg5, arg6);
+}
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+hash_code hash_combine(const T1 &arg1, const T2 &arg2, const T3 &arg3,
+                       const T4 &arg4, const T5 &arg5) {
+  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+  return helper.combine(0, helper.buffer, helper.buffer + 64,
+                        arg1, arg2, arg3, arg4, arg5);
+}
+template <typename T1, typename T2, typename T3, typename T4>
+hash_code hash_combine(const T1 &arg1, const T2 &arg2, const T3 &arg3,
+                       const T4 &arg4) {
+  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+  return helper.combine(0, helper.buffer, helper.buffer + 64,
+                        arg1, arg2, arg3, arg4);
+}
+template <typename T1, typename T2, typename T3>
+hash_code hash_combine(const T1 &arg1, const T2 &arg2, const T3 &arg3) {
+  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+  return helper.combine(0, helper.buffer, helper.buffer + 64, arg1, arg2, arg3);
+}
+template <typename T1, typename T2>
+hash_code hash_combine(const T1 &arg1, const T2 &arg2) {
+  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+  return helper.combine(0, helper.buffer, helper.buffer + 64, arg1, arg2);
+}
+template <typename T1>
+hash_code hash_combine(const T1 &arg1) {
+  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+  return helper.combine(0, helper.buffer, helper.buffer + 64, arg1);
+}
+
+#endif
+
+
+// Implementation details for implementations of hash_value overloads provided
+// here.
+namespace hashing {
+namespace detail {
+
+/// \brief Helper to hash the value of a single integer.
+///
+/// Overloads for smaller integer types are not provided to ensure consistent
+/// behavior in the presence of integral promotions. Essentially,
+/// "hash_value('4')" and "hash_value('0' + 4)" should be the same.
+inline hash_code hash_integer_value(uint64_t value) {
+  // Similar to hash_4to8_bytes but using a seed instead of length.
+  const uint64_t seed = get_execution_seed();
+  const char *s = reinterpret_cast<const char *>(&value);
+  const uint64_t a = fetch32(s);
+  return hash_16_bytes(seed + (a << 3), fetch32(s + 4));
+}
+
+} // namespace detail
+} // namespace hashing
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T>
+typename enable_if<is_integral_or_enum<T>, hash_code>::type
+hash_value(T value) {
+  return ::llvm::hashing::detail::hash_integer_value(value);
+}
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T> hash_code hash_value(const T *ptr) {
+  return ::llvm::hashing::detail::hash_integer_value(
+    reinterpret_cast<uintptr_t>(ptr));
+}
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T, typename U>
+hash_code hash_value(const std::pair<T, U> &arg) {
+  return hash_combine(arg.first, arg.second);
+}
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T>
+hash_code hash_value(const std::basic_string<T> &arg) {
+  return hash_combine_range(arg.begin(), arg.end());
+}
+
+} // namespace llvm
+
+#endif
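The intended extension point of this header is the ADL-found hash_value overload: a user-defined type overloads hash_value in its own namespace and builds the result with hash_combine. A minimal sketch (not part of the changeset; the Symbol type and the example namespace are hypothetical):

#include "llvm/ADT/Hashing.h"

namespace example {                 // hypothetical user namespace
struct Symbol {
  const char *Name;
  unsigned Index;
};

// Found via ADL by code that does "using llvm::hash_value; hash_value(S);".
inline llvm::hash_code hash_value(const Symbol &S) {
  // Pointers are hashed by address and integers by value, as documented
  // above; hash_combine mixes both members into a single hash_code.
  return llvm::hash_combine(S.Name, S.Index);
}
} // namespace example

// Contiguous sequences go through hash_combine_range instead, e.g.
//   llvm::hash_code h = llvm::hash_combine_range(Str.begin(), Str.end());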
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/llvm/include/llvm/ADT/SparseSet.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/llvm/include/llvm/ADT/SparseSet.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,268 @@
+//===--- llvm/ADT/SparseSet.h - Sparse set ----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SparseSet class derived from the version described in
+// Briggs, Torczon, "An efficient representation for sparse sets", ACM Letters
+// on Programming Languages and Systems, Volume 2 Issue 1-4, March-Dec.  1993.
+//
+// A sparse set holds a small number of objects identified by integer keys from
+// a moderately sized universe. The sparse set uses more memory than other
+// containers in order to provide faster operations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SPARSESET_H
+#define LLVM_ADT_SPARSESET_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/DataTypes.h"
+#include <limits>
+
+namespace llvm {
+
+/// SparseSetFunctor - Objects in a SparseSet are identified by small integer
+/// keys.  A functor object is used to compute the key of an object.  The
+/// functor's operator() must return an unsigned smaller than the universe.
+///
+/// The default functor implementation forwards to a getSparseSetKey() method
+/// on the object.  It is intended for sparse sets holding ad-hoc structs.
+///
+template<typename ValueT>
+struct SparseSetFunctor {
+  unsigned operator()(const ValueT &Val) {
+    return Val.getSparseSetKey();
+  }
+};
+
+/// SparseSetFunctor<unsigned> - Provide a trivial identity functor for
+/// SparseSet<unsigned>.
+///
+template<> struct SparseSetFunctor<unsigned> {
+  unsigned operator()(unsigned Val) { return Val; }
+};
+
+/// SparseSet - Fast set implementation for objects that can be identified by
+/// small unsigned keys.
+///
+/// SparseSet allocates memory proportional to the size of the key universe, so
+/// it is not recommended for building composite data structures.  It is useful
+/// for algorithms that require a single set with fast operations.
+///
+/// Compared to DenseSet and DenseMap, SparseSet provides constant-time fast
+/// clear() and iteration as fast as a vector.  The find(), insert(), and
+/// erase() operations are all constant time, and typically faster than a hash
+/// table.  The iteration order doesn't depend on numerical key values, it only
+/// depends on the order of insert() and erase() operations.  When no elements
+/// have been erased, the iteration order is the insertion order.
+///
+/// Compared to BitVector, SparseSet<unsigned> uses 8x-40x more memory, but
+/// offers constant-time clear() and size() operations as well as fast
+/// iteration independent of the size of the universe.
+///
+/// SparseSet contains a dense vector holding all the objects and a sparse
+/// array holding indexes into the dense vector.  Most of the memory is used by
+/// the sparse array which is the size of the key universe.  The SparseT
+/// template parameter provides a space/speed tradeoff for sets holding many
+/// elements.
+///
+/// When SparseT is uint32_t, find() only touches 2 cache lines, but the sparse
+/// array uses 4 x Universe bytes.
+///
+/// When SparseT is uint8_t (the default), find() touches up to 2+[N/256] cache
+/// lines, but the sparse array is 4x smaller.  N is the number of elements in
+/// the set.
+///
+/// For sets that may grow to thousands of elements, SparseT should be set to
+/// uint16_t or uint32_t.
+///
+/// @param ValueT      The type of objects in the set.
+/// @param SparseT     An unsigned integer type. See above.
+/// @param KeyFunctorT A functor that computes the unsigned key of a ValueT.
+///
+template<typename ValueT,
+         typename SparseT = uint8_t,
+         typename KeyFunctorT = SparseSetFunctor<ValueT> >
+class SparseSet {
+  typedef SmallVector<ValueT, 8> DenseT;
+  DenseT Dense;
+  SparseT *Sparse;
+  unsigned Universe;
+  KeyFunctorT KeyOf;
+
+  // Disable copy construction and assignment.
+  // This data structure is not meant to be used that way.
+  SparseSet(const SparseSet&); // DO NOT IMPLEMENT.
+  SparseSet &operator=(const SparseSet&); // DO NOT IMPLEMENT.
+
+public:
+  typedef ValueT value_type;
+  typedef ValueT &reference;
+  typedef const ValueT &const_reference;
+  typedef ValueT *pointer;
+  typedef const ValueT *const_pointer;
+
+  SparseSet() : Sparse(0), Universe(0) {}
+  ~SparseSet() { free(Sparse); }
+
+  /// setUniverse - Set the universe size which determines the largest key the
+  /// set can hold.  The universe must be sized before any elements can be
+  /// added.
+  ///
+  /// @param U Universe size. All object keys must be less than U.
+  ///
+  void setUniverse(unsigned U) {
+    // It's not hard to resize the universe on a non-empty set, but it doesn't
+    // seem like a likely use case, so we can add that code when we need it.
+    assert(empty() && "Can only resize universe on an empty map");
+    // Hysteresis prevents needless reallocations.
+    if (U >= Universe/4 && U <= Universe)
+      return;
+    free(Sparse);
+    // The Sparse array doesn't actually need to be initialized, so malloc
+    // would be enough here, but that will cause tools like valgrind to
+    // complain about branching on uninitialized data.
+    Sparse = reinterpret_cast<SparseT*>(calloc(U, sizeof(SparseT)));
+    Universe = U;
+  }
+
+  // Import trivial vector stuff from DenseT.
+  typedef typename DenseT::iterator iterator;
+  typedef typename DenseT::const_iterator const_iterator;
+
+  const_iterator begin() const { return Dense.begin(); }
+  const_iterator end() const { return Dense.end(); }
+  iterator begin() { return Dense.begin(); }
+  iterator end() { return Dense.end(); }
+
+  /// empty - Returns true if the set is empty.
+  ///
+  /// This is not the same as BitVector::empty().
+  ///
+  bool empty() const { return Dense.empty(); }
+
+  /// size - Returns the number of elements in the set.
+  ///
+  /// This is not the same as BitVector::size() which returns the size of the
+  /// universe.
+  ///
+  unsigned size() const { return Dense.size(); }
+
+  /// clear - Clears the set.  This is a very fast constant time operation.
+  ///
+  void clear() {
+    // Sparse does not need to be cleared, see find().
+    Dense.clear();
+  }
+
+  /// find - Find an element by its key.
+  ///
+  /// @param   Key A valid key to find.
+  /// @returns An iterator to the element identified by key, or end().
+  ///
+  iterator find(unsigned Key) {
+    assert(Key < Universe && "Key out of range");
+    assert(std::numeric_limits<SparseT>::is_integer &&
+           !std::numeric_limits<SparseT>::is_signed &&
+           "SparseT must be an unsigned integer type");
+    const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u;
+    for (unsigned i = Sparse[Key], e = size(); i < e; i += Stride) {
+      const unsigned FoundKey = KeyOf(Dense[i]);
+      assert(FoundKey < Universe && "Invalid key in set. Did object mutate?");
+      if (Key == FoundKey)
+        return begin() + i;
+      // Stride is 0 when SparseT >= unsigned.  We don't need to loop.
+      if (!Stride)
+        break;
+    }
+    return end();
+  }
+
+  const_iterator find(unsigned Key) const {
+    return const_cast<SparseSet*>(this)->find(Key);
+  }
+
+  /// count - Returns true if this set contains an element identified by Key.
+  ///
+  bool count(unsigned Key) const {
+    return find(Key) != end();
+  }
+
+  /// insert - Attempts to insert a new element.
+  ///
+  /// If Val is successfully inserted, return (I, true), where I is an iterator
+  /// pointing to the newly inserted element.
+  ///
+  /// If the set already contains an element with the same key as Val, return
+  /// (I, false), where I is an iterator pointing to the existing element.
+  ///
+  /// Insertion invalidates all iterators.
+  ///
+  std::pair<iterator, bool> insert(const ValueT &Val) {
+    unsigned Key = KeyOf(Val);
+    iterator I = find(Key);
+    if (I != end())
+      return std::make_pair(I, false);
+    Sparse[Key] = size();
+    Dense.push_back(Val);
+    return std::make_pair(end() - 1, true);
+  }
+
+  /// array subscript - If an element already exists with this key, return it.
+  /// Otherwise, automatically construct a new value from Key, insert it,
+  /// and return the newly inserted element.
+  ValueT &operator[](unsigned Key) {
+    return *insert(ValueT(Key)).first;
+  }
+
+  /// erase - Erases an existing element identified by a valid iterator.
+  ///
+  /// This invalidates all iterators, but erase() returns an iterator pointing
+  /// to the next element.  This makes it possible to erase selected elements
+  /// while iterating over the set:
+  ///
+  ///   for (SparseSet::iterator I = Set.begin(); I != Set.end();)
+  ///     if (test(*I))
+  ///       I = Set.erase(I);
+  ///     else
+  ///       ++I;
+  ///
+  /// Note that end() changes when elements are erased, unlike std::list.
+  ///
+  iterator erase(iterator I) {
+    assert(unsigned(I - begin()) < size() && "Invalid iterator");
+    if (I != end() - 1) {
+      *I = Dense.back();
+      unsigned BackKey = KeyOf(Dense.back());
+      assert(BackKey < Universe && "Invalid key in set. Did object mutate?");
+      Sparse[BackKey] = I - begin();
+    }
+    // This depends on SmallVector::pop_back() not invalidating iterators.
+    // std::vector::pop_back() doesn't give that guarantee.
+    Dense.pop_back();
+    return I;
+  }
+
+  /// erase - Erases an element identified by Key, if it exists.
+  ///
+  /// @param   Key The key identifying the element to erase.
+  /// @returns True when an element was erased, false if no element was found.
+  ///
+  bool erase(unsigned Key) {
+    iterator I = find(Key);
+    if (I == end())
+      return false;
+    erase(I);
+    return true;
+  }
+
+};
+
+} // end namespace llvm
+
+#endif
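For a concrete feel of the interface, here is a minimal sketch (not part of the changeset) of a SparseSet of ad-hoc structs keyed through the default SparseSetFunctor, which forwards to getSparseSetKey(); the LiveReg type and the key values are illustrative only:

#include "llvm/ADT/SparseSet.h"

struct LiveReg {
  unsigned VirtReg;                 // doubles as the sparse-set key
  unsigned LastUse;
  unsigned getSparseSetKey() const { return VirtReg; }
};

static void track() {
  llvm::SparseSet<LiveReg> Live;
  Live.setUniverse(64);             // all keys must be < 64 from here on

  LiveReg R = { 3, 0 };
  Live.insert(R);                   // constant time
  if (Live.count(3))                // constant-time membership test
    Live.find(3)->LastUse = 42;
  Live.clear();                     // constant time, universe untouched
}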
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/llvm/include/llvm/ADT/VariadicFunction.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/llvm/include/llvm/ADT/VariadicFunction.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,331 @@
+//===--- VariadicFunction.h - Variadic Functions ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file implements compile-time type-safe variadic functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_VARIADIC_FUNCTION_H
+#define LLVM_ADT_VARIADIC_FUNCTION_H
+
+#include "llvm/ADT/ArrayRef.h"
+
+namespace llvm {
+
+// Define macros to aid in expanding a comma separated series with the index of
+// the series pasted onto the last token.
+#define LLVM_COMMA_JOIN1(x) x ## 0
+#define LLVM_COMMA_JOIN2(x) LLVM_COMMA_JOIN1(x), x ## 1
+#define LLVM_COMMA_JOIN3(x) LLVM_COMMA_JOIN2(x), x ## 2
+#define LLVM_COMMA_JOIN4(x) LLVM_COMMA_JOIN3(x), x ## 3
+#define LLVM_COMMA_JOIN5(x) LLVM_COMMA_JOIN4(x), x ## 4
+#define LLVM_COMMA_JOIN6(x) LLVM_COMMA_JOIN5(x), x ## 5
+#define LLVM_COMMA_JOIN7(x) LLVM_COMMA_JOIN6(x), x ## 6
+#define LLVM_COMMA_JOIN8(x) LLVM_COMMA_JOIN7(x), x ## 7
+#define LLVM_COMMA_JOIN9(x) LLVM_COMMA_JOIN8(x), x ## 8
+#define LLVM_COMMA_JOIN10(x) LLVM_COMMA_JOIN9(x), x ## 9
+#define LLVM_COMMA_JOIN11(x) LLVM_COMMA_JOIN10(x), x ## 10
+#define LLVM_COMMA_JOIN12(x) LLVM_COMMA_JOIN11(x), x ## 11
+#define LLVM_COMMA_JOIN13(x) LLVM_COMMA_JOIN12(x), x ## 12
+#define LLVM_COMMA_JOIN14(x) LLVM_COMMA_JOIN13(x), x ## 13
+#define LLVM_COMMA_JOIN15(x) LLVM_COMMA_JOIN14(x), x ## 14
+#define LLVM_COMMA_JOIN16(x) LLVM_COMMA_JOIN15(x), x ## 15
+#define LLVM_COMMA_JOIN17(x) LLVM_COMMA_JOIN16(x), x ## 16
+#define LLVM_COMMA_JOIN18(x) LLVM_COMMA_JOIN17(x), x ## 17
+#define LLVM_COMMA_JOIN19(x) LLVM_COMMA_JOIN18(x), x ## 18
+#define LLVM_COMMA_JOIN20(x) LLVM_COMMA_JOIN19(x), x ## 19
+#define LLVM_COMMA_JOIN21(x) LLVM_COMMA_JOIN20(x), x ## 20
+#define LLVM_COMMA_JOIN22(x) LLVM_COMMA_JOIN21(x), x ## 21
+#define LLVM_COMMA_JOIN23(x) LLVM_COMMA_JOIN22(x), x ## 22
+#define LLVM_COMMA_JOIN24(x) LLVM_COMMA_JOIN23(x), x ## 23
+#define LLVM_COMMA_JOIN25(x) LLVM_COMMA_JOIN24(x), x ## 24
+#define LLVM_COMMA_JOIN26(x) LLVM_COMMA_JOIN25(x), x ## 25
+#define LLVM_COMMA_JOIN27(x) LLVM_COMMA_JOIN26(x), x ## 26
+#define LLVM_COMMA_JOIN28(x) LLVM_COMMA_JOIN27(x), x ## 27
+#define LLVM_COMMA_JOIN29(x) LLVM_COMMA_JOIN28(x), x ## 28
+#define LLVM_COMMA_JOIN30(x) LLVM_COMMA_JOIN29(x), x ## 29
+#define LLVM_COMMA_JOIN31(x) LLVM_COMMA_JOIN30(x), x ## 30
+#define LLVM_COMMA_JOIN32(x) LLVM_COMMA_JOIN31(x), x ## 31
+
+/// \brief Class which can simulate a type-safe variadic function.
+///
+/// The VariadicFunction class template makes it easy to define
+/// type-safe variadic functions where all arguments have the same
+/// type.
+///
+/// Suppose we need a variadic function like this:
+///
+///   ResultT Foo(const ArgT &A_0, const ArgT &A_1, ..., const ArgT &A_N);
+///
+/// Instead of many overloads of Foo(), we only need to define a helper
+/// function that takes an array of arguments:
+///
+///   ResultT FooImpl(ArrayRef<const ArgT *> Args) {
+///     // 'Args[i]' is a pointer to the i-th argument passed to Foo().
+///     ...
+///   }
+///
+/// and then define Foo() like this:
+///
+///   const VariadicFunction<ResultT, ArgT, FooImpl> Foo;
+///
+/// VariadicFunction takes care of defining the overloads of Foo().
+///
+/// Actually, Foo is a function object (i.e. functor) instead of a plain
+/// function.  This object is stateless and its constructor/destructor
+/// does nothing, so it's safe to create global objects and call Foo(...) at
+/// any time.
+///
+/// Sometimes we need a variadic function to have some fixed leading
+/// arguments whose types may be different from that of the optional
+/// arguments.  For example:
+///
+///   bool FullMatch(const StringRef &S, const RE &Regex,
+///                  const ArgT &A_0, ..., const ArgT &A_N);
+///
+/// VariadicFunctionN is for such cases, where N is the number of fixed
+/// arguments.  It is like VariadicFunction, except that it takes N more
+/// template arguments for the types of the fixed arguments:
+///
+///   bool FullMatchImpl(const StringRef &S, const RE &Regex,
+///                      ArrayRef<const ArgT *> Args) { ... }
+///   const VariadicFunction2<bool, const StringRef&,
+///                           const RE&, ArgT, FullMatchImpl>
+///       FullMatch;
+///
+/// Currently VariadicFunction and friends support up to 3
+/// fixed leading arguments and up to 32 optional arguments.
+template <typename ResultT, typename ArgT,
+          ResultT (*Func)(ArrayRef<const ArgT *>)>
+struct VariadicFunction {
+  ResultT operator()() const {
+    return Func(ArrayRef<const ArgT *>());
+  }
+
+#define LLVM_DEFINE_OVERLOAD(N) \
+  ResultT operator()(LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
+    const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
+    return Func(makeArrayRef(Args)); \
+  }
+  LLVM_DEFINE_OVERLOAD(1)
+  LLVM_DEFINE_OVERLOAD(2)
+  LLVM_DEFINE_OVERLOAD(3)
+  LLVM_DEFINE_OVERLOAD(4)
+  LLVM_DEFINE_OVERLOAD(5)
+  LLVM_DEFINE_OVERLOAD(6)
+  LLVM_DEFINE_OVERLOAD(7)
+  LLVM_DEFINE_OVERLOAD(8)
+  LLVM_DEFINE_OVERLOAD(9)
+  LLVM_DEFINE_OVERLOAD(10)
+  LLVM_DEFINE_OVERLOAD(11)
+  LLVM_DEFINE_OVERLOAD(12)
+  LLVM_DEFINE_OVERLOAD(13)
+  LLVM_DEFINE_OVERLOAD(14)
+  LLVM_DEFINE_OVERLOAD(15)
+  LLVM_DEFINE_OVERLOAD(16)
+  LLVM_DEFINE_OVERLOAD(17)
+  LLVM_DEFINE_OVERLOAD(18)
+  LLVM_DEFINE_OVERLOAD(19)
+  LLVM_DEFINE_OVERLOAD(20)
+  LLVM_DEFINE_OVERLOAD(21)
+  LLVM_DEFINE_OVERLOAD(22)
+  LLVM_DEFINE_OVERLOAD(23)
+  LLVM_DEFINE_OVERLOAD(24)
+  LLVM_DEFINE_OVERLOAD(25)
+  LLVM_DEFINE_OVERLOAD(26)
+  LLVM_DEFINE_OVERLOAD(27)
+  LLVM_DEFINE_OVERLOAD(28)
+  LLVM_DEFINE_OVERLOAD(29)
+  LLVM_DEFINE_OVERLOAD(30)
+  LLVM_DEFINE_OVERLOAD(31)
+  LLVM_DEFINE_OVERLOAD(32)
+#undef LLVM_DEFINE_OVERLOAD
+};
+
+template <typename ResultT, typename Param0T, typename ArgT,
+          ResultT (*Func)(Param0T, ArrayRef<const ArgT *>)>
+struct VariadicFunction1 {
+  ResultT operator()(Param0T P0) const {
+    return Func(P0, ArrayRef<const ArgT *>());
+  }
+
+#define LLVM_DEFINE_OVERLOAD(N) \
+  ResultT operator()(Param0T P0, LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
+    const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
+    return Func(P0, makeArrayRef(Args)); \
+  }
+  LLVM_DEFINE_OVERLOAD(1)
+  LLVM_DEFINE_OVERLOAD(2)
+  LLVM_DEFINE_OVERLOAD(3)
+  LLVM_DEFINE_OVERLOAD(4)
+  LLVM_DEFINE_OVERLOAD(5)
+  LLVM_DEFINE_OVERLOAD(6)
+  LLVM_DEFINE_OVERLOAD(7)
+  LLVM_DEFINE_OVERLOAD(8)
+  LLVM_DEFINE_OVERLOAD(9)
+  LLVM_DEFINE_OVERLOAD(10)
+  LLVM_DEFINE_OVERLOAD(11)
+  LLVM_DEFINE_OVERLOAD(12)
+  LLVM_DEFINE_OVERLOAD(13)
+  LLVM_DEFINE_OVERLOAD(14)
+  LLVM_DEFINE_OVERLOAD(15)
+  LLVM_DEFINE_OVERLOAD(16)
+  LLVM_DEFINE_OVERLOAD(17)
+  LLVM_DEFINE_OVERLOAD(18)
+  LLVM_DEFINE_OVERLOAD(19)
+  LLVM_DEFINE_OVERLOAD(20)
+  LLVM_DEFINE_OVERLOAD(21)
+  LLVM_DEFINE_OVERLOAD(22)
+  LLVM_DEFINE_OVERLOAD(23)
+  LLVM_DEFINE_OVERLOAD(24)
+  LLVM_DEFINE_OVERLOAD(25)
+  LLVM_DEFINE_OVERLOAD(26)
+  LLVM_DEFINE_OVERLOAD(27)
+  LLVM_DEFINE_OVERLOAD(28)
+  LLVM_DEFINE_OVERLOAD(29)
+  LLVM_DEFINE_OVERLOAD(30)
+  LLVM_DEFINE_OVERLOAD(31)
+  LLVM_DEFINE_OVERLOAD(32)
+#undef LLVM_DEFINE_OVERLOAD
+};
+
+template <typename ResultT, typename Param0T, typename Param1T, typename ArgT,
+          ResultT (*Func)(Param0T, Param1T, ArrayRef<const ArgT *>)>
+struct VariadicFunction2 {
+  ResultT operator()(Param0T P0, Param1T P1) const {
+    return Func(P0, P1, ArrayRef<const ArgT *>());
+  }
+
+#define LLVM_DEFINE_OVERLOAD(N) \
+  ResultT operator()(Param0T P0, Param1T P1, \
+                     LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
+    const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
+    return Func(P0, P1, makeArrayRef(Args)); \
+  }
+  LLVM_DEFINE_OVERLOAD(1)
+  LLVM_DEFINE_OVERLOAD(2)
+  LLVM_DEFINE_OVERLOAD(3)
+  LLVM_DEFINE_OVERLOAD(4)
+  LLVM_DEFINE_OVERLOAD(5)
+  LLVM_DEFINE_OVERLOAD(6)
+  LLVM_DEFINE_OVERLOAD(7)
+  LLVM_DEFINE_OVERLOAD(8)
+  LLVM_DEFINE_OVERLOAD(9)
+  LLVM_DEFINE_OVERLOAD(10)
+  LLVM_DEFINE_OVERLOAD(11)
+  LLVM_DEFINE_OVERLOAD(12)
+  LLVM_DEFINE_OVERLOAD(13)
+  LLVM_DEFINE_OVERLOAD(14)
+  LLVM_DEFINE_OVERLOAD(15)
+  LLVM_DEFINE_OVERLOAD(16)
+  LLVM_DEFINE_OVERLOAD(17)
+  LLVM_DEFINE_OVERLOAD(18)
+  LLVM_DEFINE_OVERLOAD(19)
+  LLVM_DEFINE_OVERLOAD(20)
+  LLVM_DEFINE_OVERLOAD(21)
+  LLVM_DEFINE_OVERLOAD(22)
+  LLVM_DEFINE_OVERLOAD(23)
+  LLVM_DEFINE_OVERLOAD(24)
+  LLVM_DEFINE_OVERLOAD(25)
+  LLVM_DEFINE_OVERLOAD(26)
+  LLVM_DEFINE_OVERLOAD(27)
+  LLVM_DEFINE_OVERLOAD(28)
+  LLVM_DEFINE_OVERLOAD(29)
+  LLVM_DEFINE_OVERLOAD(30)
+  LLVM_DEFINE_OVERLOAD(31)
+  LLVM_DEFINE_OVERLOAD(32)
+#undef LLVM_DEFINE_OVERLOAD
+};
+
+template <typename ResultT, typename Param0T, typename Param1T,
+          typename Param2T, typename ArgT,
+          ResultT (*Func)(Param0T, Param1T, Param2T, ArrayRef<const ArgT *>)>
+struct VariadicFunction3 {
+  ResultT operator()(Param0T P0, Param1T P1, Param2T P2) const {
+    return Func(P0, P1, P2, ArrayRef<const ArgT *>());
+  }
+
+#define LLVM_DEFINE_OVERLOAD(N) \
+  ResultT operator()(Param0T P0, Param1T P1, Param2T P2, \
+                     LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
+    const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
+    return Func(P0, P1, P2, makeArrayRef(Args)); \
+  }
+  LLVM_DEFINE_OVERLOAD(1)
+  LLVM_DEFINE_OVERLOAD(2)
+  LLVM_DEFINE_OVERLOAD(3)
+  LLVM_DEFINE_OVERLOAD(4)
+  LLVM_DEFINE_OVERLOAD(5)
+  LLVM_DEFINE_OVERLOAD(6)
+  LLVM_DEFINE_OVERLOAD(7)
+  LLVM_DEFINE_OVERLOAD(8)
+  LLVM_DEFINE_OVERLOAD(9)
+  LLVM_DEFINE_OVERLOAD(10)
+  LLVM_DEFINE_OVERLOAD(11)
+  LLVM_DEFINE_OVERLOAD(12)
+  LLVM_DEFINE_OVERLOAD(13)
+  LLVM_DEFINE_OVERLOAD(14)
+  LLVM_DEFINE_OVERLOAD(15)
+  LLVM_DEFINE_OVERLOAD(16)
+  LLVM_DEFINE_OVERLOAD(17)
+  LLVM_DEFINE_OVERLOAD(18)
+  LLVM_DEFINE_OVERLOAD(19)
+  LLVM_DEFINE_OVERLOAD(20)
+  LLVM_DEFINE_OVERLOAD(21)
+  LLVM_DEFINE_OVERLOAD(22)
+  LLVM_DEFINE_OVERLOAD(23)
+  LLVM_DEFINE_OVERLOAD(24)
+  LLVM_DEFINE_OVERLOAD(25)
+  LLVM_DEFINE_OVERLOAD(26)
+  LLVM_DEFINE_OVERLOAD(27)
+  LLVM_DEFINE_OVERLOAD(28)
+  LLVM_DEFINE_OVERLOAD(29)
+  LLVM_DEFINE_OVERLOAD(30)
+  LLVM_DEFINE_OVERLOAD(31)
+  LLVM_DEFINE_OVERLOAD(32)
+#undef LLVM_DEFINE_OVERLOAD
+};
+
+// Cleanup the macro namespace.
+#undef LLVM_COMMA_JOIN1
+#undef LLVM_COMMA_JOIN2
+#undef LLVM_COMMA_JOIN3
+#undef LLVM_COMMA_JOIN4
+#undef LLVM_COMMA_JOIN5
+#undef LLVM_COMMA_JOIN6
+#undef LLVM_COMMA_JOIN7
+#undef LLVM_COMMA_JOIN8
+#undef LLVM_COMMA_JOIN9
+#undef LLVM_COMMA_JOIN10
+#undef LLVM_COMMA_JOIN11
+#undef LLVM_COMMA_JOIN12
+#undef LLVM_COMMA_JOIN13
+#undef LLVM_COMMA_JOIN14
+#undef LLVM_COMMA_JOIN15
+#undef LLVM_COMMA_JOIN16
+#undef LLVM_COMMA_JOIN17
+#undef LLVM_COMMA_JOIN18
+#undef LLVM_COMMA_JOIN19
+#undef LLVM_COMMA_JOIN20
+#undef LLVM_COMMA_JOIN21
+#undef LLVM_COMMA_JOIN22
+#undef LLVM_COMMA_JOIN23
+#undef LLVM_COMMA_JOIN24
+#undef LLVM_COMMA_JOIN25
+#undef LLVM_COMMA_JOIN26
+#undef LLVM_COMMA_JOIN27
+#undef LLVM_COMMA_JOIN28
+#undef LLVM_COMMA_JOIN29
+#undef LLVM_COMMA_JOIN30
+#undef LLVM_COMMA_JOIN31
+#undef LLVM_COMMA_JOIN32
+
+} // end namespace llvm
+
+#endif  // LLVM_ADT_VARIADIC_FUNCTION_H
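
As an illustration of the usage pattern documented in this header, a minimal
sketch (the names ConcatImpl and Concat are hypothetical, not part of the
changeset); the helper takes the pointer array, and the functor fans out the
0..32-argument overloads:

  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/ADT/StringRef.h"
  #include "llvm/ADT/VariadicFunction.h"
  #include <string>

  // Helper taking an array of argument pointers, as VariadicFunction requires.
  std::string ConcatImpl(llvm::ArrayRef<const llvm::StringRef *> Args) {
    std::string Result;
    for (unsigned i = 0, e = Args.size(); i != e; ++i)
      Result += Args[i]->str();
    return Result;
  }

  // Concat(...) now accepts between 0 and 32 StringRef arguments.
  const llvm::VariadicFunction<std::string, llvm::StringRef, ConcatImpl>
      Concat = {};

  // Example call: std::string S = Concat(llvm::StringRef("a"),
  //                                      llvm::StringRef("b"));
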
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/llvm/include/llvm/ADT/edit_distance.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/llvm/include/llvm/ADT/edit_distance.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,102 @@
+//===-- llvm/ADT/edit_distance.h - Array edit distance function --- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a Levenshtein distance function that works for any two
+// sequences, with each element of each sequence being analogous to a character
+// in a string.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_EDIT_DISTANCE_H
+#define LLVM_ADT_EDIT_DISTANCE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/OwningPtr.h"
+#include <algorithm>
+
+namespace llvm {
+
+/// \brief Determine the edit distance between two sequences.
+///
+/// \param FromArray the first sequence to compare.
+///
+/// \param ToArray the second sequence to compare.
+///
+/// \param AllowReplacements whether to allow element replacements (change one
+/// element into another) as a single operation, rather than as two operations
+/// (an insertion and a removal).
+///
+/// \param MaxEditDistance If non-zero, the maximum edit distance that this
+/// routine is allowed to compute. If the edit distance will exceed that
+/// maximum, returns \c MaxEditDistance+1.
+///
+/// \returns the minimum number of element insertions, removals, or (if
+/// \p AllowReplacements is \c true) replacements needed to transform one of
+/// the given sequences into the other. If zero, the sequences are identical.
+template<typename T>
+unsigned ComputeEditDistance(ArrayRef<T> FromArray, ArrayRef<T> ToArray,
+                             bool AllowReplacements = true,
+                             unsigned MaxEditDistance = 0) {
+  // The algorithm implemented below is the "classic"
+  // dynamic-programming algorithm for computing the Levenshtein
+  // distance, which is described here:
+  //
+  //   http://en.wikipedia.org/wiki/Levenshtein_distance
+  //
+  // Although the algorithm is typically described using an m x n
+  // array, only two rows are used at a time, so this implementation
+  // just keeps two separate vectors for those two rows.
+  typename ArrayRef<T>::size_type m = FromArray.size();
+  typename ArrayRef<T>::size_type n = ToArray.size();
+
+  const unsigned SmallBufferSize = 64;
+  unsigned SmallBuffer[SmallBufferSize];
+  llvm::OwningArrayPtr<unsigned> Allocated;
+  unsigned *Previous = SmallBuffer;
+  if (2*(n + 1) > SmallBufferSize) {
+    Previous = new unsigned [2*(n+1)];
+    Allocated.reset(Previous);
+  }
+  unsigned *Current = Previous + (n + 1);
+
+  for (unsigned i = 0; i <= n; ++i)
+    Previous[i] = i;
+
+  for (typename ArrayRef<T>::size_type y = 1; y <= m; ++y) {
+    Current[0] = y;
+    unsigned BestThisRow = Current[0];
+
+    for (typename ArrayRef<T>::size_type x = 1; x <= n; ++x) {
+      if (AllowReplacements) {
+        Current[x] = std::min(
+            Previous[x-1] + (FromArray[y-1] == ToArray[x-1] ? 0u : 1u),
+            std::min(Current[x-1], Previous[x])+1);
+      }
+      else {
+        if (FromArray[y-1] == ToArray[x-1]) Current[x] = Previous[x-1];
+        else Current[x] = std::min(Current[x-1], Previous[x]) + 1;
+      }
+      BestThisRow = std::min(BestThisRow, Current[x]);
+    }
+
+    if (MaxEditDistance && BestThisRow > MaxEditDistance)
+      return MaxEditDistance + 1;
+
+    unsigned *tmp = Current;
+    Current = Previous;
+    Previous = tmp;
+  }
+
+  unsigned Result = Previous[n];
+  return Result;
+}
+
+} // End llvm namespace
+
+#endif
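
A small usage sketch for the template above (the strings and helper name are
illustrative only); this is roughly the character-sequence case that
StringRef-based spell checking builds on:

  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/ADT/StringRef.h"
  #include "llvm/ADT/edit_distance.h"

  unsigned editDistanceExample() {
    llvm::StringRef From("kitten"), To("sitting");
    // Compare the two character sequences; with replacements allowed the
    // classic answer for these strings is 3.
    return llvm::ComputeEditDistance(
        llvm::ArrayRef<char>(From.data(), From.size()),
        llvm::ArrayRef<char>(To.data(), To.size()),
        /*AllowReplacements=*/true,
        /*MaxEditDistance=*/0);
  }
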
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/llvm/include/llvm/CodeGen/DFAPacketizer.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/llvm/include/llvm/CodeGen/DFAPacketizer.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,167 @@
+//=- llvm/CodeGen/DFAPacketizer.h - DFA Packetizer for VLIW ---*- C++ -*-=====//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This class implements a deterministic finite automaton (DFA) based
+// packetizing mechanism for VLIW architectures. It provides APIs to
+// determine whether there exists a legal mapping of instructions to
+// functional unit assignments in a packet. The DFA is auto-generated from
+// the target's Schedule.td file.
+//
+// A DFA consists of 3 major elements: states, inputs, and transitions. For
+// the packetizing mechanism, the input is the set of instruction classes for
+// a target. The state models all possible combinations of functional unit
+// consumption for a given set of instructions in a packet. A transition
+// models the addition of an instruction to a packet. In the DFA constructed
+// by this class, if an instruction can be added to a packet, then a valid
+// transition exists from the corresponding state. Invalid transitions
+// indicate that the instruction cannot be added to the current packet.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_DFAPACKETIZER_H
+#define LLVM_CODEGEN_DFAPACKETIZER_H
+
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/ADT/DenseMap.h"
+#include <map>
+
+namespace llvm {
+
+class MCInstrDesc;
+class MachineInstr;
+class MachineLoopInfo;
+class MachineDominatorTree;
+class InstrItineraryData;
+class DefaultVLIWScheduler;
+class SUnit;
+
+class DFAPacketizer {
+private:
+  typedef std::pair<unsigned, unsigned> UnsignPair;
+  const InstrItineraryData *InstrItins;
+  int CurrentState;
+  const int (*DFAStateInputTable)[2];
+  const unsigned *DFAStateEntryTable;
+
+  // CachedTable is a map from <FromState, Input> to ToState.
+  DenseMap<UnsignPair, unsigned> CachedTable;
+
+  // ReadTable - Read the DFA transition table and update CachedTable.
+  void ReadTable(unsigned int state);
+
+public:
+  DFAPacketizer(const InstrItineraryData *I, const int (*SIT)[2],
+                const unsigned *SET);
+
+  // Reset the current state to make all resources available.
+  void clearResources() {
+    CurrentState = 0;
+  }
+
+  // canReserveResources - Check if the resources occupied by a MCInstrDesc
+  // are available in the current state.
+  bool canReserveResources(const llvm::MCInstrDesc *MID);
+
+  // reserveResources - Reserve the resources occupied by a MCInstrDesc and
+  // change the current state to reflect that change.
+  void reserveResources(const llvm::MCInstrDesc *MID);
+
+  // canReserveResources - Check if the resources occupied by a machine
+  // instruction are available in the current state.
+  bool canReserveResources(llvm::MachineInstr *MI);
+
+  // reserveResources - Reserve the resources occupied by a machine
+  // instruction and change the current state to reflect that change.
+  void reserveResources(llvm::MachineInstr *MI);
+
+  const InstrItineraryData *getInstrItins() const { return InstrItins; }
+};
+
+// VLIWPacketizerList - Implements a simple VLIW packetizer using DFA. The
+// packetizer works on machine basic blocks. For each instruction I in BB, the
+// packetizer consults the DFA to see if machine resources are available to
+// execute I. If so, the packetizer checks if I depends on any instruction J in
+// the current packet. If no dependency is found, I is added to the current packet
+// and the machine resources are marked as taken. If any dependency is found, a target
+// API call is made to prune the dependence.
+class VLIWPacketizerList {
+protected:
+  const TargetMachine &TM;
+  const MachineFunction &MF;
+  const TargetInstrInfo *TII;
+
+  // The VLIW Scheduler.
+  DefaultVLIWScheduler *VLIWScheduler;
+
+  // Vector of instructions assigned to the current packet.
+  std::vector<MachineInstr*> CurrentPacketMIs;
+  // DFA resource tracker.
+  DFAPacketizer *ResourceTracker;
+
+  // Generate MI -> SU map.
+  std::map<MachineInstr*, SUnit*> MIToSUnit;
+
+public:
+  VLIWPacketizerList(
+    MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
+    bool IsPostRA);
+
+  virtual ~VLIWPacketizerList();
+
+  // PacketizeMIs - Implement this API in the backend to bundle instructions.
+  void PacketizeMIs(MachineBasicBlock *MBB,
+                    MachineBasicBlock::iterator BeginItr,
+                    MachineBasicBlock::iterator EndItr);
+
+  // getResourceTracker - return ResourceTracker
+  DFAPacketizer *getResourceTracker() {return ResourceTracker;}
+
+  // addToPacket - Add MI to the current packet.
+  virtual MachineBasicBlock::iterator addToPacket(MachineInstr *MI) {
+    MachineBasicBlock::iterator MII = MI;
+    CurrentPacketMIs.push_back(MI);
+    ResourceTracker->reserveResources(MI);
+    return MII;
+  }
+
+  // endPacket - End the current packet.
+  void endPacket(MachineBasicBlock *MBB, MachineInstr *MI);
+
+  // initPacketizerState - perform initialization before packetizing
+  // an instruction. This function is supposed to be overrided by
+  // the target dependent packetizer.
+  virtual void initPacketizerState(void) { return; }
+
+  // ignorePseudoInstruction - Ignore bundling of pseudo instructions.
+  virtual bool ignorePseudoInstruction(MachineInstr *I,
+                                       MachineBasicBlock *MBB) {
+    return false;
+  }
+
+  // isSoloInstruction - return true if instruction MI can not be packetized
+  // with any other instruction, which means that MI itself is a packet.
+  virtual bool isSoloInstruction(MachineInstr *MI) {
+    return true;
+  }
+
+  // isLegalToPacketizeTogether - Is it legal to packetize SUI and SUJ
+  // together.
+  virtual bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
+    return false;
+  }
+
+  // isLegalToPruneDependencies - Is it legal to prune dependence between SUI
+  // and SUJ.
+  virtual bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {
+    return false;
+  }
+
+};
+}
+
+#endif
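
A rough sketch of how the resource-checking API above is typically driven
while forming packets; how the DFAPacketizer instance is obtained and how the
packet is later emitted are assumptions, not part of this changeset:

  #include "llvm/CodeGen/DFAPacketizer.h"
  #include <vector>

  // Try to add MI to the packet tracked by Packetizer; start a new packet if
  // no legal DFA transition exists for MI's instruction class.
  void addOrStartPacket(llvm::DFAPacketizer *Packetizer,
                        llvm::MachineInstr *MI,
                        std::vector<llvm::MachineInstr*> &CurrentPacket) {
    if (!Packetizer->canReserveResources(MI)) {
      // End the current packet and make all functional units available again.
      CurrentPacket.clear();
      Packetizer->clearResources();
    }
    Packetizer->reserveResources(MI);
    CurrentPacket.push_back(MI);
  }
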
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/llvm/include/llvm/CodeGen/LiveRangeEdit.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/llvm/include/llvm/CodeGen/LiveRangeEdit.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,207 @@
+//===---- LiveRangeEdit.h - Basic tools for split and spill -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The LiveRangeEdit class represents changes done to a virtual register when it
+// is spilled or split.
+//
+// The parent register is never changed. Instead, a number of new virtual
+// registers are created and added to the newRegs vector.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVERANGEEDIT_H
+#define LLVM_CODEGEN_LIVERANGEEDIT_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+
+class AliasAnalysis;
+class LiveIntervals;
+class MachineLoopInfo;
+class MachineRegisterInfo;
+class VirtRegMap;
+
+class LiveRangeEdit {
+public:
+  /// Callback methods for LiveRangeEdit owners.
+  class Delegate {
+    virtual void anchor();
+  public:
+    /// Called immediately before erasing a dead machine instruction.
+    virtual void LRE_WillEraseInstruction(MachineInstr *MI) {}
+
+    /// Called when a virtual register is no longer used. Return false to defer
+    /// its deletion from LiveIntervals.
+    virtual bool LRE_CanEraseVirtReg(unsigned) { return true; }
+
+    /// Called before shrinking the live range of a virtual register.
+    virtual void LRE_WillShrinkVirtReg(unsigned) {}
+
+    /// Called after cloning a virtual register.
+    /// This is used for new registers representing connected components of Old.
+    virtual void LRE_DidCloneVirtReg(unsigned New, unsigned Old) {}
+
+    virtual ~Delegate() {}
+  };
+
+private:
+  LiveInterval &parent_;
+  SmallVectorImpl<LiveInterval*> &newRegs_;
+  MachineRegisterInfo &MRI;
+  LiveIntervals &LIS;
+  VirtRegMap *VRM;
+  const TargetInstrInfo &TII;
+  Delegate *const delegate_;
+
+  /// firstNew_ - Index of the first register added to newRegs_.
+  const unsigned firstNew_;
+
+  /// scannedRemattable_ - true when remattable values have been identified.
+  bool scannedRemattable_;
+
+  /// remattable_ - Values defined by remattable instructions as identified by
+  /// tii.isTriviallyReMaterializable().
+  SmallPtrSet<const VNInfo*,4> remattable_;
+
+  /// rematted_ - Values that were actually rematted, and so need to have their
+  /// live range trimmed or entirely removed.
+  SmallPtrSet<const VNInfo*,4> rematted_;
+
+  /// scanRemattable - Identify the parent_ values that may rematerialize.
+  void scanRemattable(AliasAnalysis *aa);
+
+  /// allUsesAvailableAt - Return true if all registers used by OrigMI at
+  /// OrigIdx are also available with the same value at UseIdx.
+  bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
+                          SlotIndex UseIdx);
+
+  /// foldAsLoad - If LI has a single use and a single def that can be folded as
+  /// a load, eliminate the register by folding the def into the use.
+  bool foldAsLoad(LiveInterval *LI, SmallVectorImpl<MachineInstr*> &Dead);
+
+public:
+  /// Create a LiveRangeEdit for breaking down parent into smaller pieces.
+  /// @param parent The register being spilled or split.
+  /// @param newRegs List to receive any new registers created. This needn't be
+  ///                empty initially; any existing registers are ignored.
+  /// @param MF The MachineFunction the live range edit is taking place in.
+  /// @param lis The collection of all live intervals in this function.
+  /// @param vrm Map of virtual registers to physical registers for this
+  ///            function.  If NULL, no virtual register map updates will
+  ///            be done.  This could be the case if called before Regalloc.
+  LiveRangeEdit(LiveInterval &parent,
+                SmallVectorImpl<LiveInterval*> &newRegs,
+                MachineFunction &MF,
+                LiveIntervals &lis,
+                VirtRegMap *vrm,
+                Delegate *delegate = 0)
+    : parent_(parent), newRegs_(newRegs),
+      MRI(MF.getRegInfo()), LIS(lis), VRM(vrm),
+      TII(*MF.getTarget().getInstrInfo()),
+      delegate_(delegate),
+      firstNew_(newRegs.size()),
+      scannedRemattable_(false) {}
+
+  LiveInterval &getParent() const { return parent_; }
+  unsigned getReg() const { return parent_.reg; }
+
+  /// Iterator for accessing the new registers added by this edit.
+  typedef SmallVectorImpl<LiveInterval*>::const_iterator iterator;
+  iterator begin() const { return newRegs_.begin()+firstNew_; }
+  iterator end() const { return newRegs_.end(); }
+  unsigned size() const { return newRegs_.size()-firstNew_; }
+  bool empty() const { return size() == 0; }
+  LiveInterval *get(unsigned idx) const { return newRegs_[idx+firstNew_]; }
+
+  ArrayRef<LiveInterval*> regs() const {
+    return makeArrayRef(newRegs_).slice(firstNew_);
+  }
+
+  /// createFrom - Create a new virtual register based on OldReg.
+  LiveInterval &createFrom(unsigned OldReg);
+
+  /// create - Create a new register with the same class and original slot as
+  /// parent.
+  LiveInterval &create() {
+    return createFrom(getReg());
+  }
+
+  /// anyRematerializable - Return true if any parent values may be
+  /// rematerializable.
+  /// This function must be called before any rematerialization is attempted.
+  bool anyRematerializable(AliasAnalysis*);
+
+  /// checkRematerializable - Manually add VNI to the list of rematerializable
+  /// values if DefMI may be rematerializable.
+  bool checkRematerializable(VNInfo *VNI, const MachineInstr *DefMI,
+                             AliasAnalysis*);
+
+  /// Remat - Information needed to rematerialize at a specific location.
+  struct Remat {
+    VNInfo *ParentVNI;      // parent_'s value at the remat location.
+    MachineInstr *OrigMI;   // Instruction defining ParentVNI.
+    explicit Remat(VNInfo *ParentVNI) : ParentVNI(ParentVNI), OrigMI(0) {}
+  };
+
+  /// canRematerializeAt - Determine if ParentVNI can be rematerialized at
+  /// UseIdx. It is assumed that parent_.getVNInfoAt(UseIdx) == ParentVNI.
+  /// When cheapAsAMove is set, only cheap remats are allowed.
+  bool canRematerializeAt(Remat &RM,
+                          SlotIndex UseIdx,
+                          bool cheapAsAMove);
+
+  /// rematerializeAt - Rematerialize RM.ParentVNI into DestReg by inserting an
+  /// instruction into MBB before MI. The new instruction is mapped, but
+  /// liveness is not updated.
+  /// Return the SlotIndex of the new instruction.
+  SlotIndex rematerializeAt(MachineBasicBlock &MBB,
+                            MachineBasicBlock::iterator MI,
+                            unsigned DestReg,
+                            const Remat &RM,
+                            const TargetRegisterInfo&,
+                            bool Late = false);
+
+  /// markRematerialized - explicitly mark a value as rematerialized after doing
+  /// it manually.
+  void markRematerialized(const VNInfo *ParentVNI) {
+    rematted_.insert(ParentVNI);
+  }
+
+  /// didRematerialize - Return true if ParentVNI was rematerialized anywhere.
+  bool didRematerialize(const VNInfo *ParentVNI) const {
+    return rematted_.count(ParentVNI);
+  }
+
+  /// eraseVirtReg - Notify the delegate that Reg is no longer in use, and try
+  /// to erase it from LIS.
+  void eraseVirtReg(unsigned Reg);
+
+  /// eliminateDeadDefs - Try to delete machine instructions that are now dead
+  /// (allDefsAreDead returns true). This may cause live intervals to be trimmed
+  /// and further dead defs to be eliminated.
+  /// RegsBeingSpilled lists registers currently being spilled by the register
+  /// allocator.  These registers should not be split into new intervals
+  /// as currently those new intervals are not guaranteed to spill.
+  void eliminateDeadDefs(SmallVectorImpl<MachineInstr*> &Dead,
+                         ArrayRef<unsigned> RegsBeingSpilled 
+                          = ArrayRef<unsigned>());
+
+  /// calculateRegClassAndHint - Recompute register class and hint for each new
+  /// register.
+  void calculateRegClassAndHint(MachineFunction&,
+                                const MachineLoopInfo&);
+};
+
+}
+
+#endif
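
A minimal construction sketch for the class above, assuming the caller is a
spiller or splitter pass that already owns LiveIntervals and VirtRegMap; only
the setup described in the constructor comment is shown, not a real spiller:

  #include "llvm/ADT/SmallVector.h"
  #include "llvm/CodeGen/LiveRangeEdit.h"
  #include "llvm/CodeGen/MachineFunction.h"

  void splitSketch(llvm::LiveInterval &Parent,
                   llvm::MachineFunction &MF,
                   llvm::LiveIntervals &LIS,
                   llvm::VirtRegMap *VRM) {
    llvm::SmallVector<llvm::LiveInterval*, 4> NewRegs;
    llvm::LiveRangeEdit Edit(Parent, NewRegs, MF, LIS, VRM);
    // create() makes a new virtual register with the parent's register class;
    // it is appended to NewRegs and visible through Edit.begin()/Edit.end().
    llvm::LiveInterval &NewLI = Edit.create();
    (void)NewLI;
  }
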
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,203 @@
+//===-- CodeGen/MachineInstBundle.h - MI bundle utilities -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provide utility functions to manipulate machine instruction
+// bundles.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEINSTRBUNDLE_H
+#define LLVM_CODEGEN_MACHINEINSTRBUNDLE_H
+
+#include "llvm/CodeGen/MachineBasicBlock.h"
+
+namespace llvm {
+
+/// finalizeBundle - Finalize a machine instruction bundle which includes
+/// a sequence of instructions starting from FirstMI to LastMI (exclusive).
+/// This routine adds a BUNDLE instruction to represent the bundle, it adds
+/// IsInternalRead markers to MachineOperands which are defined inside the
+/// bundle, and it copies externally visible defs and uses to the BUNDLE
+/// instruction.
+void finalizeBundle(MachineBasicBlock &MBB,
+                    MachineBasicBlock::instr_iterator FirstMI,
+                    MachineBasicBlock::instr_iterator LastMI);
+  
+/// finalizeBundle - Same functionality as the previous finalizeBundle except
+/// the last instruction in the bundle is not provided as an input. This is
+/// used in cases where bundles are pre-determined by marking instructions
+/// with 'InsideBundle' marker. It returns the MBB instruction iterator that
+/// points to the end of the bundle.
+MachineBasicBlock::instr_iterator finalizeBundle(MachineBasicBlock &MBB,
+                    MachineBasicBlock::instr_iterator FirstMI);
+
+/// finalizeBundles - Finalize instruction bundles in the specified
+/// MachineFunction. Return true if any bundles are finalized.
+bool finalizeBundles(MachineFunction &MF);
+
+/// getBundleStart - Returns the first instruction in the bundle containing MI.
+///
+static inline MachineInstr *getBundleStart(MachineInstr *MI) {
+  MachineBasicBlock::instr_iterator I = MI;
+  while (I->isInsideBundle())
+    --I;
+  return I;
+}
+
+static inline const MachineInstr *getBundleStart(const MachineInstr *MI) {
+  MachineBasicBlock::const_instr_iterator I = MI;
+  while (I->isInsideBundle())
+    --I;
+  return I;
+}
+
+//===----------------------------------------------------------------------===//
+// MachineOperand iterator
+//
+
+/// MachineOperandIteratorBase - Iterator that can visit all operands on a
+/// MachineInstr, or all operands on a bundle of MachineInstrs.  This class is
+/// not intended to be used directly, use one of the sub-classes instead.
+///
+/// Intended use:
+///
+///   for (MIBundleOperands MIO(MI); MIO.isValid(); ++MIO) {
+///     if (!MIO->isReg())
+///       continue;
+///     ...
+///   }
+///
+class MachineOperandIteratorBase {
+  MachineBasicBlock::instr_iterator InstrI, InstrE;
+  MachineInstr::mop_iterator OpI, OpE;
+
+  // If the operands on InstrI are exhausted, advance InstrI to the next
+  // bundled instruction with operands.
+  void advance() {
+    while (OpI == OpE) {
+      // Don't advance off the basic block, or into a new bundle.
+      if (++InstrI == InstrE || !InstrI->isInsideBundle())
+        break;
+      OpI = InstrI->operands_begin();
+      OpE = InstrI->operands_end();
+    }
+  }
+
+protected:
+  /// MachineOperandIteratorBase - Create an iterator that visits all operands
+  /// on MI, or all operands on every instruction in the bundle containing MI.
+  ///
+  /// @param MI The instruction to examine.
+  /// @param WholeBundle When true, visit all operands on the entire bundle.
+  ///
+  explicit MachineOperandIteratorBase(MachineInstr *MI, bool WholeBundle) {
+    if (WholeBundle) {
+      InstrI = getBundleStart(MI);
+      InstrE = MI->getParent()->instr_end();
+    } else {
+      InstrI = InstrE = MI;
+      ++InstrE;
+    }
+    OpI = InstrI->operands_begin();
+    OpE = InstrI->operands_end();
+    if (WholeBundle)
+      advance();
+  }
+
+  MachineOperand &deref() const { return *OpI; }
+
+public:
+  /// isValid - Returns true until all the operands have been visited.
+  bool isValid() const { return OpI != OpE; }
+
+  /// Preincrement.  Move to the next operand.
+  void operator++() {
+    assert(isValid() && "Cannot advance MIOperands beyond the last operand");
+    ++OpI;
+    advance();
+  }
+
+  /// getOperandNo - Returns the number of the current operand relative to its
+  /// instruction.
+  ///
+  unsigned getOperandNo() const {
+    return OpI - InstrI->operands_begin();
+  }
+
+  /// RegInfo - Information about a virtual register used by a set of operands.
+  ///
+  struct RegInfo {
+    /// Reads - One of the operands reads the virtual register.  This does not
+    /// include <undef> or <internal> use operands, see MO::readsReg().
+    bool Reads;
+
+    /// Writes - One of the operands writes the virtual register.
+    bool Writes;
+
+    /// Tied - Uses and defs must use the same register. This can be because of
+    /// a two-address constraint, or there may be a partial redefinition of a
+    /// sub-register.
+    bool Tied;
+  };
+
+  /// analyzeVirtReg - Analyze how the current instruction or bundle uses a
+  /// virtual register.  This function should not be called after operator++(),
+  /// it expects a fresh iterator.
+  ///
+  /// @param Reg The virtual register to analyze.
+  /// @param Ops When set, this vector will receive an (MI, OpNum) entry for
+  ///            each operand referring to Reg.
+  /// @returns A filled-in RegInfo struct.
+  RegInfo analyzeVirtReg(unsigned Reg,
+                 SmallVectorImpl<std::pair<MachineInstr*, unsigned> > *Ops = 0);
+};
+
+/// MIOperands - Iterate over operands of a single instruction.
+///
+class MIOperands : public MachineOperandIteratorBase {
+public:
+  MIOperands(MachineInstr *MI) : MachineOperandIteratorBase(MI, false) {}
+  MachineOperand &operator* () const { return deref(); }
+  MachineOperand *operator->() const { return &deref(); }
+};
+
+/// ConstMIOperands - Iterate over operands of a single const instruction.
+///
+class ConstMIOperands : public MachineOperandIteratorBase {
+public:
+  ConstMIOperands(const MachineInstr *MI)
+    : MachineOperandIteratorBase(const_cast<MachineInstr*>(MI), false) {}
+  const MachineOperand &operator* () const { return deref(); }
+  const MachineOperand *operator->() const { return &deref(); }
+};
+
+/// MIBundleOperands - Iterate over all operands in a bundle of machine
+/// instructions.
+///
+class MIBundleOperands : public MachineOperandIteratorBase {
+public:
+  MIBundleOperands(MachineInstr *MI) : MachineOperandIteratorBase(MI, true) {}
+  MachineOperand &operator* () const { return deref(); }
+  MachineOperand *operator->() const { return &deref(); }
+};
+
+/// ConstMIBundleOperands - Iterate over all operands in a const bundle of
+/// machine instructions.
+///
+class ConstMIBundleOperands : public MachineOperandIteratorBase {
+public:
+  ConstMIBundleOperands(const MachineInstr *MI)
+    : MachineOperandIteratorBase(const_cast<MachineInstr*>(MI), true) {}
+  const MachineOperand &operator* () const { return deref(); }
+  const MachineOperand *operator->() const { return &deref(); }
+};
+
+} // End llvm namespace
+
+#endif
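
Following the intended-use loop from the MachineOperandIteratorBase comment
above, a small sketch that counts the operands in MI's bundle referring to a
given register (the helper name is illustrative):

  #include "llvm/CodeGen/MachineInstr.h"
  #include "llvm/CodeGen/MachineInstrBundle.h"

  unsigned countBundleRegOperands(llvm::MachineInstr *MI, unsigned Reg) {
    unsigned N = 0;
    // MIBundleOperands visits the operands of every instruction in MI's bundle.
    for (llvm::MIBundleOperands MIO(MI); MIO.isValid(); ++MIO) {
      if (MIO->isReg() && MIO->getReg() == Reg)
        ++N;
    }
    return N;
  }
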
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/llvm/include/llvm/CodeGen/MachineScheduler.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/llvm/include/llvm/CodeGen/MachineScheduler.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,91 @@
+//==- MachineScheduler.h - MachineInstr Scheduling Pass ----------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides a MachineSchedRegistry for registering alternative machine
+// schedulers. A Target may provide an alternative scheduler implementation by
+// implementing the following boilerplate:
+//
+// static ScheduleDAGInstrs *createCustomMachineSched(MachineSchedContext *C) {
+//  return new CustomMachineScheduler(C);
+// }
+// static MachineSchedRegistry
+// SchedCustomRegistry("custom", "Run my target's custom scheduler",
+//                     createCustomMachineSched);
+//
+// Inside <Target>PassConfig:
+//   enablePass(MachineSchedulerID);
+//   MachineSchedRegistry::setDefault(createCustomMachineSched);
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MACHINESCHEDULER_H
+#define MACHINESCHEDULER_H
+
+#include "llvm/CodeGen/MachinePassRegistry.h"
+
+namespace llvm {
+
+class AliasAnalysis;
+class LiveIntervals;
+class MachineDominatorTree;
+class MachineLoopInfo;
+class ScheduleDAGInstrs;
+
+/// MachineSchedContext provides enough context from the MachineScheduler pass
+/// for the target to instantiate a scheduler.
+struct MachineSchedContext {
+  MachineFunction *MF;
+  const MachineLoopInfo *MLI;
+  const MachineDominatorTree *MDT;
+  const TargetPassConfig *PassConfig;
+  AliasAnalysis *AA;
+  LiveIntervals *LIS;
+
+  MachineSchedContext(): MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {}
+};
+
+/// MachineSchedRegistry provides a selection of available machine instruction
+/// schedulers.
+class MachineSchedRegistry : public MachinePassRegistryNode {
+public:
+  typedef ScheduleDAGInstrs *(*ScheduleDAGCtor)(MachineSchedContext *);
+
+  // RegisterPassParser requires a (misnamed) FunctionPassCtor type.
+  typedef ScheduleDAGCtor FunctionPassCtor;
+
+  static MachinePassRegistry Registry;
+
+  MachineSchedRegistry(const char *N, const char *D, ScheduleDAGCtor C)
+    : MachinePassRegistryNode(N, D, (MachinePassCtor)C) {
+    Registry.Add(this);
+  }
+  ~MachineSchedRegistry() { Registry.Remove(this); }
+
+  // Accessors.
+  //
+  MachineSchedRegistry *getNext() const {
+    return (MachineSchedRegistry *)MachinePassRegistryNode::getNext();
+  }
+  static MachineSchedRegistry *getList() {
+    return (MachineSchedRegistry *)Registry.getList();
+  }
+  static ScheduleDAGCtor getDefault() {
+    return (ScheduleDAGCtor)Registry.getDefault();
+  }
+  static void setDefault(ScheduleDAGCtor C) {
+    Registry.setDefault((MachinePassCtor)C);
+  }
+  static void setListener(MachinePassRegistryListener *L) {
+    Registry.setListener(L);
+  }
+};
+
+} // namespace llvm
+
+#endif
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/llvm/include/llvm/CodeGen/ResourcePriorityQueue.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/llvm/include/llvm/CodeGen/ResourcePriorityQueue.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,142 @@
+//===----- ResourcePriorityQueue.h - A DFA-oriented priority queue -------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ResourcePriorityQueue class, which is a
+// SchedulingPriorityQueue that schedules using DFA state to
+// reduce the length of the critical path through the basic block
+// on VLIW platforms.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef RESOURCE_PRIORITY_QUEUE_H
+#define RESOURCE_PRIORITY_QUEUE_H
+
+#include "llvm/CodeGen/DFAPacketizer.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/MC/MCInstrItineraries.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+namespace llvm {
+  class ResourcePriorityQueue;
+
+  /// Sorting functions for the Available queue.
+  struct resource_sort : public std::binary_function<SUnit*, SUnit*, bool> {
+    ResourcePriorityQueue *PQ;
+    explicit resource_sort(ResourcePriorityQueue *pq) : PQ(pq) {}
+
+    bool operator()(const SUnit* left, const SUnit* right) const;
+  };
+
+  class ResourcePriorityQueue : public SchedulingPriorityQueue {
+    /// SUnits - The SUnits for the current graph.
+    std::vector<SUnit> *SUnits;
+
+    /// NumNodesSolelyBlocking - This vector contains, for every node in the
+    /// Queue, the number of nodes that the node is the sole unscheduled
+    /// predecessor for.  This is used as a tie-breaker heuristic for better
+    /// mobility.
+    std::vector<unsigned> NumNodesSolelyBlocking;
+
+    /// Queue - The queue.
+    std::vector<SUnit*> Queue;
+
+    /// RegPressure - Tracking current reg pressure per register class.
+    ///
+    std::vector<unsigned> RegPressure;
+
+    /// RegLimit - Tracking the number of allocatable registers per register
+    /// class.
+    std::vector<unsigned> RegLimit;
+
+    resource_sort Picker;
+    const TargetRegisterInfo *TRI;
+    const TargetLowering *TLI;
+    const TargetInstrInfo *TII;
+    const InstrItineraryData* InstrItins;
+    /// ResourcesModel - Represents VLIW state.
+    /// Not limited to VLIW targets per se, but assumes
+    /// definition of DFA by a target.
+    DFAPacketizer *ResourcesModel;
+
+    /// Resource model - packet/bundle model. Purely
+    /// internal at this time.
+    std::vector<SUnit*> Packet;
+
+    /// Heuristics for estimating register pressure.
+    unsigned ParallelLiveRanges;
+    signed HorizontalVerticalBalance;
+
+  public:
+    ResourcePriorityQueue(SelectionDAGISel *IS);
+
+    ~ResourcePriorityQueue() {
+      delete ResourcesModel;
+    }
+
+    bool isBottomUp() const { return false; }
+
+    void initNodes(std::vector<SUnit> &sunits);
+
+    void addNode(const SUnit *SU) {
+      NumNodesSolelyBlocking.resize(SUnits->size(), 0);
+    }
+
+    void updateNode(const SUnit *SU) {}
+
+    void releaseState() {
+      SUnits = 0;
+    }
+
+    unsigned getLatency(unsigned NodeNum) const {
+      assert(NodeNum < (*SUnits).size());
+      return (*SUnits)[NodeNum].getHeight();
+    }
+
+    unsigned getNumSolelyBlockNodes(unsigned NodeNum) const {
+      assert(NodeNum < NumNodesSolelyBlocking.size());
+      return NumNodesSolelyBlocking[NodeNum];
+    }
+
+    /// Single cost function reflecting benefit of scheduling SU
+    /// in the current cycle.
+    signed SUSchedulingCost (SUnit *SU);
+
+    /// InitNumRegDefsLeft - Determine the # of regs defined by this node.
+    ///
+    void initNumRegDefsLeft(SUnit *SU);
+    void updateNumRegDefsLeft(SUnit *SU);
+    signed regPressureDelta(SUnit *SU, bool RawPressure = false);
+    signed rawRegPressureDelta (SUnit *SU, unsigned RCId);
+
+    bool empty() const { return Queue.empty(); }
+
+    virtual void push(SUnit *U);
+
+    virtual SUnit *pop();
+
+    virtual void remove(SUnit *SU);
+
+    virtual void dump(ScheduleDAG* DAG) const;
+
+    /// scheduledNode - Main resource tracking point.
+    void scheduledNode(SUnit *Node);
+    bool isResourceAvailable(SUnit *SU);
+    void reserveResources(SUnit *SU);
+
+private:
+    void adjustPriorityOfUnscheduledPreds(SUnit *SU);
+    SUnit *getSingleUnscheduledPred(SUnit *SU);
+    unsigned numberRCValPredInSU (SUnit *SU, unsigned RCId);
+    unsigned numberRCValSuccInSU (SUnit *SU, unsigned RCId);
+  };
+}
+
+#endif
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,344 @@
+//==- ScheduleDAGInstrs.h - MachineInstr Scheduling --------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ScheduleDAGInstrs class, which implements
+// scheduling for a MachineInstr-based dependency graph.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCHEDULEDAGINSTRS_H
+#define SCHEDULEDAGINSTRS_H
+
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SparseSet.h"
+#include <map>
+
+namespace llvm {
+  class MachineLoopInfo;
+  class MachineDominatorTree;
+  class LiveIntervals;
+
+  /// LoopDependencies - This class analyzes loop-oriented register
+  /// dependencies, which are used to guide scheduling decisions.
+  /// For example, loop induction variable increments should be
+  /// scheduled as soon as possible after the variable's last use.
+  ///
+  class LoopDependencies {
+    const MachineLoopInfo &MLI;
+    const MachineDominatorTree &MDT;
+
+  public:
+    typedef std::map<unsigned, std::pair<const MachineOperand *, unsigned> >
+      LoopDeps;
+    LoopDeps Deps;
+
+    LoopDependencies(const MachineLoopInfo &mli,
+                     const MachineDominatorTree &mdt) :
+      MLI(mli), MDT(mdt) {}
+
+    /// VisitLoop - Clear out any previous state and analyze the given loop.
+    ///
+    void VisitLoop(const MachineLoop *Loop) {
+      assert(Deps.empty() && "stale loop dependencies");
+
+      MachineBasicBlock *Header = Loop->getHeader();
+      SmallSet<unsigned, 8> LoopLiveIns;
+      for (MachineBasicBlock::livein_iterator LI = Header->livein_begin(),
+           LE = Header->livein_end(); LI != LE; ++LI)
+        LoopLiveIns.insert(*LI);
+
+      const MachineDomTreeNode *Node = MDT.getNode(Header);
+      const MachineBasicBlock *MBB = Node->getBlock();
+      assert(Loop->contains(MBB) &&
+             "Loop does not contain header!");
+      VisitRegion(Node, MBB, Loop, LoopLiveIns);
+    }
+
+  private:
+    void VisitRegion(const MachineDomTreeNode *Node,
+                     const MachineBasicBlock *MBB,
+                     const MachineLoop *Loop,
+                     const SmallSet<unsigned, 8> &LoopLiveIns) {
+      unsigned Count = 0;
+      for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
+           I != E; ++I) {
+        const MachineInstr *MI = I;
+        if (MI->isDebugValue())
+          continue;
+        for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+          const MachineOperand &MO = MI->getOperand(i);
+          if (!MO.isReg() || !MO.isUse())
+            continue;
+          unsigned MOReg = MO.getReg();
+          if (LoopLiveIns.count(MOReg))
+            Deps.insert(std::make_pair(MOReg, std::make_pair(&MO, Count)));
+        }
+        ++Count; // Not every iteration due to dbg_value above.
+      }
+
+      const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
+      for (std::vector<MachineDomTreeNode*>::const_iterator I =
+           Children.begin(), E = Children.end(); I != E; ++I) {
+        const MachineDomTreeNode *ChildNode = *I;
+        MachineBasicBlock *ChildBlock = ChildNode->getBlock();
+        if (Loop->contains(ChildBlock))
+          VisitRegion(ChildNode, ChildBlock, Loop, LoopLiveIns);
+      }
+    }
+  };
+
+  /// An individual mapping from virtual register number to SUnit.
+  struct VReg2SUnit {
+    unsigned VirtReg;
+    SUnit *SU;
+
+    VReg2SUnit(unsigned reg, SUnit *su): VirtReg(reg), SU(su) {}
+
+    unsigned getSparseSetKey() const {
+      return TargetRegisterInfo::virtReg2Index(VirtReg);
+    }
+  };
+
+  /// Combine a SparseSet with a 1x1 vector to track physical registers.
+  /// The SparseSet allows iterating over the (few) live registers for quickly
+  /// comparing against a regmask or clearing the set.
+  ///
+  /// Storage for the map is allocated once for the pass. The map can be
+  /// cleared between scheduling regions without freeing unused entries.
+  class Reg2SUnitsMap {
+    SparseSet<unsigned> PhysRegSet;
+    std::vector<std::vector<SUnit*> > SUnits;
+  public:
+    typedef SparseSet<unsigned>::const_iterator const_iterator;
+
+    // Allow iteration over register numbers (keys) in the map. If needed, we
+    // can provide an iterator over SUnits (values) as well.
+    const_iterator reg_begin() const { return PhysRegSet.begin(); }
+    const_iterator reg_end() const { return PhysRegSet.end(); }
+
+    /// Initialize the map with the number of registers.
+    /// If the map is already large enough, no allocation occurs.
+    /// For simplicity we expect the map to be empty().
+    void setRegLimit(unsigned Limit);
+
+    /// Returns true if the map is empty.
+    bool empty() const { return PhysRegSet.empty(); }
+
+    /// Clear the map without deallocating storage.
+    void clear();
+
+    bool contains(unsigned Reg) const { return PhysRegSet.count(Reg); }
+
+    /// If this register is mapped, return its existing SUnits vector.
+    /// Otherwise map the register and return an empty SUnits vector.
+    std::vector<SUnit *> &operator[](unsigned Reg) {
+      bool New = PhysRegSet.insert(Reg).second;
+      assert((!New || SUnits[Reg].empty()) && "stale SUnits vector");
+      (void)New;
+      return SUnits[Reg];
+    }
+
+    /// Erase an existing element without freeing memory.
+    void erase(unsigned Reg) {
+      PhysRegSet.erase(Reg);
+      SUnits[Reg].clear();
+    }
+  };
+
+  /// Use SparseSet as a SparseMap by relying on the fact that it never
+  /// compares ValueT's, only unsigned keys. This allows the set to be cleared
+  /// between scheduling regions in constant time as long as ValueT does not
+  /// require a destructor.
+  typedef SparseSet<VReg2SUnit> VReg2SUnitMap;
+
+  /// ScheduleDAGInstrs - A ScheduleDAG subclass for scheduling lists of
+  /// MachineInstrs.
+  class ScheduleDAGInstrs : public ScheduleDAG {
+  protected:
+    const MachineLoopInfo &MLI;
+    const MachineDominatorTree &MDT;
+    const MachineFrameInfo *MFI;
+    const InstrItineraryData *InstrItins;
+
+    /// Live Intervals provides reaching defs in preRA scheduling.
+    LiveIntervals *LIS;
+
+    /// isPostRA flag indicates vregs cannot be present.
+    bool IsPostRA;
+
+    /// UnitLatencies (misnamed) flag avoids computing def-use latencies, using
+    /// the def-side latency only.
+    bool UnitLatencies;
+
+    /// State specific to the current scheduling region.
+    /// ------------------------------------------------
+
+    /// The block in which to insert instructions
+    MachineBasicBlock *BB;
+
+    /// The beginning of the range to be scheduled.
+    MachineBasicBlock::iterator RegionBegin;
+
+    /// The end of the range to be scheduled.
+    MachineBasicBlock::iterator RegionEnd;
+
+    /// The index in BB of RegionEnd.
+    unsigned EndIndex;
+
+    /// After calling BuildSchedGraph, each machine instruction in the current
+    /// scheduling region is mapped to an SUnit.
+    DenseMap<MachineInstr*, SUnit*> MISUnitMap;
+
+    /// State internal to DAG building.
+    /// -------------------------------
+
+    /// Defs, Uses - Remember where defs and uses of each register are as we
+    /// iterate upward through the instructions. This is allocated here instead
+    /// of inside BuildSchedGraph to avoid the need for it to be initialized and
+    /// destructed for each block.
+    Reg2SUnitsMap Defs;
+    Reg2SUnitsMap Uses;
+
+    /// Track the last instruction in this region defining each virtual register.
+    VReg2SUnitMap VRegDefs;
+
+    /// PendingLoads - Remember where unknown loads are after the most recent
+    /// unknown store, as we iterate. As with Defs and Uses, this is here
+    /// to minimize construction/destruction.
+    std::vector<SUnit *> PendingLoads;
+
+    /// LoopRegs - Track which registers are used for loop-carried dependencies.
+    ///
+    LoopDependencies LoopRegs;
+
+    /// DbgValues - Remember the instruction that precedes DBG_VALUE.
+    /// These are generated by buildSchedGraph but persist so they can be
+    /// referenced when emitting the final schedule.
+    typedef std::vector<std::pair<MachineInstr *, MachineInstr *> >
+      DbgValueVector;
+    DbgValueVector DbgValues;
+    MachineInstr *FirstDbgValue;
+
+  public:
+    explicit ScheduleDAGInstrs(MachineFunction &mf,
+                               const MachineLoopInfo &mli,
+                               const MachineDominatorTree &mdt,
+                               bool IsPostRAFlag,
+                               LiveIntervals *LIS = 0);
+
+    virtual ~ScheduleDAGInstrs() {}
+
+    /// begin - Return an iterator to the top of the current scheduling region.
+    MachineBasicBlock::iterator begin() const { return RegionBegin; }
+
+    /// end - Return an iterator to the bottom of the current scheduling region.
+    MachineBasicBlock::iterator end() const { return RegionEnd; }
+
+    /// newSUnit - Creates a new SUnit and return a ptr to it.
+    SUnit *newSUnit(MachineInstr *MI);
+
+    /// getSUnit - Return an existing SUnit for this MI, or NULL.
+    SUnit *getSUnit(MachineInstr *MI) const;
+
+    /// startBlock - Prepare to perform scheduling in the given block.
+    virtual void startBlock(MachineBasicBlock *BB);
+
+    /// finishBlock - Clean up after scheduling in the given block.
+    virtual void finishBlock();
+
+    /// Initialize the scheduler state for the next scheduling region.
+    virtual void enterRegion(MachineBasicBlock *bb,
+                             MachineBasicBlock::iterator begin,
+                             MachineBasicBlock::iterator end,
+                             unsigned endcount);
+
+    /// Notify that the scheduler has finished scheduling the current region.
+    virtual void exitRegion();
+
+    /// buildSchedGraph - Build SUnits from the MachineBasicBlock that is our
+    /// input.
+    void buildSchedGraph(AliasAnalysis *AA);
+
+    /// addSchedBarrierDeps - Add dependencies from instructions in the current
+    /// list of instructions being scheduled to the scheduling barrier. We want to
+    /// make sure instructions which define registers that are either used by
+    /// the terminator or are live-out are properly scheduled. This is
+    /// especially important when the definition latency of the return value(s)
+    /// is too high to be hidden by the branch or when the liveout registers
+    /// are used by instructions in the fallthrough block.
+    void addSchedBarrierDeps();
+
+    /// computeLatency - Compute node latency.
+    ///
+    virtual void computeLatency(SUnit *SU);
+
+    /// computeOperandLatency - Override dependence edge latency using
+    /// operand use/def information
+    ///
+    virtual void computeOperandLatency(SUnit *Def, SUnit *Use,
+                                       SDep& dep) const;
+
+    /// schedule - Order nodes according to selected style, filling
+    /// in the Sequence member.
+    ///
+    /// Typically, a scheduling algorithm will implement schedule() without
+    /// overriding enterRegion() or exitRegion().
+    virtual void schedule() = 0;
+
+    /// finalizeSchedule - Allow targets to perform final scheduling actions at
+    /// the level of the whole MachineFunction. By default does nothing.
+    virtual void finalizeSchedule() {}
+
+    virtual void dumpNode(const SUnit *SU) const;
+
+    /// Return a label for a DAG node that points to an instruction.
+    virtual std::string getGraphNodeLabel(const SUnit *SU) const;
+
+    /// Return a label for the region of code covered by the DAG.
+    virtual std::string getDAGName() const;
+
+  protected:
+    void initSUnits();
+    void addPhysRegDataDeps(SUnit *SU, const MachineOperand &MO);
+    void addPhysRegDeps(SUnit *SU, unsigned OperIdx);
+    void addVRegDefDeps(SUnit *SU, unsigned OperIdx);
+    void addVRegUseDeps(SUnit *SU, unsigned OperIdx);
+
+    VReg2SUnitMap::iterator findVRegDef(unsigned VirtReg) {
+      return VRegDefs.find(TargetRegisterInfo::virtReg2Index(VirtReg));
+    }
+  };
+
+  /// newSUnit - Creates a new SUnit and return a ptr to it.
+  inline SUnit *ScheduleDAGInstrs::newSUnit(MachineInstr *MI) {
+#ifndef NDEBUG
+    const SUnit *Addr = SUnits.empty() ? 0 : &SUnits[0];
+#endif
+    SUnits.push_back(SUnit(MI, (unsigned)SUnits.size()));
+    assert((Addr == 0 || Addr == &SUnits[0]) &&
+           "SUnits std::vector reallocated on the fly!");
+    SUnits.back().OrigNode = &SUnits.back();
+    return &SUnits.back();
+  }
+
+  /// getSUnit - Return an existing SUnit for this MI, or NULL.
+  inline SUnit *ScheduleDAGInstrs::getSUnit(MachineInstr *MI) const {
+    DenseMap<MachineInstr*, SUnit*>::const_iterator I = MISUnitMap.find(MI);
+    if (I == MISUnitMap.end())
+      return 0;
+    return I->second;
+  }
+} // namespace llvm
+
+#endif
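
A rough sketch of the region protocol implied by the methods above, assuming
Scheduler is some concrete ScheduleDAGInstrs subclass owned by a pass; treating
the whole block as a single region is only for illustration:

  #include "llvm/CodeGen/MachineBasicBlock.h"
  #include "llvm/CodeGen/ScheduleDAGInstrs.h"

  void scheduleBlockSketch(llvm::ScheduleDAGInstrs &Scheduler,
                           llvm::MachineBasicBlock *MBB) {
    Scheduler.startBlock(MBB);
    // One region spanning the whole block; endcount is the instruction count.
    Scheduler.enterRegion(MBB, MBB->begin(), MBB->end(), MBB->size());
    Scheduler.schedule();   // provided by the concrete subclass
    Scheduler.exitRegion();
    Scheduler.finishBlock();
  }
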
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/llvm/include/llvm/ExecutionEngine/IntelJITEventsWrapper.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/llvm/include/llvm/ExecutionEngine/IntelJITEventsWrapper.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,102 @@
+//===-- IntelJITEventsWrapper.h - Intel JIT Events API Wrapper --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a wrapper for the Intel JIT Events API. It allows for the
+// implementation of the jitprofiling library to be swapped with an alternative
+// implementation (for testing). To include this file, you must have the
+// jitprofiling.h header available; it is available in Intel(R) VTune(TM)
+// Amplifier XE 2011.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef INTEL_JIT_EVENTS_WRAPPER_H
+#define INTEL_JIT_EVENTS_WRAPPER_H
+
+#include <jitprofiling.h>
+
+namespace llvm {
+
+class IntelJITEventsWrapper {
+  // Function pointer types for testing implementation of Intel jitprofiling
+  // library
+  typedef int (*NotifyEventPtr)(iJIT_JVM_EVENT, void*);
+  typedef void (*RegisterCallbackExPtr)(void *, iJIT_ModeChangedEx );
+  typedef iJIT_IsProfilingActiveFlags (*IsProfilingActivePtr)(void);
+  typedef void (*FinalizeThreadPtr)(void);
+  typedef void (*FinalizeProcessPtr)(void);
+  typedef unsigned int (*GetNewMethodIDPtr)(void);
+
+  NotifyEventPtr NotifyEventFunc;
+  RegisterCallbackExPtr RegisterCallbackExFunc;
+  IsProfilingActivePtr IsProfilingActiveFunc;
+  FinalizeThreadPtr FinalizeThreadFunc;
+  FinalizeProcessPtr FinalizeProcessFunc;
+  GetNewMethodIDPtr GetNewMethodIDFunc;
+
+public:
+  bool isAmplifierRunning() {
+    return iJIT_IsProfilingActive() == iJIT_SAMPLING_ON;
+  }
+
+  IntelJITEventsWrapper()
+  : NotifyEventFunc(::iJIT_NotifyEvent),
+    RegisterCallbackExFunc(::iJIT_RegisterCallbackEx),
+    IsProfilingActiveFunc(::iJIT_IsProfilingActive),
+    FinalizeThreadFunc(::FinalizeThread),
+    FinalizeProcessFunc(::FinalizeProcess),
+    GetNewMethodIDFunc(::iJIT_GetNewMethodID) {
+  }
+
+  IntelJITEventsWrapper(NotifyEventPtr NotifyEventImpl,
+                   RegisterCallbackExPtr RegisterCallbackExImpl,
+                   IsProfilingActivePtr IsProfilingActiveImpl,
+                   FinalizeThreadPtr FinalizeThreadImpl,
+                   FinalizeProcessPtr FinalizeProcessImpl,
+                   GetNewMethodIDPtr GetNewMethodIDImpl)
+  : NotifyEventFunc(NotifyEventImpl),
+    RegisterCallbackExFunc(RegisterCallbackExImpl),
+    IsProfilingActiveFunc(IsProfilingActiveImpl),
+    FinalizeThreadFunc(FinalizeThreadImpl),
+    FinalizeProcessFunc(FinalizeProcessImpl),
+    GetNewMethodIDFunc(GetNewMethodIDImpl) {
+  }
+
+  // Sends an event announcing that a function has been emitted
+  //   return values are event-specific.  See Intel documentation for details.
+  int  iJIT_NotifyEvent(iJIT_JVM_EVENT EventType, void *EventSpecificData) {
+    if (!NotifyEventFunc)
+      return -1;
+    return NotifyEventFunc(EventType, EventSpecificData);
+  }
+
+  // Registers a callback function to receive notice of profiling state changes
+  void iJIT_RegisterCallbackEx(void *UserData,
+                               iJIT_ModeChangedEx NewModeCallBackFuncEx) {
+    if (RegisterCallbackExFunc)
+      RegisterCallbackExFunc(UserData, NewModeCallBackFuncEx);
+  }
+
+  // Returns the current profiler mode
+  iJIT_IsProfilingActiveFlags iJIT_IsProfilingActive(void) {
+    if (!IsProfilingActiveFunc)
+      return iJIT_NOTHING_RUNNING;
+    return IsProfilingActiveFunc();
+  }
+
+  // Generates a locally unique method ID for use in code registration
+  unsigned int iJIT_GetNewMethodID(void) {
+    if (!GetNewMethodIDFunc)
+      return -1;
+    return GetNewMethodIDFunc();
+  }
+};
+
+} //namespace llvm
+
+#endif //INTEL_JIT_EVENTS_WRAPPER_H
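The two constructors above carry the design: the default one binds the real
Intel jitprofiling entry points, while the second lets a test inject stub
implementations. A minimal sketch of such a test fixture follows, assuming
jitprofiling.h and this header are on the include path; every Stub* name is
hypothetical and not part of this changeset.

    #include "llvm/ExecutionEngine/IntelJITEventsWrapper.h"

    // Stubs matching the function-pointer typedefs declared in the wrapper.
    static int StubNotifyEvent(iJIT_JVM_EVENT, void *) { return 0; }
    static void StubRegisterCallbackEx(void *, iJIT_ModeChangedEx) {}
    static iJIT_IsProfilingActiveFlags StubIsProfilingActive(void) {
      return iJIT_SAMPLING_ON;            // pretend the profiler is attached
    }
    static void StubFinalizeThread(void) {}
    static void StubFinalizeProcess(void) {}
    static unsigned int StubGetNewMethodID(void) { return 1; }

    // A wrapper instance that never touches the real jitprofiling library.
    static llvm::IntelJITEventsWrapper MockWrapper(StubNotifyEvent,
                                                   StubRegisterCallbackEx,
                                                   StubIsProfilingActive,
                                                   StubFinalizeThread,
                                                   StubFinalizeProcess,
                                                   StubGetNewMethodID);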
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/llvm/include/llvm/ExecutionEngine/OProfileWrapper.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/llvm/include/llvm/ExecutionEngine/OProfileWrapper.h	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,124 @@
+//===-- OProfileWrapper.h - OProfile JIT API Wrapper ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file defines an OProfileWrapper object that detects if the oprofile
+// daemon is running, and provides wrappers for opagent functions used to
+// communicate with the oprofile JIT interface. The dynamic library libopagent
+// does not need to be linked directly as this object lazily loads the library
+// when the first op_ function is called.
+//
+// See http://oprofile.sourceforge.net/doc/devel/jit-interface.html for the
+// definition of the interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef OPROFILE_WRAPPER_H
+#define OPROFILE_WRAPPER_H
+
+#include "llvm/Support/DataTypes.h"
+#include <opagent.h>
+
+namespace llvm {
+
+
+class OProfileWrapper {
+  typedef  op_agent_t    (*op_open_agent_ptr_t)();
+  typedef  int           (*op_close_agent_ptr_t)(op_agent_t);
+  typedef  int           (*op_write_native_code_ptr_t)(op_agent_t,
+                                                const char*,
+                                                uint64_t,
+                                                void const*,
+                                                const unsigned int);
+  typedef  int           (*op_write_debug_line_info_ptr_t)(op_agent_t,
+                                                void const*,
+                                                size_t,
+                                                struct debug_line_info const*);
+  typedef  int           (*op_unload_native_code_ptr_t)(op_agent_t, uint64_t);
+
+  // Also used for the op_minor_version function, which has the same signature
+  typedef  int           (*op_major_version_ptr_t)(void);
+
+  // This is not a part of the opagent API, but is useful nonetheless
+  typedef  bool          (*IsOProfileRunningPtrT)(void);
+
+
+  op_agent_t                      Agent;
+  op_open_agent_ptr_t             OpenAgentFunc;
+  op_close_agent_ptr_t            CloseAgentFunc;
+  op_write_native_code_ptr_t      WriteNativeCodeFunc;
+  op_write_debug_line_info_ptr_t  WriteDebugLineInfoFunc;
+  op_unload_native_code_ptr_t     UnloadNativeCodeFunc;
+  op_major_version_ptr_t          MajorVersionFunc;
+  op_major_version_ptr_t          MinorVersionFunc;
+  IsOProfileRunningPtrT           IsOProfileRunningFunc;
+
+  bool Initialized;
+
+public:
+  OProfileWrapper();
+
+  // Constructor for testing with a mock opagent implementation; skips the
+  // dynamic load and the function resolution.
+  OProfileWrapper(op_open_agent_ptr_t OpenAgentImpl,
+                  op_close_agent_ptr_t CloseAgentImpl,
+                  op_write_native_code_ptr_t WriteNativeCodeImpl,
+                  op_write_debug_line_info_ptr_t WriteDebugLineInfoImpl,
+                  op_unload_native_code_ptr_t UnloadNativeCodeImpl,
+                  op_major_version_ptr_t MajorVersionImpl,
+                  op_major_version_ptr_t MinorVersionImpl,
+                  IsOProfileRunningPtrT MockIsOProfileRunningImpl = 0)
+  : OpenAgentFunc(OpenAgentImpl),
+    CloseAgentFunc(CloseAgentImpl),
+    WriteNativeCodeFunc(WriteNativeCodeImpl),
+    WriteDebugLineInfoFunc(WriteDebugLineInfoImpl),
+    UnloadNativeCodeFunc(UnloadNativeCodeImpl),
+    MajorVersionFunc(MajorVersionImpl),
+    MinorVersionFunc(MinorVersionImpl),
+    IsOProfileRunningFunc(MockIsOProfileRunningImpl),
+    Initialized(true)
+  {
+  }
+
+  // Calls op_open_agent in the oprofile JIT library and saves the returned
+  // op_agent_t handle internally so it can be used when calling all the other
+  // op_* functions. Callers of this class do not need to keep track of
+  // op_agent_t objects.
+  bool op_open_agent();
+
+  int op_close_agent();
+  int op_write_native_code(const char* name,
+                           uint64_t addr,
+                           void const* code,
+                           const unsigned int size);
+  int op_write_debug_line_info(void const* code,
+                               size_t num_entries,
+                               struct debug_line_info const* info);
+  int op_unload_native_code(uint64_t addr);
+  int op_major_version(void);
+  int op_minor_version(void);
+
+  // Returns true if the oprofile daemon is running, the opagent library is
+  // loaded, and a connection to the agent has been established; returns false
+  // otherwise.
+  bool isAgentAvailable();
+
+private:
+  // Loads the libopagent library and initializes this wrapper if the oprofile
+  // daemon is running
+  bool initialize();
+
+  // Searches /proc for the oprofile daemon and returns true if the process is
+  // found, or false otherwise.
+  bool checkForOProfileProcEntry();
+
+  bool isOProfileRunning();
+};
+
+} // namespace llvm
+
+#endif //OPROFILE_WRAPPER_H
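The comment on isAgentAvailable() above implies the intended call pattern:
check for a live agent, then hand each freshly emitted function to the
oprofile JIT interface. A minimal sketch inferred from the declarations above
(the helper name and its arguments are hypothetical):

    #include "llvm/ExecutionEngine/OProfileWrapper.h"
    #include <stdint.h>

    // Report one JIT-compiled function to oprofile, if the daemon is present.
    void reportJITedFunction(llvm::OProfileWrapper &Wrapper, const char *Name,
                             const void *Code, unsigned Size) {
      if (!Wrapper.isAgentAvailable())    // no daemon or no libopagent: skip
        return;
      // op_write_native_code takes the symbol name, load address, code bytes,
      // and code size, per the declaration above.
      Wrapper.op_write_native_code(Name, (uint64_t)(uintptr_t)Code, Code, Size);
    }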
diff -r d27f0705b100 -r 7bbd6bca528b head/contrib/llvm/include/llvm/IntrinsicsHexagon.td
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/head/contrib/llvm/include/llvm/IntrinsicsHexagon.td	Tue Apr 17 11:33:49 2012 +0300
@@ -0,0 +1,3671 @@
+//===- IntrinsicsHexagon.td - Defines Hexagon intrinsics ---*- tablegen -*-===//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the Hexagon-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Definitions for all Hexagon intrinsics.
+//
+// All Hexagon intrinsics start with "llvm.hexagon.".
+let TargetPrefix = "hexagon" in {
+  /// Hexagon_Intrinsic - Base class for all Hexagon intrinsics.
+  class Hexagon_Intrinsic<string GCCIntSuffix, list<LLVMType> ret_types,
+                              list<LLVMType> param_types,
+                              list<IntrinsicProperty> properties>
+    : GCCBuiltin<!strconcat("__builtin_", GCCIntSuffix)>,
+      Intrinsic<ret_types, param_types, properties>;
+}
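Everything below is a mechanical expansion of this base class: one class per
builtin signature, then one def per builtin. For orientation, here is a sketch
of how middle-end code could reach one of these definitions, assuming the
usual TableGen mapping (int_hexagon_C2_cmpeq becomes the enum value
Intrinsic::hexagon_C2_cmpeq and the IR name llvm.hexagon.C2.cmpeq); the helper
and the include paths are illustrative for the LLVM snapshot being imported.

    #include "llvm/Intrinsics.h"
    #include "llvm/Module.h"
    #include "llvm/Support/IRBuilder.h"

    // Emit a call to llvm.hexagon.C2.cmpeq : (i32, i32) -> i1.
    llvm::Value *emitHexagonCmpEq(llvm::Module *M, llvm::IRBuilder<> &Builder,
                                  llvm::Value *A, llvm::Value *B) {
      llvm::Function *F =
          llvm::Intrinsic::getDeclaration(M, llvm::Intrinsic::hexagon_C2_cmpeq);
      return Builder.CreateCall2(F, A, B, "cmpeq");
    }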
+
+//===----------------------------------------------------------------------===//
+//
+// DEF_FUNCTION_TYPE_1(QI_ftype_MEM,BT_BOOL,BT_PTR) ->
+// Hexagon_qi_mem_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_mem_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i1_ty], [llvm_ptr_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(void_ftype_SI,BT_VOID,BT_INT) ->
+// Hexagon_void_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_void_si_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_void_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(HI_ftype_SI,BT_I16,BT_INT) ->
+// Hexagon_hi_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_hi_si_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i16_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(SI_ftype_SI,BT_INT,BT_INT) ->
+// Hexagon_si_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_si_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(DI_ftype_SI,BT_LONGLONG,BT_INT) ->
+// Hexagon_di_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_si_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(SI_ftype_DI,BT_INT,BT_LONGLONG) ->
+// Hexagon_si_di_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_di_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(DI_ftype_DI,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_di_di_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_di_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(QI_ftype_QI,BT_BOOL,BT_BOOL) ->
+// Hexagon_qi_qi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_qi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i1_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(QI_ftype_SI,BT_BOOL,BT_INT) ->
+// Hexagon_qi_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_si_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i1_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(DI_ftype_QI,BT_LONGLONG,BT_BOOL) ->
+// Hexagon_di_qi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_qi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(SI_ftype_QI,BT_INT,BT_BOOL) ->
+// Hexagon_si_qi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_qi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(QI_ftype_SISI,BT_BOOL,BT_INT,BT_INT) ->
+// Hexagon_qi_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_sisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i1_ty], [llvm_i32_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(void_ftype_SISI,BT_VOID,BT_INT,BT_INT) ->
+// Hexagon_void_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_void_sisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_void_ty], [llvm_i32_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_SISI,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_si_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(USI_ftype_SISI,BT_UINT,BT_INT,BT_INT) ->
+// Hexagon_usi_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_usi_sisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(DI_ftype_SISI,BT_LONGLONG,BT_INT,BT_INT) ->
+// Hexagon_di_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_sisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(UDI_ftype_SISI,BT_ULONGLONG,BT_INT,BT_INT) ->
+// Hexagon_udi_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_udi_sisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(DI_ftype_SIDI,BT_LONGLONG,BT_INT,BT_LONGLONG) ->
+// Hexagon_di_sidi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_sidi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(DI_ftype_DISI,BT_LONGLONG,BT_LONGLONG,BT_INT) ->
+// Hexagon_di_disi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_disi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_SIDI,BT_INT,BT_INT,BT_LONGLONG) ->
+// Hexagon_si_sidi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sidi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_DIDI,BT_INT,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_si_didi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_didi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i64_ty, llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(DI_ftype_DIDI,BT_LONGLONG,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_di_didi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_didi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(UDI_ftype_DIDI,BT_ULONGLONG,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_udi_didi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_udi_didi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_DISI,BT_INT,BT_LONGLONG,BT_INT) ->
+// Hexagon_si_disi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_disi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(QI_ftype_DIDI,BT_BOOL,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_qi_didi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_didi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i1_ty], [llvm_i64_ty, llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(QI_ftype_QIQI,BT_BOOL,BT_BOOL,BT_BOOL) ->
+// Hexagon_qi_qiqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_qiqi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i1_ty], [llvm_i32_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(QI_ftype_QIQIQI,BT_BOOL,BT_BOOL,BT_BOOL) ->
+// Hexagon_qi_qiqiqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_qiqiqi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i1_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_QIQI,BT_INT,BT_BOOL,BT_BOOL) ->
+// Hexagon_si_qiqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_qiqi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_QISI,BT_INT,BT_BOOL,BT_INT) ->
+// Hexagon_si_qisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_qisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i1_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(void_ftype_SISISI,BT_VOID,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_void_sisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_void_sisisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_void_ty], [llvm_i32_ty, llvm_i32_ty,
+                           llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_SISISI,BT_INT,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_si_sisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sisisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+                           llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_SISISI,BT_LONGLONG,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_di_sisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_sisisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty,
+                           llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_DISISI,BT_INT,BT_LONGLONG,BT_INT,BT_INT) ->
+// Hexagon_si_disisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_disisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty,
+                           llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_DISISI,BT_LONGLONG,BT_LONGLONG,BT_INT,BT_INT) ->
+// Hexagon_di_disisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_disisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty,
+                           llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_SIDISI,BT_INT,BT_INT,BT_LONGLONG,BT_INT) ->
+// Hexagon_si_sidisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sidisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty,
+                           llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_DIDISI,BT_LONGLONG,BT_LONGLONG,
+//                     BT_LONGLONG,BT_INT) ->
+// Hexagon_di_didisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_didisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+                           llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_SIDIDI,BT_INT,BT_INT,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_si_sididi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sididi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty,
+                           llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_DIDIDI,BT_LONGLONG,BT_LONGLONG,BT_LONGLONG,
+//                     BT_LONGLONG) ->
+// Hexagon_di_dididi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_dididi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+                           llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_SISIDI,BT_INT,BT_INT,BT_INT,BT_LONGLONG) ->
+// Hexagon_si_sisidi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sisidi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+                           llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_QISISI,BT_INT,BT_BOOL,BT_INT,BT_INT) ->
+// Hexagon_si_qisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_qisisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+                           llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_QISISI,BT_LONGLONG,BT_BOOL,BT_INT,BT_INT) ->
+// Hexagon_di_qisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_qisisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i1_ty, llvm_i32_ty,
+                           llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_QIDIDI,BT_LONGLONG,BT_BOOL,BT_LONGLONG,
+//                     BT_LONGLONG) ->
+// Hexagon_di_qididi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_qididi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty,
+                           llvm_i64_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_DIDIQI,BT_LONGLONG,BT_LONGLONG,BT_LONGLONG,
+//                     BT_BOOL) ->
+// Hexagon_di_didiqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_didiqi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+                           llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_4(SI_ftype_SISISISI,BT_INT,BT_INT,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_si_sisisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sisisisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+                           llvm_i32_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_4(DI_ftype_DIDISISI,BT_LONGLONG,BT_LONGLONG,
+//                     BT_LONGLONG,BT_INT,BT_INT) ->
+// Hexagon_di_didisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_didisisi_Intrinsic<string GCCIntSuffix>
+  : Hexagon_Intrinsic<GCCIntSuffix,
+                          [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+                           llvm_i32_ty, llvm_i32_ty],
+                          [IntrNoMem]>;
+
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpeq,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpeq : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpeq">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgt,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgt : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgt">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgtu,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgtu : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgtu">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpeqp,QI_ftype_DIDI,2)
+//
+def int_hexagon_C2_cmpeqp : Hexagon_qi_didi_Intrinsic<"HEXAGON.C2.cmpeqp">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgtp,QI_ftype_DIDI,2)
+//
+def int_hexagon_C2_cmpgtp : Hexagon_qi_didi_Intrinsic<"HEXAGON.C2.cmpgtp">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgtup,QI_ftype_DIDI,2)
+//
+def int_hexagon_C2_cmpgtup : Hexagon_qi_didi_Intrinsic<"HEXAGON.C2.cmpgtup">;
+//
+// BUILTIN_INFO(HEXAGON.C2_bitsset,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_bitsset : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.bitsset">;
+//
+// BUILTIN_INFO(HEXAGON.C2_bitsclr,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_bitsclr : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.bitsclr">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpeqi,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpeqi : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpeqi">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgti,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgti : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgti">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgtui,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgtui : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgtui">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgei,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgei : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgei">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgeui,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgeui : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgeui">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmplt,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmplt : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmplt">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpltu,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpltu : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpltu">;
+//
+// BUILTIN_INFO(HEXAGON.C2_bitsclri,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_bitsclri : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.bitsclri">;
+//
+// BUILTIN_INFO(HEXAGON.C2_and,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_and : Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C2.and">;
+//
+// BUILTIN_INFO(HEXAGON.C2_or,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_or : Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C2.or">;
+//
+// BUILTIN_INFO(HEXAGON.C2_xor,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_xor : Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C2.xor">;
+//
+// BUILTIN_INFO(HEXAGON.C2_andn,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_andn : Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C2.andn">;
+//
+// BUILTIN_INFO(HEXAGON.C2_not,QI_ftype_QI,1)
+//
+def int_hexagon_C2_not : Hexagon_qi_qi_Intrinsic<"HEXAGON.C2.not">;
+//
+// BUILTIN_INFO(HEXAGON.C2_orn,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_orn : Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C2.orn">;
+//
+// BUILTIN_INFO(HEXAGON.C2_pxfer_map,QI_ftype_QI,1)
+//
+def int_hexagon_C2_pxfer_map : Hexagon_qi_qi_Intrinsic<"HEXAGON.C2.pxfer.map">;
+//
+// BUILTIN_INFO(HEXAGON.C2_any8,QI_ftype_QI,1)
+//
+def int_hexagon_C2_any8 : Hexagon_qi_qi_Intrinsic<"HEXAGON.C2.any8">;
+//
+// BUILTIN_INFO(HEXAGON.C2_all8,QI_ftype_QI,1)
+//
+def int_hexagon_C2_all8 : Hexagon_qi_qi_Intrinsic<"HEXAGON.C2.all8">;
+//
+// BUILTIN_INFO(HEXAGON.C2_vitpack,SI_ftype_QIQI,2)
+//
+def int_hexagon_C2_vitpack : Hexagon_si_qiqi_Intrinsic<"HEXAGON.C2.vitpack">;
+//
+// BUILTIN_INFO(HEXAGON.C2_mux,SI_ftype_QISISI,3)
+//
+def int_hexagon_C2_mux : Hexagon_si_qisisi_Intrinsic<"HEXAGON.C2.mux">;
+//
+// BUILTIN_INFO(HEXAGON.C2_muxii,SI_ftype_QISISI,3)
+//
+def int_hexagon_C2_muxii : Hexagon_si_qisisi_Intrinsic<"HEXAGON.C2.muxii">;
+//
+// BUILTIN_INFO(HEXAGON.C2_muxir,SI_ftype_QISISI,3)
+//
+def int_hexagon_C2_muxir : Hexagon_si_qisisi_Intrinsic<"HEXAGON.C2.muxir">;
+//
+// BUILTIN_INFO(HEXAGON.C2_muxri,SI_ftype_QISISI,3)
+//
+def int_hexagon_C2_muxri : Hexagon_si_qisisi_Intrinsic<"HEXAGON.C2.muxri">;
+//
+// BUILTIN_INFO(HEXAGON.C2_vmux,DI_ftype_QIDIDI,3)
+//
+def int_hexagon_C2_vmux : Hexagon_di_qididi_Intrinsic<"HEXAGON.C2.vmux">;
+//
+// BUILTIN_INFO(HEXAGON.C2_mask,DI_ftype_QI,1)
+//
+def int_hexagon_C2_mask : Hexagon_di_qi_Intrinsic<"HEXAGON.C2.mask">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpbeq,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpbeq : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpbeq">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpbgtu,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpbgtu : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpbgtu">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpheq,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpheq : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpheq">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmphgt,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmphgt : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmphgt">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmphgtu,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmphgtu : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmphgtu">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpweq,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpweq : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpweq">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpwgt,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpwgt : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpwgt">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpwgtu,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpwgtu : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpwgtu">;
+//
+// BUILTIN_INFO(HEXAGON.C2_tfrpr,SI_ftype_QI,1)
+//
+def int_hexagon_C2_tfrpr : Hexagon_si_qi_Intrinsic<"HEXAGON.C2.tfrpr">;
+//
+// BUILTIN_INFO(HEXAGON.C2_tfrrp,QI_ftype_SI,1)
+//
+def int_hexagon_C2_tfrrp : Hexagon_qi_si_Intrinsic<"HEXAGON.C2.tfrrp">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_hh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_hh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_hh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_hh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_hl_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_hl_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_hl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_hl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_lh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_lh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_lh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_lh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_ll_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_ll_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_ll_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_ll_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_hh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_hh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hl_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_hl_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_hl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_lh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_lh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_lh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_lh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_ll_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_ll_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_ll_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_ll_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_hh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_hh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hl_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_hl_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_hl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_lh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_lh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_lh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_lh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_ll_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_ll_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_ll_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_ll_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_hh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_hh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hl_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_hl_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_hl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_lh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_lh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_lh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_lh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_ll_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_ll_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_ll_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_ll_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_hh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_hh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hl_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_hl_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hl_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_hl_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_lh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_lh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_lh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_lh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_ll_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_ll_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_ll_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_ll_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_hh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_hh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hl_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_hl_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hl_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_hl_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_lh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_lh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_lh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_lh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_ll_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_ll_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_ll_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_ll_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_hh_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_hh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_hh_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_hh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_hl_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_hl_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_hl_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_hl_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_lh_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_lh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_lh_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_lh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_ll_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_ll_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_ll_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_ll_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hh_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_hh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hh_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_hh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hl_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_hl_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hl_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_hl_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_lh_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_lh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_lh_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_lh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_ll_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_ll_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_ll_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_ll_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_hh_s0,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_hh_s0 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_hh_s1,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_hh_s1 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_hl_s0,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_hl_s0 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_hl_s1,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_hl_s1 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_lh_s0,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_lh_s0 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_lh_s1,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_lh_s1 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_ll_s0,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_ll_s0 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_ll_s1,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_ll_s1 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_hh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_hh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hl_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_hl_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hl_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_hl_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_lh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_lh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_lh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_lh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_ll_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_ll_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_ll_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_ll_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_hh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_hh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hl_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_hl_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hl_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_hl_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_lh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_lh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_lh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_lh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_ll_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_ll_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_ll_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_ll_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_hh_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_hh_s0 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_hh_s1,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_hh_s1 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_hl_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_hl_s0 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_hl_s1,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_hl_s1 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_lh_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_lh_s0 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_lh_s1,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_lh_s1 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_ll_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_ll_s0 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_ll_s1,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_ll_s1 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpysmi,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpysmi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpysmi">;
+//
+// BUILTIN_INFO(HEXAGON.M2_macsip,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_macsip :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.macsip">;
+//
+// BUILTIN_INFO(HEXAGON.M2_macsin,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_macsin :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.macsin">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyss_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_dpmpyss_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.dpmpyss.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyss_acc_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_dpmpyss_acc_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.dpmpyss.acc.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyss_nac_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_dpmpyss_nac_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.dpmpyss.nac.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyuu_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_dpmpyuu_s0 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.dpmpyuu.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyuu_acc_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_dpmpyuu_acc_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.dpmpyuu.acc.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyuu_nac_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_dpmpyuu_nac_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.dpmpyuu.nac.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_up,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_up :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.up">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_up,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_up :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.up">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyss_rnd_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_dpmpyss_rnd_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.dpmpyss.rnd.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyi,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpyi">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyui,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyui :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpyui">;
+//
+// BUILTIN_INFO(HEXAGON.M2_maci,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_maci :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.maci">;
+//
+// BUILTIN_INFO(HEXAGON.M2_acci,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_acci :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.acci">;
+//
+// BUILTIN_INFO(HEXAGON.M2_accii,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_accii :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.accii">;
+//
+// BUILTIN_INFO(HEXAGON.M2_nacci,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_nacci :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.nacci">;
+//
+// BUILTIN_INFO(HEXAGON.M2_naccii,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_naccii :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.naccii">;
+//
+// BUILTIN_INFO(HEXAGON.M2_subacc,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_subacc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.subacc">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2s_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.vmpy2s.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2s_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.vmpy2s.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2s_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_vmac2s_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.vmac2s.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2s_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_vmac2s_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.vmac2s.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s0pack,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2s_s0pack :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.vmpy2s.s0pack">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s1pack,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2s_s1pack :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.vmpy2s.s1pack">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_vmac2 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.vmac2">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2es_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vmpy2es_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vmpy2es.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2es_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vmpy2es_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vmpy2es.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2es_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vmac2es_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vmac2es.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2es_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vmac2es_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vmac2es.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2es,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vmac2es :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vmac2es">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrmac_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrmac_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vrmac.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrmpy_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrmpy_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vrmpy.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmpyrs_s0,SI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vdmpyrs_s0 :
+Hexagon_si_didi_Intrinsic<"HEXAGON.M2.vdmpyrs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmpyrs_s1,SI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vdmpyrs_s1 :
+Hexagon_si_didi_Intrinsic<"HEXAGON.M2.vdmpyrs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmacs_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vdmacs_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vdmacs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmacs_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vdmacs_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vdmacs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmpys_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vdmpys_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vdmpys.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmpys_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vdmpys_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vdmpys.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyrs_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyrs_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.cmpyrs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyrs_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyrs_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.cmpyrs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyrsc_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyrsc_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.cmpyrsc.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyrsc_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyrsc_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.cmpyrsc.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacs_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacs_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmacs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacs_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacs_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmacs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacsc_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacsc_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmacsc.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacsc_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacsc_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmacsc.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpys_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpys_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpys.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpys_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpys_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpys.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpysc_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpysc_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpysc.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpysc_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpysc_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpysc.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cnacs_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cnacs_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cnacs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cnacs_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cnacs_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cnacs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cnacsc_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cnacsc_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cnacsc.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cnacsc_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cnacsc_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cnacsc.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpys_s1,DI_ftype_DISI,2)
+//
+def int_hexagon_M2_vrcmpys_s1 :
+Hexagon_di_disi_Intrinsic<"HEXAGON.M2.vrcmpys.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpys_acc_s1,DI_ftype_DIDISI,3)
+//
+def int_hexagon_M2_vrcmpys_acc_s1 :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.M2.vrcmpys.acc.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpys_s1rp,SI_ftype_DISI,2)
+//
+def int_hexagon_M2_vrcmpys_s1rp :
+Hexagon_si_disi_Intrinsic<"HEXAGON.M2.vrcmpys.s1rp">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacls_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacls_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacls.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacls_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacls_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacls.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmachs_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmachs_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmachs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmachs_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmachs_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmachs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyl_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyl_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyl_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyl_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyh_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyh_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyh_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyh_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacls_rs0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacls_rs0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacls.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacls_rs1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacls_rs1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacls.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmachs_rs0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmachs_rs0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmachs.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmachs_rs1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmachs_rs1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmachs.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyl_rs0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyl_rs0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyl.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyl_rs1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyl_rs1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyl.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyh_rs0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyh_rs0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyh.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyh_rs1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyh_rs1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyh.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_hmmpyl_rs1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_hmmpyl_rs1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.hmmpyl.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_hmmpyh_rs1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_hmmpyh_rs1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.hmmpyh.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmaculs_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmaculs_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmaculs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmaculs_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmaculs_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmaculs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacuhs_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacuhs_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacuhs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacuhs_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacuhs_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacuhs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyul_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyul_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyul.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyul_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyul_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyul.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyuh_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyuh_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyuh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyuh_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyuh_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyuh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmaculs_rs0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmaculs_rs0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmaculs.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmaculs_rs1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmaculs_rs1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmaculs.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacuhs_rs0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacuhs_rs0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacuhs.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacuhs_rs1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacuhs_rs1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacuhs.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyul_rs0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyul_rs0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyul.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyul_rs1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyul_rs1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyul.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyuh_rs0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyuh_rs0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyuh.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyuh_rs1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyuh_rs1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyuh.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmaci_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrcmaci_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vrcmaci.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmacr_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrcmacr_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vrcmacr.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmaci_s0c,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrcmaci_s0c :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vrcmaci.s0c">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmacr_s0c,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrcmacr_s0c :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vrcmacr.s0c">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmaci_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmaci_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmaci.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacr_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacr_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmacr.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpyi_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrcmpyi_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vrcmpyi.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpyr_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrcmpyr_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vrcmpyr.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpyi_s0c,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrcmpyi_s0c :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vrcmpyi.s0c">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpyr_s0c,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrcmpyr_s0c :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vrcmpyr.s0c">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyi_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyi_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpyi.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyr_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyr_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpyr.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmpy_s0_sat_i,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vcmpy_s0_sat_i :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vcmpy.s0.sat.i">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmpy_s0_sat_r,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vcmpy_s0_sat_r :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vcmpy.s0.sat.r">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmpy_s1_sat_i,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vcmpy_s1_sat_i :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vcmpy.s1.sat.i">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmpy_s1_sat_r,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vcmpy_s1_sat_r :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vcmpy.s1.sat.r">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmac_s0_sat_i,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vcmac_s0_sat_i :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vcmac.s0.sat.i">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmac_s0_sat_r,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vcmac_s0_sat_r :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vcmac.s0.sat.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vcrotate,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_vcrotate :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.vcrotate">;
+//
+// BUILTIN_INFO(HEXAGON.A2_add,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_add :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.add">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sub,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_sub :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.sub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addsat,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addsat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subsat,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subsat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addi,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addi">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_sat_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_sat_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.sat.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_sat_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_sat_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.sat.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_sat_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_sat_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.sat.lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_sat_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_sat_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.sat.hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_l16_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_l16_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.l16.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_l16_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_l16_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.l16.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_l16_sat_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_l16_sat_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.l16.sat.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_l16_sat_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_l16_sat_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.l16.sat.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_sat_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.sat.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_sat_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.sat.lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_sat_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.sat.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_sat_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.sat.hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_sat_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.sat.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_sat_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.sat.lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_sat_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.sat.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_sat_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.sat.hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_aslh,SI_ftype_SI,1)
+//
+def int_hexagon_A2_aslh :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.aslh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_asrh,SI_ftype_SI,1)
+//
+def int_hexagon_A2_asrh :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.asrh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_addp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.addp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addpsat,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_addpsat :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.addpsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addsp,DI_ftype_SIDI,2)
+//
+def int_hexagon_A2_addsp :
+Hexagon_di_sidi_Intrinsic<"HEXAGON.A2.addsp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_subp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.subp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_neg,SI_ftype_SI,1)
+//
+def int_hexagon_A2_neg :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.neg">;
+//
+// BUILTIN_INFO(HEXAGON.A2_negsat,SI_ftype_SI,1)
+//
+def int_hexagon_A2_negsat :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.negsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_abs,SI_ftype_SI,1)
+//
+def int_hexagon_A2_abs :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.abs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_abssat,SI_ftype_SI,1)
+//
+def int_hexagon_A2_abssat :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.abssat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vconj,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vconj :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.vconj">;
+//
+// BUILTIN_INFO(HEXAGON.A2_negp,DI_ftype_DI,1)
+//
+def int_hexagon_A2_negp :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.negp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_absp,DI_ftype_DI,1)
+//
+def int_hexagon_A2_absp :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.absp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_max,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_max :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.max">;
+//
+// BUILTIN_INFO(HEXAGON.A2_maxu,USI_ftype_SISI,2)
+//
+def int_hexagon_A2_maxu :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.A2.maxu">;
+//
+// BUILTIN_INFO(HEXAGON.A2_min,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_min :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.min">;
+//
+// BUILTIN_INFO(HEXAGON.A2_minu,USI_ftype_SISI,2)
+//
+def int_hexagon_A2_minu :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.A2.minu">;
+//
+// BUILTIN_INFO(HEXAGON.A2_maxp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_maxp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.maxp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_maxup,UDI_ftype_DIDI,2)
+//
+def int_hexagon_A2_maxup :
+Hexagon_udi_didi_Intrinsic<"HEXAGON.A2.maxup">;
+//
+// BUILTIN_INFO(HEXAGON.A2_minp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_minp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.minp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_minup,UDI_ftype_DIDI,2)
+//
+def int_hexagon_A2_minup :
+Hexagon_udi_didi_Intrinsic<"HEXAGON.A2.minup">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfr,SI_ftype_SI,1)
+//
+def int_hexagon_A2_tfr :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.tfr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfrsi,SI_ftype_SI,1)
+//
+def int_hexagon_A2_tfrsi :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.tfrsi">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfrp,DI_ftype_DI,1)
+//
+def int_hexagon_A2_tfrp :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.tfrp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfrpi,DI_ftype_SI,1)
+//
+def int_hexagon_A2_tfrpi :
+Hexagon_di_si_Intrinsic<"HEXAGON.A2.tfrpi">;
+//
+// BUILTIN_INFO(HEXAGON.A2_zxtb,SI_ftype_SI,1)
+//
+def int_hexagon_A2_zxtb :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.zxtb">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sxtb,SI_ftype_SI,1)
+//
+def int_hexagon_A2_sxtb :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.sxtb">;
+//
+// BUILTIN_INFO(HEXAGON.A2_zxth,SI_ftype_SI,1)
+//
+def int_hexagon_A2_zxth :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.zxth">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sxth,SI_ftype_SI,1)
+//
+def int_hexagon_A2_sxth :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.sxth">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combinew,DI_ftype_SISI,2)
+//
+def int_hexagon_A2_combinew :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.A2.combinew">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combineii,DI_ftype_SISI,2)
+//
+def int_hexagon_A2_combineii :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.A2.combineii">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combine_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_combine_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.combine.hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combine_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_combine_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.combine.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combine_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_combine_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.combine.lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combine_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_combine_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.combine.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfril,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_tfril :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.tfril">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfrih,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_tfrih :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.tfrih">;
+//
+// BUILTIN_INFO(HEXAGON.A2_and,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_and :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.and">;
+//
+// BUILTIN_INFO(HEXAGON.A2_or,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_or :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.or">;
+//
+// BUILTIN_INFO(HEXAGON.A2_xor,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_xor :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.xor">;
+//
+// BUILTIN_INFO(HEXAGON.A2_not,SI_ftype_SI,1)
+//
+def int_hexagon_A2_not :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.not">;
+//
+// BUILTIN_INFO(HEXAGON.M2_xor_xacc,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_xor_xacc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.xor.xacc">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subri,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subri :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subri">;
+//
+// BUILTIN_INFO(HEXAGON.A2_andir,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_andir :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.andir">;
+//
+// BUILTIN_INFO(HEXAGON.A2_orir,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_orir :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.orir">;
+//
+// BUILTIN_INFO(HEXAGON.A2_andp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_andp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.andp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_orp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_orp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.orp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_xorp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_xorp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.xorp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_notp,DI_ftype_DI,1)
+//
+def int_hexagon_A2_notp :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.notp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sxtw,DI_ftype_SI,1)
+//
+def int_hexagon_A2_sxtw :
+Hexagon_di_si_Intrinsic<"HEXAGON.A2.sxtw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sat,SI_ftype_DI,1)
+//
+def int_hexagon_A2_sat :
+Hexagon_si_di_Intrinsic<"HEXAGON.A2.sat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sath,SI_ftype_SI,1)
+//
+def int_hexagon_A2_sath :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.sath">;
+//
+// BUILTIN_INFO(HEXAGON.A2_satuh,SI_ftype_SI,1)
+//
+def int_hexagon_A2_satuh :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.satuh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_satub,SI_ftype_SI,1)
+//
+def int_hexagon_A2_satub :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.satub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_satb,SI_ftype_SI,1)
+//
+def int_hexagon_A2_satb :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.satb">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddub :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddubs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddubs :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddubs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddhs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddhs :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vadduhs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vadduhs :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vadduhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddws,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddws :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddws">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svavgh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svavgh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svavgh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svavghs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svavghs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svavghs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svnavgh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svnavgh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svnavgh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svaddh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svaddh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svaddh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svaddhs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svaddhs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svaddhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svadduhs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svadduhs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svadduhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svsubh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svsubh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svsubh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svsubhs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svsubhs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svsubhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svsubuhs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svsubuhs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svsubuhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vraddub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vraddub :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vraddub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vraddub_acc,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_A2_vraddub_acc :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.A2.vraddub.acc">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vradduh,SI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vradduh :
+Hexagon_si_didi_Intrinsic<"HEXAGON.M2.vradduh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubub :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsububs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsububs :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsububs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubhs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubhs :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubuhs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubuhs :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubuhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubws,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubws :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubws">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vabsh,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vabsh :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.vabsh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vabshsat,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vabshsat :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.vabshsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vabsw,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vabsw :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.vabsw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vabswsat,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vabswsat :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.vabswsat">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vabsdiffw,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vabsdiffw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vabsdiffw">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vabsdiffh,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vabsdiffh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vabsdiffh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vrsadub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vrsadub :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vrsadub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vrsadub_acc,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_A2_vrsadub_acc :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.A2.vrsadub.acc">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgub :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavguh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavguh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavguh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavgh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavgh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavgh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavgw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavgw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavgw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgwr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgwr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgwr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavgwr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavgwr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavgwr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgwcr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgwcr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgwcr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavgwcr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavgwcr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavgwcr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavghcr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavghcr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavghcr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavghcr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavghcr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavghcr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavguw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavguw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavguw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavguwr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavguwr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavguwr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgubr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgubr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgubr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavguhr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavguhr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavguhr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavghr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavghr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavghr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavghr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavghr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavghr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vminh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vmaxh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminub :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vminub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxub :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vmaxub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminuh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminuh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vminuh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxuh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxuh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vmaxuh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vminw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vmaxw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminuw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminuw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vminuw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxuw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxuw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vmaxuw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asr_r_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asr.r.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asl_r_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asl.r.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_lsr_r_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.lsr.r.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_r,SI_ftype_SI