<?xml version="1.0" encoding="UTF-8" ?>
		<urlset
			xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
			xmlns:xhtml="http://www.w3.org/1999/xhtml"
			xmlns:mobile="http://www.google.com/schemas/sitemap-mobile/1.0"
			xmlns:news="http://www.google.com/schemas/sitemap-news/0.9"
			xmlns:image="http://www.google.com/schemas/sitemap-image/1.1"
			xmlns:video="http://www.google.com/schemas/sitemap-video/1.1"
		>
      
    <url>
      <loc>https://hackernoon.com/turbosparse-democratizing-ai-via-efficient-drelu-sparsification</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/turbosparse-limitations-the-impact-of-150b-token-recovery-training</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/turbosparse-faster-llms-via-drelu-activation</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/drelu-sparsification-high-performance-90percent-sparsity-for-next-gen-llms</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/turbosparse-mobile-22x-faster-mixtral-inference-on-powerinfer-2</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/turbosparse-inference-46x-faster-llm-decoding-via-hybrid-gpu-cpu-computing</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/turbosparse-elite-inference-speed-via-drelu-sparsity</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/turbosparse-inference-speedup-powerinfer-integration-for-real-time-llm-decoding</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/turbosparse-efficiency-achieving-97percent-parameter-sparsity-in-mixtral-47b</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/turbosparse-llm-performance-outperforming-mixtral-and-gemma-with-extreme-sparsity</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/drelu-sparsification-recovering-llm-performance-with-150b-token-pretraining</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/sparse-activation-in-moe-models-extending-relufication-to-mixture-of-experts</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/drelu-activation-function-matching-swiglu-performance-with-90percent-sparsity</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/analyzing-relufication-limitations-enhancing-llm-sparsity-via-up-projection</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/optimizing-llm-inference-sparse-activation-moe-and-gated-mlp-efficiency</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/turbosparse-llm-accelerating-mixtral-and-mistral-inference-via-drelu-sparsity</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/toto-time-series-optimized-transformer-for-observability</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/toto-ai-model-sets-new-benchmark-for-time-series-forecasting</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/how-datadog-turned-noisy-observability-metrics-into-ai-gold</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/how-toto-reimagines-multi-head-attention-for-multivariate-forecasting</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/the-time-series-optimized-transformer-setting-new-standards-in-observability</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/tnt-llm-implementation-details-pipeline-design-robustness-and-efficiency</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/tnt-llm-presenting-prompt-templates</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/additional-results-cross-lingual-taxonomy-evaluation-and-in-depth-classification-analysis</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/tnt-llm-generated-taxonomies-user-intent-and-conversation-domain-labels</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/tnt-llm-democratizing-text-mining-with-automated-taxonomy-and-scalable-classification</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/tnt-llm-high-quality-automated-text-mining-and-efficient-llm-augmented-classification</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/llm-augmented-text-classification-distilling-gpt-4-labels-into-efficient-classifiers</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/tnt-llm-for-automated-taxonomy-generation-outperforming-clustering-baselines</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/tnt-llm-for-user-intent-and-conversational-domain-labeling-in-bing-copilot</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/evaluating-tnt-llm-text-classification-human-agreement-and-scalable-llm-metrics</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/evaluating-tnt-llm-automatic-human-and-llm-based-assessment</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/a-new-ai-tool-builds-knowledge-graphs-so-good-they-could-rewire-scientific-discovery</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/scientists-built-a-smarter-sharper-materials-graph-by-teaching-ai-to-double-check-its-work</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/ai-model-reads-thousands-of-studies-nails-battery-science-better-than-expected</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/scientists-built-a-knowledge-graph-for-materialsand-you-can-actually-use-it</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/scientists-built-a-smart-filter-for-science-papersand-its-cleaning-up-the-data-chaos</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/this-ai-doesnt-just-skim-scientific-papersit-tags-sorts-and-explains-them-too</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/this-ai-reads-science-papers-like-a-pro-even-when-humans-cant-agree-on-the-words</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/researchers-build-ai-knowledge-graph-that-sifts-through-science-papers-for-you</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/tired-of-sifting-through-science-papers-this-ai-knowledge-graph-does-it-for-you</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/tnt-llm-llms-for-automated-text-taxonomy-and-classification</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/tnt-llm-automating-text-taxonomy-generation-and-classification-with-large-language-models</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/batched-prompting-for-efficient-gpt-4-annotatio</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/understanding-concentrability-in-direct-nash-optimization</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/extending-direct-nash-optimization-for-regularized-preferences</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/what-does-the-future-of-ai-model-training-hold</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/prior-approaches-to-text-mining-taxonomy-clustering-and-llm-annotation</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/tnt-llm-text-mining-at-scale-with-large-language-models</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/exploring-cutting-edge-approaches-to-iterative-llm-fine-tuning</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/ai-that-trains-itself-heres-how-it-works</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/how-contrastive-learning-helps-ai-self-improve</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/how-direct-nash-optimization-improves-ai-model-training</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/the-best-way-to-train-ai-reward-models-vs-preference-optimization</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/direct-nash-optimization-beats-bigger-models-with-better-data</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/the-art-of-arguing-with-yourselfand-why-its-making-ai-smarter</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/analyzing-the-impact-of-model-scaling-on-long-form-factuality</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/inside-jambas-architecture-mamba-layers-moe-and-the-future-of-ai-models</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/256k-tokens-on-one-gpu-jambas-engineering-magic-explained</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/how-jamba-combines-transformers-and-mamba-to-build-smarter-language-models</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/breaking-down-jamba-how-mixing-attention-and-state-spaces-makes-a-smarter-llm</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/what-jambas-benchmark-wins-tell-us-about-the-power-of-hybrid-llms</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/why-jamba-is-the-first-truly-scalable-hybrid-llm-for-long-contexts</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/how-ai-judges-the-accuracy-of-its-own-answers</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/how-ai-breaks-down-and-validates-information-for-truthfulness</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/how-longfact-helps-measure-the-accuracy-of-ai-responses</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/how-safe-performs-compared-to-human-annotations</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/benchmarking-long-form-factuality-in-large-language-models</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/challenges-in-using-google-search-for-factuality-verification</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/a-smarter-way-to-check-if-ai-answers-are-correct</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/gpt-4-gemini-ultra-and-palm-2-l-it-rlhf-top-long-form-factuality-rankings</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/android-function-examples-that-you-should-know</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/the-future-of-octopus-v2-what-does-it-entail</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/why-llms-are-more-accurate-and-cost-effective-than-human-fact-checkers</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/safe-a-new-ai-tool-for-fact-checking-long-form-responses</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/how-longfact-helps-ai-models-improve-their-accuracy-across-multiple-topics</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/the-ai-truth-test-new-study-tests-the-accuracy-of-13-major-ai-models</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/enabling-parallel-and-nested-function-calls-in-language-models-dataset-requirements</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/optimizing-function-calling-models-the-role-of-dataset-size-and-lora-fine-tuning-dy0vssv</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/demonstrating-adaptability-evaluating-function-calling-on-vehicle-yelp-and-doordash-apis</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/benchmarking-large-language-models-for-function-calling-gpt-4-gpt-35-llama-and-octopus</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/developing-function-calling-models-comparing-full-training-and-lora-on-gemma-2b</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/from-nlp-to-data-synthesis-the-surprising-power-of-masked-language-models</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/solving-the-missing-data-problem-with-masked-language-models</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/mlm-meets-conditional-density-a-new-way-to-handle-tabular-data-synthesis</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/can-bert-generate-tables-exploring-macodes-approach-to-synthetic-data</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/how-masked-language-modeling-can-be-used-to-generate-synthetic-tabular-data</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/why-macode-outperforms-gans-in-tabular-data-generation</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/generating-private-high-utility-tabular-data-with-masked-language-models</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/from-tokens-to-tables-how-nlp-tech-is-revolutionizing-synthetic-datasets</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/privacy-preserving-synthetic-data-for-ml-the-role-of-masked-language-models</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/functional-tokens-enhancing-language-models-for-function-calling</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/dataset-generation-for-api-function-calls-leveraging-google-gemini-for-accuracy</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/detailing-the-primary-methodology-implemented-in-our-models-octopus-v2</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/efficient-on-device-llms-function-calling-and-fine-tuning-strategies</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/octopus-v2-an-on-device-language-model-for-super-agent</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/dangerous-diagnoses-gpt-4vs-role-in-medical-image-interpretation</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/can-gpt-4v-diagnose-a-deep-dive-into-ais-medical-imaging-capabilities</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/the-role-of-human-in-the-loop-preferences-in-reward-function-learning-for-humanoid-tasks</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/tracking-reward-function-improvement-with-proxy-human-preferences-in-icpl</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/few-shot-in-context-preference-learning-using-large-language-models-environment-details</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/icpl-baseline-methods-disagreement-sampling-and-prefppo-for-reward-learning</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/few-shot-in-context-preference-learning-using-large-language-models-full-prompts-and-icpl-details</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/how-icpl-enhances-reward-function-efficiency-and-tackles-complex-rl-tasks</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/scientists-use-human-preferences-to-train-smarter-ai-agents-30x-faster</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/how-icpl-addresses-the-core-problem-of-rl-reward-design</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/how-do-we-teach-reinforcement-learning-agents-human-preferences</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/hacking-reinforcement-learning-with-a-little-help-from-humans-and-llms</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://hackernoon.com/researchers-uncover-breakthrough-in-human-in-the-loop-ai-training-with-icpl</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>

    <url>
      <loc>https://languagemodels.tech/about</loc>
      <changefreq>daily</changefreq>
      <priority>0.8</priority>
    </url>
            
		</urlset>