How to scrape the Ask.com search engine using Scrapy and a Flask API?

I’m working on an application that lets the user get web page search results after entering a set of keywords, which are then sent to Ask.com. For this, I created an API in Flask and a scraper in Scrapy, inspired by the following article for the API part. But the API doesn’t work, because I can’t pass the data the user enters as keywords from my API to my scraper.
Here is my Flask API file:

import crochet
crochet.setup()

from flask import Flask , render_template, jsonify, request, redirect, url_for
from scrapy import signals
from scrapy.crawler import CrawlerRunner
from scrapy.signalmanager import dispatcher
import time
import os

# Importing our scraping spider from the askScraping file

from scrap.askScraping import AskScrapingSpider

# Creating Flask App Variable

app = Flask(__name__)

output_data = []
crawl_runner = CrawlerRunner()

# By default Flask will come into this when we run the file
@app.route('/')
def index():
	return render_template("index.html") # Returns index.html file in templates folder.


# After clicking the Submit Button FLASK will come into this
@app.route('/', methods=['POST'])
def submit():
    if request.method == 'POST':
        s = request.form['url'] # Getting the keywords entered in the form (the field is still named 'url')
        global baseURL
        baseURL = s
        # This will remove any existing file with the same name so that Scrapy will not append the data to a previous file.
        if os.path.exists("<path_to_outputfile.json>"): 
        	os.remove("<path_to_outputfile.json>")
        
        return redirect(url_for('scrape')) # Passing to the Scrape function
    

@app.route("/scrape")
def scrape():

    scrape_with_crochet(baseURL="https://www.ask.com/web?q={baseURL}") # Passing that URL to our Scraping Function

    time.sleep(20) # Pause the function while the scrapy spider is running
    
    return jsonify(output_data) # Returns the scraped data after running for 20 seconds.


@crochet.run_in_reactor
def scrape_with_crochet(baseURL):
    # This will connect to the dispatcher that will kind of loop the code between these two functions.
    dispatcher.connect(_crawler_result, signal=signals.item_scraped)
    
    # This will start the AskScrapingSpider from our Scrapy file and, after each yield, pass the item to the _crawler_result function.
    eventual = crawl_runner.crawl(AskScrapingSpider, category = baseURL)
    return eventual

# This will append the data to the output_data list.
def _crawler_result(item, response, spider):
    output_data.append(dict(item))


if __name__== "__main__":
    app.run(debug=True)
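
One thing I am not sure about: in the scrape() function above, the string "https://www.ask.com/web?q={baseURL}" is not an f-string, so I think the literal text {baseURL} is sent instead of the keywords. Below is a sketch of what I assume the route should look like (quote_plus from urllib.parse is my own addition to URL-encode the keywords; I have not verified that this is the fix):

from urllib.parse import quote_plus  # assumption: encode the keywords for the query string

@app.route("/scrape")
def scrape():
    # Build the full Ask.com search URL from the keywords submitted in the form.
    # My assumption: an f-string is needed here, otherwise the literal text
    # "{baseURL}" is passed to the spider instead of the keywords.
    search_url = f"https://www.ask.com/web?q={quote_plus(baseURL)}"

    scrape_with_crochet(baseURL=search_url) # Hand the finished URL to the scraping function

    time.sleep(20) # Pause the function while the scrapy spider is running

    return jsonify(output_data) # Returns the scraped data after running for 20 seconds.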

And here is my scraper file:

import scrapy
import datetime



class AskScrapingSpider(scrapy.Spider):

    name = 'ask_scraping'
    def start_requests(self):
        myBaseUrl = ''
        start_urls = []

        def __init__(self, category='', **kwargs): # The category variable will have the input URL.
            self.myBaseUrl = category
            self.start_urls.append(self.myBaseUrl)
            super().__init__(**kwargs)

            custom_settings = {'FEED_URI': 'scrap/outputfile.json', 'CLOSESPIDER_TIMEOUT' : 15} # This will tell scrapy to store the scraped data to outputfile.json and for how long the spider should run.

        
            yield scrapy.Request(start_urls, callback=self.parse, meta={'pos': 0})
                            
    
    


    def parse(self, response):
        print('url:', response.url)

        start_pos = response.meta['pos']
        print('start pos:', start_pos)

        dt = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

        items = response.css('div.PartialSearchResults-item')

        for pos, result in enumerate(items, start_pos+1):
            yield {
                'title':    result.css('a.PartialSearchResults-item-title-link.result-link::text').get().strip(),
                'snippet':  result.css('p.PartialSearchResults-item-abstract::text').get().strip(),
                'link':     result.css('a.PartialSearchResults-item-title-link.result-link').attrib.get('href'),
                'position': pos,
                'date':     dt,
            }

        # --- after loop ---

        next_page = response.css('.PartialWebPagination-next a')

        if next_page:
            url = next_page.attrib.get('href')
            print('next_page:', url)  # relative URL
            # use `follow()` to add `https://www.ask.com/` to URL and create absolute URL
            yield response.follow(url, callback=self.parse, meta={'pos': pos+1})
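
From the other Scrapy examples I looked at, I think __init__ is not supposed to be nested inside start_requests, and scrapy.Request expects a single URL string rather than a list. This is the structure I assume the spider should have (a rough, untested sketch that keeps my names category and myBaseUrl; the parse method would stay exactly as above):

import scrapy
import datetime


class AskScrapingSpider(scrapy.Spider):

    name = 'ask_scraping'
    # Store the scraped data in outputfile.json and stop the spider after 15 seconds.
    custom_settings = {'FEED_URI': 'scrap/outputfile.json', 'CLOSESPIDER_TIMEOUT': 15}

    def __init__(self, category='', **kwargs): # category receives the URL built by the Flask app
        super().__init__(**kwargs)
        self.myBaseUrl = category
        self.start_urls = [self.myBaseUrl]

    def start_requests(self):
        # One request per start URL; each request gets a single URL string, not a list.
        for url in self.start_urls:
            yield scrapy.Request(url, callback=self.parse, meta={'pos': 0})

    # parse() unchanged from the version above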

This is what I get in the command line when I run it, with absolutely no errors:

 python main.py
 * Serving Flask app 'main' (lazy loading)
 * Environment: production
   WARNING: This is a development server. Do not use it in a production deployment.
   Use a production WSGI server instead.
 * Debug mode: on
 * Running on http://127.0.0.1:5000 (Press CTRL+C to quit)
 * Restarting with stat
 * Debugger is active!
 * Debugger PIN: 107-226-838
127.0.0.1 - - [15/Jun/2022 12:44:54] "GET / HTTP/1.1" 200 -
127.0.0.1 - - [15/Jun/2022 12:45:03] "POST / HTTP/1.1" 302 -
127.0.0.1 - - [15/Jun/2022 12:45:23] "GET /scrape HTTP/1.1" 200 -
127.0.0.1 - - [15/Jun/2022 12:46:10] "GET /scrape HTTP/1.1" 200 -
 * Detected change in 'C:\\Users\\user\\Documents\\AAprojects\\Whelpsgroups1\\API\\main.py', reloading
 * Restarting with stat
 * Debugger is active!
 * Debugger PIN: 107-226-838
127.0.0.1 - - [15/Jun/2022 12:48:25] "GET /scrape HTTP/1.1" 200 -
 * Detected change in 'C:\\Users\\user\\Documents\\AAprojects\\Whelpsgroups1\\API\\main.py', reloading
 * Restarting with stat
 * Debugger is active!
 * Debugger PIN: 107-226-838

I looked at this Stack Overflow question, but without success.