python类WebDriverWait()的实例源码

waiter.py 文件源码 项目:explicit 作者: levi-rs 项目源码 文件源码 阅读 20 收藏 0 点赞 0 评论 0
def find_elements(driver, elem_path, by=CSS, timeout=TIMEOUT, poll_frequency=0.5):
    """Locate and return every element matching *elem_path*.

    Polls the page every ``poll_frequency`` seconds, for up to ``timeout``
    seconds, until at least one matching element is present; the full list
    of matches is then returned.

    Args:
        driver (selenium webdriver or element): A driver or element
        elem_path (str): String used to locate the element
        by (selenium By): Selenium By reference
        timeout (int): Selenium Wait timeout, in seconds
        poll_frequency (float): Selenium Wait polling frequency, in seconds

    Returns:
        list of elements: Selenium elements

    Raises:
        TimeoutException: Raised when no matching element appears in time
    """
    locator = (by, elem_path)
    return WebDriverWait(driver, timeout, poll_frequency).until(
        EC.presence_of_all_elements_located(locator))
clipbucket_driver.py 文件源码 项目:ansible-role-clipbucket 作者: mtlynch 项目源码 文件源码 阅读 20 收藏 0 点赞 0 评论 0
def do_check_modules(self):
        """Verify every ClipBucket module on the admin module-check page.

        Visits cb_mod_check.php and inspects each 'well' panel; a panel
        containing an 'alert' element indicates a broken module and raises
        ClipBucketModuleError with the alert's text.
        """
        logger.info('Checking ClipBucket modules')
        self.get('/admin_area/cb_mod_check.php')

        panels = self._driver.find_elements_by_class_name('well')
        for panel in panels:
            # Make sure the panel has rendered before probing it.
            waiter = ui.WebDriverWait(self._driver, TIMEOUT)
            waiter.until(expected_conditions.visibility_of(panel))
            try:
                alert = panel.find_element_by_class_name('alert')
            except exceptions.NoSuchElementException:
                # Lack of alert is good: the module is installed correctly.
                continue
            if alert:
                raise ClipBucketModuleError(alert.text)
        logger.info('Module check complete')
tools.py 文件源码 项目:reahl 作者: reahl 项目源码 文件源码 阅读 19 收藏 0 点赞 0 评论 0
def wait_for(self, condition, *args, **kwargs):
        """Waits until `condition` is satisfied. If `condition` is not satisfied after a timeout period of 2 seconds,
           an exception is raised.

           :param condition: A function, method or other callable which will be called periodically to check\
                             whether a certain condition holds. It should return True if the condition holds,\
                             False otherwise.

           Extra positional and keyword arguments to this method are passed on as-is in the calls
           to `condition`.
        """
        def wrapped(driver):
            try:
                return condition(*args, **kwargs)
            except Exception as ex:
                # A CannotSendRequest from the test server means "busy, try
                # again", so report the condition as not-yet-satisfied.
                # Guard on ex.args being non-empty: exceptions raised with no
                # arguments would otherwise trigger an IndexError here,
                # masking the original exception.
                if ex.args and isinstance(ex.args[0], CannotSendRequest):
                    return False
                raise
        return WebDriverWait(self.web_driver, 2).until(wrapped)
__init__.py 文件源码 项目:python_selenium_astride 作者: reclamador 项目源码 文件源码 阅读 19 收藏 0 点赞 0 评论 0
def _click_action(self, locator, timeout=10.0):
        """Wait for *locator* to become clickable and click it.

        Retries the wait once after transient stale-element or generic
        WebDriver failures, and retries the click once (after a short
        sleep) if the click itself fails.
        """
        driver = self.driver

        def wait_until_clickable():
            # One shared wait expression; trace is attached for diagnostics.
            return WebDriverWait(driver, float(timeout)).until(
                EC.element_to_be_clickable(locator), self._get_trace())

        try:
            element = wait_until_clickable()
        except StaleElementReferenceException:
            driver.implicitly_wait(2)
            element = wait_until_clickable()
        except WebDriverException:
            driver.implicitly_wait(1)
            element = wait_until_clickable()

        try:
            element.click()
        except WebDriverException:
            sleep(1)
            wait_until_clickable().click()
calculate_slider_offset.py 文件源码 项目:fintech_spider 作者: hee0624 项目源码 文件源码 阅读 21 收藏 0 点赞 0 评论 0
def get_search_page(search_text, driver_path="E:\\virtualenv\\chromedriver.exe"):
    """Open the gsxt.gov.cn search page, submit *search_text*, and wait for
    the geetest captcha box ("gt_box") to appear.

    Args:
        search_text (str): Query typed into the search box.
        driver_path (str): Path to the chromedriver executable. Previously
            hard-coded; now a parameter (with the old value as default) so
            the function works on any host.

    Returns:
        The live Chrome webdriver, positioned on the results/captcha page.
    """
    url = "http://www.gsxt.gov.cn/index.html"
    driver = webdriver.Chrome(driver_path)
    driver.get(url)
    wait = WebDriverWait(driver, 20)
    element = wait.until(EC.presence_of_element_located((By.ID, "keyword")))
    element.clear()
    element.send_keys(search_text)

    # Random pause mimics human pacing before submitting the query.
    time.sleep(random.uniform(1.0, 2.0))
    driver.find_element_by_id("btn_query").click()
    wait = WebDriverWait(driver, 30)
    wait.until(EC.presence_of_element_located((By.CLASS_NAME, "gt_box")))
    time.sleep(random.uniform(2.0, 3.0))
    return driver
element.py 文件源码 项目:cyphon 作者: dunbarcyber 项目源码 文件源码 阅读 19 收藏 0 点赞 0 评论 0
def count(self):
        """Open the widget and return how many <span data-value> options it
        shows; returns 0 when the options never appear (or the wait times
        out / the socket times out).
        """
        time.sleep(SLEEP)
        self.click()
        time.sleep(SLEEP)  # wait for server response
        xpath = self.path + '/span[@data-value]'
        try:
            WebDriverWait(self.driver, TIMEOUT).until(
                EC.presence_of_element_located((By.XPATH, xpath)))
            matches = self.driver.find_elements_by_xpath(xpath)
            return len(matches)
        except (TimeoutException, socket.timeout):
            return 0
smSpider.py 文件源码 项目:smth_coupons_crawler 作者: moyawong 项目源码 文件源码 阅读 19 收藏 0 点赞 0 评论 0
def parse_content(self, url):
        """Load *url* in the shared driver and return the text of the first
        post body (``td.a-content > p``), or '' when the page cannot be
        fetched or lacks that structure.

        Waits up to 30s for any <table> to render before parsing; a wait
        timeout is only logged and parsing is still attempted.

        Modernized from Python 2 syntax (``except Exception, e`` / print
        statements); returns text (str) rather than utf-8 bytes.
        """
        try:
            self.driver.get(url)
        except Exception:
            # Navigation failed outright; skip this detail page.
            print("give up one detail")
            return ""

        try:
            element = WebDriverWait(self.driver, 30).until(
                EC.presence_of_all_elements_located((By.TAG_NAME, 'table'))
            )
            print('element:\n', element)
        except Exception as e:
            print(Exception, ":", e)
            print("wait failed")

        page_source = self.driver.page_source
        bs_obj = BeautifulSoup(page_source, "lxml")

        cell = bs_obj.find('td', class_='a-content')
        if cell is None or cell.p is None:
            # Page rendered without the expected post body.
            return ""
        return cell.p.get_text()
amazonSpider.py 文件源码 项目:smth_coupons_crawler 作者: moyawong 项目源码 文件源码 阅读 20 收藏 0 点赞 0 评论 0
def parse_content(self, url):
        """Load *url* in the shared driver and return the text of the first
        deal body (``td.a-content > p``), or '' when the page cannot be
        fetched or lacks that structure.

        Waits up to 30s for the deal tile to render before parsing; a wait
        timeout is only logged and parsing is still attempted.

        Modernized from Python 2 syntax (``except Exception, e`` / print
        statements); returns text (str) rather than utf-8 bytes.
        """
        try:
            self.driver.get(url)
        except Exception:
            # Navigation failed outright; skip this detail page.
            print("give up one detail")
            return ""

        try:
            # NOTE(review): By.ID with a space-containing value
            # ('a-row dealTile') cannot match a real id and likely always
            # times out here -- probably meant a class/CSS selector; verify
            # against the target page before changing.
            element = WebDriverWait(self.driver, 30).until(
                EC.presence_of_all_elements_located((By.ID, 'a-row dealTile'))
            )
            print('element:\n', element)
        except Exception as e:
            print(Exception, ":", e)
            print("wait failed")

        page_source = self.driver.page_source
        bs_obj = BeautifulSoup(page_source, "lxml")

        cell = bs_obj.find('td', class_='a-content')
        if cell is None or cell.p is None:
            # Page rendered without the expected deal body.
            return ""
        return cell.p.get_text()
client.py 文件源码 项目:linkedin-jobs-scraper 作者: kirkhunter 项目源码 文件源码 阅读 18 收藏 0 点赞 0 评论 0
def link_is_present(driver, delay, selector, index, results_page):
    """
    verify that the link selector is present and print the search
    details to console. This method is particularly useful for catching
    the last link on the last page of search results
    """
    try:
        condition = EC.presence_of_element_located((By.XPATH, selector))
        WebDriverWait(driver, delay).until(condition)
    except Exception as e:
        print(e)
        if index >= 25:
            return False
        # Early failure usually means the result pages ran out.
        print("\nWas not able to wait for job_selector to load. Search " \
                "results may have been exhausted.")
        return True
    print("**************************************************")
    print("\nScraping data for result  {}" \
            "  on results page  {} \n".format(index, results_page))
    return True
client.py 文件源码 项目:linkedin-jobs-scraper 作者: kirkhunter 项目源码 文件源码 阅读 19 收藏 0 点赞 0 评论 0
def login(self):
        """login to linkedin then wait 3 seconds for page to load"""
        # Block (up to 2 minutes) until the username field is interactable.
        clickable = EC.element_to_be_clickable((By.ID, "session_key-login"))
        WebDriverWait(self.driver, 120).until(clickable)

        # Fill in the stored credentials.
        username_field = self.driver.find_element_by_id("session_key-login")
        username_field.send_keys(self.username)
        password_field = self.driver.find_element_by_id("session_password-login")
        password_field.send_keys(self.password)
        # Submit the form via the Return key.
        password_field.send_keys(Keys.RETURN)
        # Wait a few seconds for the page to load.
        time.sleep(3)
scraper.py 文件源码 项目:instagram-followers-scraper 作者: frabonomi 项目源码 文件源码 阅读 31 收藏 0 点赞 0 评论 0
def authenticate(self, username, password):
        """Log in to Instagram with the provided credentials."""

        print('\nLogging in…')
        self.driver.get('https://www.instagram.com')

        # Open the login form once the link renders.
        WebDriverWait(self.driver, 5).until(
            EC.presence_of_element_located((By.LINK_TEXT, 'Log in'))
        ).click()

        # Locate the credential inputs by their placeholder text.
        username_input = self.driver.find_element_by_xpath(
            '//input[@placeholder="Username"]'
        )
        password_input = self.driver.find_element_by_xpath(
            '//input[@placeholder="Password"]'
        )

        # Type the credentials and submit with Return.
        username_input.send_keys(username)
        password_input.send_keys(password)
        password_input.send_keys(Keys.RETURN)
        time.sleep(1)
WebDriverUtil.py 文件源码 项目:webnuke 作者: bugbound 项目源码 文件源码 阅读 20 收藏 0 点赞 0 评论 0
def getDriverWithProxySupport(self, proxy_host, proxy_port):
        """Build a Firefox driver that routes HTTP/HTTPS/SSL traffic through
        the given proxy; also installs a 10-second WebDriverWait on self.wait.
        """
        if self.debug == False:
            # Not debugging: run inside a virtual display (headless).
            self.display = Display(visible=0, size=(1920, 1080))
            self.display.start()

        profile = self.getWebDriverProfile()
        profile.set_preference("network.proxy.type", 1)  # 1 = manual proxy config
        # Same host/port for each supported scheme.
        for scheme in ("http", "https", "ssl"):
            profile.set_preference("network.proxy.%s" % scheme, proxy_host)
            profile.set_preference("network.proxy.%s_port" % scheme, proxy_port)
        profile.update_preferences()

        capabilities = webdriver.DesiredCapabilities().FIREFOX
        capabilities["marionette"] = False  # use the legacy (pre-geckodriver) protocol
        newdriver = webdriver.Firefox(firefox_profile=profile, capabilities=capabilities)

        self.wait = ui.WebDriverWait(newdriver, 10)  # timeout after 10 seconds
        return newdriver
navbar_test.py 文件源码 项目:nav 作者: UNINETT 项目源码 文件源码 阅读 19 收藏 0 点赞 0 评论 0
def test_simple_ip_search_should_return_result(selenium, base_url):
    """Tests a search for an IP address"""
    selenium.get('{}/'.format(base_url))
    query = selenium.find_element_by_id('query')
    search_button = selenium.find_element_by_css_selector(
        "input.button[type='submit']")

    ipaddr = "192.168.42.42"
    query.send_keys(ipaddr)
    search_button.click()

    # Wait until the result table's caption mentions the address.
    # text_to_be_present_in_element returns a bool, not an element, so the
    # wait's return value is deliberately discarded (the original bound it
    # to `caption` and immediately overwrote it).
    WebDriverWait(selenium, 15).until(
        EC.text_to_be_present_in_element((By.TAG_NAME, "caption"), ipaddr)
    )

    caption = selenium.find_element_by_tag_name('caption')
    assert ipaddr in caption.text
common.py 文件源码 项目:picoCTF 作者: picoCTF 项目源码 文件源码 阅读 23 收藏 0 点赞 0 评论 0
def find_id_with_timeout(driver, ID, timeout=TIMEOUT):
    """Block until an element with id *ID* is present, then return it."""
    waiter = WebDriverWait(driver, timeout)
    return waiter.until(EC.presence_of_element_located((By.ID, ID)))
common.py 文件源码 项目:picoCTF 作者: picoCTF 项目源码 文件源码 阅读 23 收藏 0 点赞 0 评论 0
def find_class_with_timeout(driver, CLASS, timeout=TIMEOUT):
    """Block until an element with class *CLASS* is present, then return it."""
    waiter = WebDriverWait(driver, timeout)
    return waiter.until(EC.presence_of_element_located((By.CLASS_NAME, CLASS)))
common.py 文件源码 项目:picoCTF 作者: picoCTF 项目源码 文件源码 阅读 20 收藏 0 点赞 0 评论 0
def find_xpath_with_timeout(driver, XPATH, timeout=TIMEOUT):
    """Block until an element matching *XPATH* is present, then return it."""
    waiter = WebDriverWait(driver, timeout)
    return waiter.until(EC.presence_of_element_located((By.XPATH, XPATH)))
common.py 文件源码 项目:picoCTF 作者: picoCTF 项目源码 文件源码 阅读 25 收藏 0 点赞 0 评论 0
def find_visible_id_with_timeout(driver, ID, timeout=TIMEOUT):
    """Block until the element with id *ID* is visible, then return it."""
    waiter = WebDriverWait(driver, timeout)
    return waiter.until(EC.visibility_of_element_located((By.ID, ID)))
selenium.py 文件源码 项目:SerpScrap 作者: ecoron 项目源码 文件源码 阅读 31 收藏 0 点赞 0 评论 0
def _goto_next_page(self):
        """
        Click the next page element,

        Returns:
            The url of the next page or False if there is no such url
                (end of available pages for instance).
        """
        next_url = ''
        element = self._find_next_page_element()

        # Only proceed when the lookup returned something clickable;
        # a non-element result means there is no next page.
        if hasattr(element, 'click'):
            next_url = element.get_attribute('href')
            try:
                element.click()
            except WebDriverException:
                # See http://stackoverflow.com/questions/11908249/debugging-element-is-not-clickable-at-point-error
                # first move mouse to the next element, some times the element is not visibility
                selector = self.next_page_selectors[self.search_engine_name]
                if selector:
                    try:
                        # Re-locate the link, hover it to force it into view,
                        # then retry the click on a fresh element handle.
                        next_element = WebDriverWait(self.webdriver, 5).until(
                            EC.presence_of_element_located((By.CSS_SELECTOR, selector)))
                        webdriver.ActionChains(self.webdriver).move_to_element(next_element).perform()
                        # wait until the next page link emerges
                        WebDriverWait(self.webdriver, 8).until(
                            EC.visibility_of_element_located((By.CSS_SELECTOR, selector)))
                        element = self.webdriver.find_element_by_css_selector(selector)
                        next_url = element.get_attribute('href')
                        element.click()
                    except WebDriverException:
                        # Best-effort retry failed; fall through with whatever
                        # next_url was captured (possibly still '').
                        pass

        # wait until the next page was loaded

        if not next_url:
            return False
        else:
            return next_url
selenium.py 文件源码 项目:SerpScrap 作者: ecoron 项目源码 文件源码 阅读 34 收藏 0 点赞 0 评论 0
def wait_until_title_contains_keyword(self):
        """Wait up to 5s for the page title to contain the search query;
        on timeout, log the mismatch instead of raising."""
        try:
            condition = EC.title_contains(self.query)
            WebDriverWait(self.webdriver, 5).until(condition)
        except TimeoutException:
            message = '{}: Keyword "{}" not found in title: {}'.format(
                self.name, self.query, self.webdriver.title)
            logger.debug(SeleniumSearchError(message))
selenium.py 文件源码 项目:SerpScrap 作者: ecoron 项目源码 文件源码 阅读 29 收藏 0 点赞 0 评论 0
def wait_until_serp_loaded(self):
        """Block (up to 5s) until the search keyword shows up in the URL."""

        def keyword_in_url(driver):
            # The keyword may appear percent-encoded or plus-encoded;
            # a transient WebDriverException counts as "not yet".
            try:
                url = driver.current_url
            except WebDriverException:
                return None
            return quote(self.query) in url or \
                self.query.replace(' ', '+') in url

        WebDriverWait(self.webdriver, 5).until(keyword_in_url)


问题


面经


文章

微信
公众号

扫码关注公众号