diff --git a/locale/ar/about/releases.md b/locale/ar/about/releases.md index df498b621c51..a60f70913e3a 100644 --- a/locale/ar/about/releases.md +++ b/locale/ar/about/releases.md @@ -19,7 +19,4 @@ schedule-footer: التواريخ قابلة للتغيير. # الإصدارات -النسخة الرئسية للـNode.js مايبن الإصدار _الحالي_ لغاية ستة أشهر، وذلك لإعطاء الوقت الكافي لمؤلفي المكتبة لدعمهم. -بعد ستة أشهر، إصدارات الأعداد الفردية (مثل 9، 11، إلخ) تصبح غير مدعومة و إصدارات الأعداد الزوجية (10، 12، إلخ) ترحل إلى حالة _LTS ناشطة_ وهي جاهزة للاستخدام العام. -حالة الإصدار _LTS_ هو "دعم طويل الأمد" التي تتضمن الأخطاء الخطيرة ستصلح في خلال مدة أقصاها 30 شهرا. -يجب أن تستخدم التطبيقات الإصدارات _LTS الناشطة_ أو _LTS قيد الصيانة_ فقط. +النسخة الرئسية للـNode.js مايبن الإصدار _الحالي_ لغاية ستة أشهر، وذلك لإعطاء الوقت الكافي لمؤلفي المكتبة لدعمهم. بعد ستة أشهر، إصدارات الأعداد الفردية (مثل 9، 11، إلخ) تصبح غير مدعومة و إصدارات الأعداد الزوجية (10، 12، إلخ) ترحل إلى حالة _LTS ناشطة_ وهي جاهزة للاستخدام العام. حالة الإصدار _LTS_ هو "دعم طويل الأمد" التي تتضمن الأخطاء الخطيرة ستصلح في خلال مدة أقصاها 30 شهرا. يجب أن تستخدم التطبيقات الإصدارات _LTS الناشطة_ أو _LTS قيد الصيانة_ فقط. diff --git a/locale/es/about/community.md b/locale/es/about/community.md new file mode 100644 index 000000000000..64bf747374e3 --- /dev/null +++ b/locale/es/about/community.md @@ -0,0 +1,57 @@ +--- +title: Community Committee +layout: about.hbs +--- + +# Community Committee + +The Community Committee (CommComm) is a top-level committee in the Node.js Foundation. 
The CommComm has authority over outward-facing community outreach efforts, including: + +* Community [Evangelism](https://github.com/nodejs/evangelism) +* Education Initiatives +* Cultural Direction of Node.js Foundation +* Community Organization Outreach +* Translation and Internationalization +* Project Moderation/Mediation +* Public Outreach and [Publications](https://medium.com/the-node-js-collection) + +There are four types of involvement with the Community Committee: + +* A **Contributor** is any individual creating or commenting on an issue or pull request. +* A **Collaborator** is a contributor who has been given write access to the repository +* An **Observer** is any individual who has requested or been requested to attend a CommComm meeting. It is also the first step to becoming a Member. +* A **Member** is a collaborator with voting rights who has met the requirements of participation and voted in by the CommComm voting process. + +For the current list of Community Committee members, see the project's [README.md](https://github.com/nodejs/community-committee). + +## Contributors and Collaborators + +It is the mission of CommComm to further build out the Node.js Community. If you're reading this, you're already a part of that community – and as a part of the Node.js Community, we'd love to have your help! + +The [nodejs/community-committee](https://github.com/nodejs/community-committee) GitHub repository is a great place to start. Check out the [issues labeled "Good first issue"](https://github.com/nodejs/community-committee/labels/good%20first%20issue) to see where we're looking for help. If you have your own ideas on how we can engage and build the community, feel free to open your own issues, create pull requests with improvements to our existing work, or help us by sharing your thoughts and ideas in the ongoing discussions we're having in GitHub. 
+ +You can further participate in our ongoing efforts around community building - like localization, evangelism, the Node.js Collection, and others - by digging into their respective repositories and getting involved! + +Before diving in, please be sure to read the [Collaborator Guide](https://github.com/nodejs/community-committee/blob/master/governance/COLLABORATOR_GUIDE.md). + +If you're interested in participating in the Community Committee as a committee member, you should read the section below on **Observers and Membership**, and create an issue asking to be an Observer in our next Community Committee meeting. You can find a great example of such an issue [here](https://github.com/nodejs/community-committee/issues/142). + +## Observers and Membership + +If you're interested in becoming more deeply involved with the Community Committee and its projects, we encourage you to become an active observer, and work toward achieving member status. To become a member you must: + +1. Attend the bi-weekly meetings, investigate issues tagged as good first issue, file issues and pull requests, and provide insight via GitHub as a contributor or collaborator. +2. Request to become an Observer by filing an issue. Once added as an Observer to meetings, we will track attendance and participation for 3 months, in accordance with our governance guidelines. You can find a great example of such an issue [here](https://github.com/nodejs/community-committee/issues/142). +3. When you meet the 3 month minimum attendance, and participation expectations, the CommComm will vote to add you as a member. + +Membership is for 6 months. The group will ask on a regular basis if the expiring members would like to stay on. A member just needs to reply to renew. There is no fixed size of the CommComm. However, the expected target is between 9 and 12. 
You can read more about membership, and other administrative details, in our [Governance Guide](https://github.com/nodejs/community-committee/blob/master/GOVERNANCE.md). + +Regular CommComm meetings are held bi-monthly in a Zoom video conference, and broadcast live to the public on YouTube. Any community member or contributor can ask that something be added to the next meeting's agenda by logging a GitHub Issue. + +Meeting announcements and agendas are posted before the meeting begins in the organization's [GitHub issues](https://github.com/nodejs/community-committee/issues). You can also find the regularly scheduled meetings on the [Node.js Calendar](https://nodejs.org/calendar). To follow Node.js meeting livestreams on YouTube, subscribe to the Node.js Foundation [YouTube channel](https://www.youtube.com/channel/UCQPYJluYC_sn_Qz_XE-YbTQ). Be sure to click the bell to be notified of new videos! + +## Consensus Seeking Process + +The CommComm follows a [Consensus Seeking](https://en.wikipedia.org/wiki/Consensus-seeking_decision-making) decision making model. + +When an agenda item has appeared to reach a consensus, the moderator will ask "Does anyone object?" as a final call for dissent from the consensus. If a consensus cannot be reached that has no objections then a majority wins vote is called. It is expected that the majority of decisions made by the CommComm are via a consensus seeking process and that voting is only used as a last-resort. diff --git a/locale/es/about/governance.md b/locale/es/about/governance.md index e969c06d22e5..2d9f55b46d42 100644 --- a/locale/es/about/governance.md +++ b/locale/es/about/governance.md @@ -7,28 +7,18 @@ layout: about.hbs ## Proceso de búsqueda de consenso -El proyecto Node.js sigue un modelo de [Búsqueda de Consenso][] en la toma de decisiones. +El proyecto Node.js sigue un modelo de [Búsqueda de Consenso](https://en.wikipedia.org/wiki/Consensus-seeking_decision-making) en la toma de decisiones. 
## Colaboradores -El repositorio GitHub de [nodejs/node][] es mantenido por los Colaboradores que son agregados por el Comité Directivo Técnico ([TSC][]) de forma continua. +El repositorio GitHub de [nodejs/node](https://github.com/nodejs/node) es mantenido por los Colaboradores que son agregados por el Comité Directivo Técnico ([TSC](https://github.com/nodejs/TSC)) de forma continua. Las personas que hacen contribuciones importantes y valiosas se convierten en Colaboradores y se les otorga permisos de escritura al proyecto. Estas personas están identificadas por el TSC y su nominación se discute con los Colaboradores existentes. -Para ver la lista actual de Colaboradores, consulte el archivo [README.md][] del proyecto. +Para ver la lista actual de Colaboradores, consulte el archivo [README.md](https://github.com/nodejs/node/blob/master/README.md#current-project-team-members) del proyecto. -Se mantiene una guía para Colaboradores en [COLLABORATOR_GUIDE.md][]. +Se mantiene una guía para Colaboradores en [COLLABORATOR_GUIDE.md](https://github.com/nodejs/node/blob/master/COLLABORATOR_GUIDE.md). ## Comités de nivel superior -El proyecto está gestionado conjuntamente por el [Comité de Dirección Técnica (TSC)][] - que es responsable de la orientación de alto nivel del proyecto, y el -[Comité de la Comunidad (CommComm)][] que se encarga de guiar y ampliar la comunidad de Node.js. 
- -[COLLABORATOR_GUIDE.md]: https://github.com/nodejs/node/blob/master/COLLABORATOR_GUIDE.md -[Comité de la Comunidad (CommComm)]: https://github.com/nodejs/community-committee/blob/master/Community-Committee-Charter.md -[Búsqueda de Consenso]: https://en.wikipedia.org/wiki/Consensus-seeking_decision-making -[README.md]: https://github.com/nodejs/node/blob/master/README.md#current-project-team-members -[Comité de Dirección Técnica (TSC)]: https://github.com/nodejs/TSC/blob/master/TSC-Charter.md -[TSC]: https://github.com/nodejs/TSC -[nodejs/node]: https://github.com/nodejs/node +El proyecto está gestionado conjuntamente por el [Comité de Dirección Técnica (TSC)](https://github.com/nodejs/TSC/blob/master/TSC-Charter.md) que es responsable de la orientación de alto nivel del proyecto, y el [Comité de la Comunidad (CommComm)](https://github.com/nodejs/community-committee/blob/master/Community-Committee-Charter.md) que se encarga de guiar y ampliar la comunidad de Node.js. diff --git a/locale/es/about/index.md b/locale/es/about/index.md index 8a73a5fece6c..4dbf5915de8c 100644 --- a/locale/es/about/index.md +++ b/locale/es/about/index.md @@ -6,10 +6,7 @@ trademark: Trademark # Acerca de Node.js® -Concebido como un entorno de ejecución de JavaScript orientado a eventos asíncronos, Node.js está diseñado -para construir aplicaciones en red escalables. En la siguiente aplicación de ejemplo "hola mundo", se pueden -manejar muchas conexiones concurrentes. Por cada conexión el *callback* será ejecutado, sin embargo -si no hay trabajo que hacer Node.js estará durmiendo. +Concebido como un entorno de ejecución de JavaScript orientado a eventos asíncronos, Node.js está diseñado para construir aplicaciones en red escalables. En la siguiente aplicación de ejemplo "hola mundo", se pueden manejar muchas conexiones concurrentes. Por cada conexión el *callback* será ejecutado, sin embargo si no hay trabajo que hacer Node.js estará durmiendo. 
```javascript const http = require('http'); @@ -28,39 +25,14 @@ server.listen(port, hostname, () => { }); ``` -Esto contrasta con el modelo de concurrencia más común hoy en día, donde se usan -hilos del Sistema Operativo. Las operaciones de redes basadas en hilos son relativamente ineficientes -y son muy difíciles de usar. Además, los usuarios de Node.js están libres de preocupaciones -sobre el bloqueo del proceso, ya que no existe. Casi ninguna función en Node.js realiza -I/O directamente, así que el proceso nunca se bloquea. Debido a que no hay bloqueo -es muy razonable desarrollar sistemas escalables en Node.js. +Esto contrasta con el modelo de concurrencia más común hoy en día, donde se usan hilos del Sistema Operativo. Las operaciones de redes basadas en hilos son relativamente ineficientes y son muy difíciles de usar. Además, los usuarios de Node.js están libres de preocupaciones sobre el bloqueo del proceso, ya que no existe. Casi ninguna función en Node.js realiza I/O directamente, así que el proceso nunca se bloquea. Debido a que no hay bloqueo es muy razonable desarrollar sistemas escalables en Node.js. -Si alguno de estos términos no le es familiar, hay un artículo completo en -[Blocking vs Non-Blocking][]. +Si alguno de estos términos no le es familiar, hay un artículo completo en [Blocking vs Non-Blocking](https://github.com/nodejs/node/blob/master/doc/topics/blocking-vs-non-blocking.md). --- -Node.js tiene un diseño similar y está influenciado por sistemas como -[Event Machine][] de Ruby ó [Twisted][] de Python. Node.js lleva el modelo de eventos un poco -más allá, este presenta un [bucle de eventos][] como un entorno en vez de una librería. En otros sistemas siempre existe una llamada -que bloquea para iniciar el bucle de eventos. El comportamiento es típicamente definido a través de *callbacks* al inicio -del script y al final se inicia el servidor mediante una llamada de bloqueo como `EventMachine::run()`. En Node.js no existe esta llamada. 
-Node.js simplemente ingresa el bucle de eventos después de ejecutar el script de entrada. -Node.js sale del bucle de eventos cuando no hay más *callbacks* que ejecutar. Se comporta de una -forma similar a JavaScript en el navegador - el bucle de eventos está oculto al usuario. +Node.js tiene un diseño similar y está influenciado por sistemas como [Event Machine](https://github.com/eventmachine/eventmachine) de Ruby ó [Twisted](https://twistedmatrix.com/trac/) de Python. Node.js lleva el modelo de eventos un poco más allá, este presenta un [bucle de eventos](https://github.com/nodejs/node/blob/master/doc/topics/event-loop-timers-and-nexttick.md) como un entorno en vez de una librería. En otros sistemas siempre existe una llamada que bloquea para iniciar el bucle de eventos. El comportamiento es típicamente definido a través de *callbacks* al inicio del script y al final se inicia el servidor mediante una llamada de bloqueo como `EventMachine::run()`. En Node.js no existe esta llamada. Node.js simplemente ingresa el bucle de eventos después de ejecutar el script de entrada. Node.js sale del bucle de eventos cuando no hay más *callbacks* que ejecutar. Se comporta de una forma similar a JavaScript en el navegador - el bucle de eventos está oculto al usuario. -HTTP es ciudadano de primera clase en Node.js, diseñado con operaciones de streaming y baja latencia -en mente. Esto hace a Node.js candidato para ser la base de una librería o un framework web. +HTTP es ciudadano de primera clase en Node.js, diseñado con operaciones de streaming y baja latencia en mente. Esto hace a Node.js candidato para ser la base de una librería o un framework web. -Solo porque Node.js esté diseñado sin hilos, no significa que usted no puede -aprovechar los múltiples cores de su sistema. Procesos hijos pueden ser lanzados -usando nuestra API [`child_process.fork()`][], la cual está diseñada para comunicarse -fácilmente con el proceso principal. 
Construida sobre la misma interfaz está el módulo [`cluster`][], -el cual permite compartir sockets entre procesos para activar el balanceo de cargas en sus múltiples cores. - -[Blocking vs Non-Blocking]: https://github.com/nodejs/node/blob/master/doc/topics/blocking-vs-non-blocking.md -[`child_process.fork()`]: https://nodejs.org/api/child_process.html#child_process_child_process_fork_modulepath_args_options -[`cluster`]: https://nodejs.org/api/cluster.html -[bucle de eventos]: https://github.com/nodejs/node/blob/master/doc/topics/event-loop-timers-and-nexttick.md -[Event Machine]: https://github.com/eventmachine/eventmachine -[Twisted]: https://twistedmatrix.com/trac/ +Solo porque Node.js esté diseñado sin hilos, no significa que usted no puede aprovechar los múltiples cores de su sistema. Procesos hijos pueden ser lanzados usando nuestra API [`child_process.fork()`][], la cual está diseñada para comunicarse fácilmente con el proceso principal. Construida sobre la misma interfaz está el módulo [`cluster`][], el cual permite compartir sockets entre procesos para activar el balanceo de cargas en sus múltiples cores. diff --git a/locale/es/about/privacy.md b/locale/es/about/privacy.md new file mode 100644 index 000000000000..678a0bf4127d --- /dev/null +++ b/locale/es/about/privacy.md @@ -0,0 +1,94 @@ +--- +title: Privacy Policy +layout: about.hbs +--- + +# Privacy Policy + +NODE.JS FOUNDATION (the "Foundation”) is committed to protecting the privacy of its users. This Privacy Policy (or the “Policy”) applies to its websites (whether currently or in the future supported, hosted or maintained, including without limitation nodejs.org, the “Sites”) and describes the information the Foundation collects about users of the Sites (“users”) and how that information may be used. + +Read the Privacy Policy carefully. By using any Site, you will be deemed to have accepted the terms of the Policy. 
If you do not agree to accept the terms of the Privacy Policy, you are directed to discontinue accessing or otherwise using the Sites or any materials obtained from the Sites. + +## Changes to the Privacy Policy +The Foundation reserves the right to update and change this Privacy Policy from time to time. Each time a user uses the Sites, the current version of the Privacy Policy applies. Accordingly, a user should check the date of this Privacy Policy (which appears at the top) and review for any changes since the last version. If a user does not agree to the Privacy Policy, the user should not use any of the Sites. Continued use of any of the Sites following any revision of this Privacy Policy constitutes an acceptance of any change. + +## What Does this Privacy Policy Cover? +This Privacy Policy covers the Foundation’s treatment of aggregate information collected by the Sites and personal information that you provide in connection with your use of the Sites. This Policy does not apply to the practices of third parties that the Foundation does not own or control, including but not limited to third party services you access through the Foundation, or to individuals that the Foundation does not employ or manage. + +## Children Under 13 Years of Age +Unless specifically indicated within a Site, the Sites are not intended for minor children not of age (including without limitation those under 13), and they should not use the Sites. If you are under 18, you may use the Site only with involvement of a parent or guardian or if you are an emancipated minor. Except as specifically indicated within a Site, we do not knowingly collect or solicit information from, market to or accept services from children. If we become aware that a child under 13 has provided us with personal information without parental consent, we will take reasonable steps to remove such information and terminate the child’s account. 
If you become aware that a child has provided us with personally identifiable information without parental consent, please contact us at privacy@nodejs.org so we may remove the information. + +## Information About Users that the Foundation Collects +On the Sites, users may order products or services, and register to receive materials. Information collected on the Sites includes community forum content, diaries, profiles, photographs, names, unique identifiers (e.g., social media handles or usernames), contact and billing information (e.g., email address, postal address, telephone, fax), and transaction information. In order to access certain personalized services on the Sites, you may be asked to also create and store a username and password for an account from the Foundation. + +In order to tailor the Foundation’s subsequent communications to users and continuously improve the Sites’ products and services, the Foundation may also ask users to provide information regarding their interests, demographics, experience and detailed contact preferences. The Foundation and third party advertising companies may track information concerning a user’s use of the Sites, such as a user’s IP address. 
+ +## How the Foundation Uses the Information Collected +The Foundation may use collected information for any lawful purpose related to the Foundation’s business, including, but not limited to: + +* To understand a user’s needs and create content that is relevant to the user; +* To generate statistical studies; +* To conduct market research and planning by sending user surveys; +* To notify user referrals of services, information, or products when a user requests that the Foundation send such information to referrals; +* To improve services, information, and products; +* To help a user complete a transaction, or provide services or customer support; +* To communicate back to the user; +* To update the user on services, information, and products; +* To personalize a Site for the user; +* To notify the user of any changes with a Site that may affect the user; +* To enforce terms of use on a Site; and +* To allow the user to purchase products, access services, or otherwise engage in activities the user selects. + +User names, identifications ("IDs"), and email addresses (as well as any additional information that a user may choose to post) may be publicly available on a Site when users voluntarily and publicly disclose personal information, such as when a user posts information in conjunction with content subject to an Open Source license, or as part of a message posted to a public forum or a publicly released software application. The personal information you may provide to the Foundation may reveal or allow others to discern aspects of your life that are not expressly stated in your profile (for example, your picture or your name may reveal your hair color, race or approximate age). 
By providing personal information to us when you create or update your account and profile or post a photograph, you are expressly and voluntarily accepting our Terms of Use and freely accepting and agreeing to our processing of your personal information in ways set out by this Privacy Policy. Supplying information to us, including any information deemed “sensitive” by applicable law, is entirely voluntary on your part. You may withdraw your consent to the Foundation’s collection and processing of your information by closing your account. You should be aware that your information may continue to be viewable to others after you close your account, such as on cached pages on Internet search engines. Users may not be able to change or remove public postings once posted. Such information may be used by visitors of these pages to send unsolicited messages. The Foundation is not responsible for any consequences which may occur from the third-party use of information that a user chooses to submit to public pages. + +## Opt Out +A user will always be able to make the decision whether to proceed with any activity that requests personal information including personally identifiable information. If a user does not provide requested information, the user may not be able to complete certain transactions. + +Users are not licensed to add other users to a Site (even users who entered into transactions with them) or to their mailing lists without written consent. The Foundation encourages users to evaluate privacy and security policies of any of the Sites’ transaction partners before entering into transactions or choosing to disclose information. + +## Email +The Foundation may use (or provide to The Linux Foundation or other third party contractors to use) contact information received by the Foundation to email any user with respect to any Foundation or project of The Linux Foundation (a “Project”) opportunity, event or other matter. 
+ +If a user no longer wishes to receive emails from the Foundation or any Project or any Site, the Foundation will (or, if applicable, have The Linux Foundation) provide instructions in each of its emails on how to be removed from any lists. The Foundation will make commercially reasonable efforts to honor such requests. + +## Photographs +Users may have the opportunity to submit photographs to the Sites for product promotions, contests, and other purposes to be disclosed at the time of request. In these circumstances, the Sites are designed to allow the public to view, download, save, and otherwise access the photographs posted. By submitting a photograph, users waive any privacy expectations users have with respect to the security of such photographs, and the Foundation’s use or exploitation of users’ likeness. You may submit a photograph only if you are the copyright holder or if you are authorized to do so under license by the copyright holder, and by submitting a photograph you agree to indemnify and hold the Foundation, its directors, officers, employees and agents harmless from any claims arising out of your submission. By submitting a photograph, you grant the Foundation a perpetual, worldwide, royalty-free license to use the photograph in any media now known or hereinafter invented for any business purpose that the Foundation, at its sole discretion, may decide. + +## Links to Third-Party Sites and Services +The Sites may permit you to access or link to third party websites and information on the Internet, and other websites may contain links to the Sites. When a user uses these links, the user leaves the Sites. The Foundation has not reviewed these third party sites, does not control, and is not responsible for, any of the third party sites, their content or privacy practices. 
The privacy and security practices of websites accessed from the Sites are not covered by this Privacy Policy, and the Foundation is not responsible for the privacy or security practices or the content of such websites, including but not limited to the third party services you access through the Foundation. If a user decides to access any of the linked sites, the Foundation encourages the user to read the privacy statements of those sites. The user accesses such sites at user’s own risk. + +We may receive information when you use your account to log into a third-party site or application in order to recommend tailored content or advertising to you and to improve your user experience on our site. We may provide reports containing aggregated impression information to third parties to measure Internet traffic and usage patterns. + +## Service Orders +To purchase services, users may be asked to be directed to a third party site, such as PayPal, to pay for their purchases. If applicable, the third party site may collect payment information directly to facilitate a transaction. The Foundation will only record the result of the transaction and any references to the transaction record provided by the third party site. The Foundation is not responsible for the services provided or information collected on such third party sites. 
+ +## Sharing of Information +The Foundation may disclose personal or aggregate information that is associated with your profile as described in this Privacy Policy, as permitted by law or as reasonably necessary to: (1) comply with a legal requirement or process, including, but not limited to, civil and criminal subpoenas, court orders or other compulsory disclosures; (2) investigate and enforce this Privacy Policy or our then-current Terms of Use, if any; (3) respond to claims of a violation of the rights of third parties; (4) respond to customer service inquiries; (5) protect the rights, property, or safety of the Foundation, our users, or the public; or (6) as part of the sale of all or a portion of the assets of the Foundation or as a change in control of the organization or one of its affiliates or in preparation for any of these events. The Foundation reserves the right to supply any such information to any organization into which the Foundation may merge in the future or to which it may make any transfer. Any third party to which the Foundation transfers or sells all or any of its assets will have the right to use the personal and other information that you provide in the manner set out in this Privacy Policy. + +## Is Information About Me Secure? +To keep your information safe, prevent unauthorized access or disclosure, maintain data accuracy, and ensure the appropriate use of information, the Foundation implements industry-standard physical, electronic, and managerial procedures to safeguard and secure the information the Foundation collects. However, the Foundation does not guarantee that unauthorized third parties will never defeat measures taken to prevent improper use of personally identifiable information. + +Access to users’ nonpublic personally identifiable information is restricted to the Foundation and Linux Foundation personnel, including contractors for each such organization on a need-to-know basis. + +User passwords are keys to accounts. 
Use unique numbers, letters, and special characters for passwords and do not disclose passwords to other people in order to prevent loss of account control. Users are responsible for all actions taken in their accounts. Notify the Foundation of any password compromises, and change passwords periodically to maintain account protection. + +In the event the Foundation becomes aware that the security of a Site has been compromised or user’s personally identifiable information has been disclosed to unrelated third parties as a result of external activity, including but not limited to security attacks or fraud, the Foundation reserves the right to take reasonable appropriate measures, including but not limited to, investigation and reporting, and notification to and cooperation with law enforcement authorities. + +While our aim is to keep data from unauthorized or unsafe access, modification or destruction, no method of transmission on the Internet, or method of electronic storage, is 100% secure and we cannot guarantee its absolute security. + +## Data Protection +Given the international scope of the Foundation, personal information may be visible to persons outside your country of residence, including to persons in countries that your own country’s privacy laws and regulations deem deficient in ensuring an adequate level of protection for such information. If you are unsure whether this privacy statement is in conflict with applicable local rules, you should not submit your information. If you are located within the European Union, you should note that your information will be transferred to the United States, which is deemed by the European Union to have inadequate data protection. 
Nevertheless, in accordance with local laws implementing the European Union Privacy Directive on the protection of individuals with regard to the processing of personal data and on the free movement of such data, individuals located in countries outside of the United States of America who submit personal information do thereby consent to the general use of such information as provided in this Privacy Policy and to its transfer to and/or storage in the United States of America. By utilizing any Site and/or directly providing personal information to us, you hereby agree to and acknowledge your understanding of the terms of this Privacy Policy, and consent to have your personal data transferred to and processed in the United States and/or in other jurisdictions as determined by the Foundation, notwithstanding your country of origin, or country, state and/or province of residence. If you do not want your personal information collected and used by the Foundation, please do not visit or use the Sites. + +## Governing Law +This Privacy Policy is governed by the laws of the State of California, United States of America without giving any effect to the principles of conflicts of law. + +## California Privacy Rights +The California Online Privacy Protection Action (“CalOPPA”) permits customers who are California residents and who have provided the Foundation with “personal information” as defined in CalOPPA to request certain information about the disclosure of information to third parties for their direct marketing purposes. If you are a California resident with a question regarding this provision, please contact privacy@nodejs.org. + +Please note that the Foundation does not respond to “do not track” signals or other similar mechanisms intended to allow California residents to opt-out of Internet tracking under CalOPPA. The Foundation may track and/or disclose your online activities over time and across different websites to third parties when you use our services. 
+ +## What to Do in the Event of Lost or Stolen Information +You must promptly notify us if you become aware that any information provided by or submitted to our Site or through our Product is lost, stolen, or used without permission at privacy@nodejs.org. + +## Questions or Concerns +If you have any questions or concerns regarding privacy at the Foundation, please send us a detailed message to [privacy@nodejs.org](mailto:privacy@nodejs.org). diff --git a/locale/es/about/releases.md b/locale/es/about/releases.md index d4bb54b628cd..db70209225f8 100644 --- a/locale/es/about/releases.md +++ b/locale/es/about/releases.md @@ -18,3 +18,5 @@ schedule-footer: Las fechas están sujetas a cambios --- # Versiones + +Major Node.js versions enter _Current_ release status for six months, which gives library authors time to add support for them. After six months, odd-numbered releases (9, 11, etc.) become unsupported, and even-numbered releases (10, 12, etc.) move to _Active LTS_ status and are ready for general use. _LTS_ release status is "long-term support", which typically guarantees that critical bugs will be fixed for a total of 30 months. Production applications should only use _Active LTS_ or _Maintenance LTS_ releases. diff --git a/locale/es/about/resources.md b/locale/es/about/resources.md index e2f96f362ba0..e4a7b12bc8a3 100644 --- a/locale/es/about/resources.md +++ b/locale/es/about/resources.md @@ -9,8 +9,7 @@ title: Logos y Gráficos Por favor revise la [Política de Marca Registrada](/es/about/trademark/) para obtener información sobre el uso permitido de Node.js® logos y marcas. -Las pautas para la visualización de la marca Node.js se describen en -las [Pautas Visuales](/static/documents/foundation-visual-guidelines.pdf). +Las pautas para la visualización de la marca Node.js se describen en las [Pautas Visuales](/static/documents/foundation-visual-guidelines.pdf). 
diff --git a/locale/es/about/trademark.md b/locale/es/about/trademark.md index 7f23aa9156e9..daf75f1206f1 100644 --- a/locale/es/about/trademark.md +++ b/locale/es/about/trademark.md @@ -5,25 +5,10 @@ title: Política de marcas comerciales # Política de marcas comerciales -Las marcas comerciales, marcas de servicio y gráficos de Node.js son símbolos de -la calidad, el rendimiento y la facilidad de uso que las personas asocian con el -software y el proyecto Node.js. Para garantizar que las marcas Node.js sigan simbolizando -estas cualidades, debemos asegurarnos de que las marcas solo se utilicen de -forma que no engañen a las personas o que confundan a Node.js con otro software -de calidad inferior. Si no nos aseguramos de que las marcas se usen de esta manera, -no solo puede confundir a los usuarios, sino que también puede hacer que sea imposible -utilizar la marca para proteger de las personas que exploten maliciosamente la marca -en el futuro. El objetivo principal de esta política es garantizar que esto no suceda -con la marca Node.js, de modo que la comunidad y los usuarios de Node.js siempre estén -protegidos en el futuro. +Las marcas comerciales, marcas de servicio y gráficos de Node.js son símbolos de la calidad, el rendimiento y la facilidad de uso que las personas asocian con el software y el proyecto Node.js. Para garantizar que las marcas Node.js sigan simbolizando estas cualidades, debemos asegurarnos de que las marcas solo se utilicen de forma que no engañen a las personas o que confundan a Node.js con otro software de calidad inferior. Si no nos aseguramos de que las marcas se usen de esta manera, no solo puede confundir a los usuarios, sino que también puede hacer que sea imposible utilizar la marca para proteger de las personas que exploten maliciosamente la marca en el futuro. 
El objetivo principal de esta política es garantizar que esto no suceda con la marca Node.js, de modo que la comunidad y los usuarios de Node.js siempre estén protegidos en el futuro. -Al mismo tiempo, nos gustaría que los miembros de la comunidad se sientan cómodos -difundiendo información sobre Node.js y participando en la comunidad Node.js. -Mantenga ese objetivo en mente, hemos tratado de hacer que la política sea -tan flexible y fácil de entender como sea legalmente posible. +Al mismo tiempo, nos gustaría que los miembros de la comunidad se sientan cómodos difundiendo información sobre Node.js y participando en la comunidad Node.js. Mantenga ese objetivo en mente, hemos tratado de hacer que la política sea tan flexible y fácil de entender como sea legalmente posible. -Por favor, lea la [política completa](/static/documents/trademark-policy.pdf).. -Si tiene alguna pregunta, no dude en [enviarnos un correo electrónico](mailto:trademark@nodejs.org). +Por favor, lea la [política completa](/static/documents/trademark-policy.pdf).. Si tiene alguna pregunta, no dude en [enviarnos un correo electrónico](mailto:trademark@nodejs.org). -Las pautas para la visualización de la marca Node.js -se describen en las [Directrices visuales](/static/documents/foundation-visual-guidelines.pdf). +Las pautas para la visualización de la marca Node.js se describen en las [Directrices visuales](/static/documents/foundation-visual-guidelines.pdf). diff --git a/locale/es/about/working-groups.md b/locale/es/about/working-groups.md new file mode 100644 index 000000000000..de5e83c55da2 --- /dev/null +++ b/locale/es/about/working-groups.md @@ -0,0 +1,199 @@ +--- +layout: about.hbs +title: Working Groups +--- + +# Core Working Groups + + +Core Working Groups are created by the [Technical Steering Committee (TSC)](https://github.com/nodejs/TSC/blob/master/TSC-Charter.md). 
+ +## Current Working Groups + +* [Addon API](#addon-api) +* [Benchmarking](#benchmarking) +* [Build](#build) +* [Diagnostics](#diagnostics) +* [Docker](#docker) +* [Evangelism](#evangelism) +* [i18n](#i18n) +* [Versión](#release) +* [Security](#security) +* [Streams](#streams) + +### [Addon API](https://github.com/nodejs/nan) + +The Addon API Working Group is responsible for maintaining the NAN project and corresponding _nan_ package in npm. The NAN project makes available an abstraction layer for native add-on authors for Node.js, assisting in the writing of code that is compatible with many actively used versions of Node.js, V8 and libuv. + +Responsibilities include: + +* Maintaining the [NAN](https://github.com/nodejs/nan) GitHub repository, including code, issues and documentation. +* Maintaining the [addon-examples](https://github.com/nodejs/node-addon-examples) GitHub repository, including code, issues and documentation. +* Maintaining the C++ Addon API within the Node.js project, in subordination to the Node.js TSC. +* Maintaining the Addon documentation within the Node.js project, in subordination to the Node.js TSC. +* Maintaining the _nan_ package in npm, releasing new versions as appropriate. +* Messaging about the future of the Node.js and NAN interface to give the community advance notice of changes. + +The current members can be found in their [README](https://github.com/nodejs/nan#collaborators). + +### [Benchmarking](https://github.com/nodejs/benchmarking) + +The purpose of the Benchmark Working Group is to gain consensus on an agreed set of benchmarks that can be used to: + +* track and evangelize performance gains made between Node.js releases +* avoid performance regressions between releases + +Responsibilities include: + +* Identifying 1 or more benchmarks that reflect customer usage. 
Likely will need more than one to cover typical Node.js use cases including low-latency and high concurrency +* Working to get community consensus on the list chosen +* Adding regular execution of chosen benchmarks to Node.js builds +* Tracking/publicizing performance between builds/releases + +### [Build](https://github.com/nodejs/build) + +The Build Working Group's purpose is to create and maintain a distributed automation infrastructure. + +Responsibilities include: + +* Producing packages for all target platforms. +* Running tests. +* Running performance testing and comparisons. +* Creating and managing build-containers. + +### [Diagnostics](https://github.com/nodejs/diagnostics) + +The Diagnostics Working Group's purpose is to surface a set of comprehensive, documented, and extensible diagnostic interfaces for use by Node.js tools and JavaScript VMs. + +Responsibilities include: + +* Collaborating with V8 to integrate `v8_inspector` into Node.js. +* Collaborating with V8 to integrate `trace_event` into Node.js. +* Collaborating with Core to refine `async_wrap` and `async_hooks`. +* Maintaining and improving OS trace system integration (e.g. ETW, LTTNG, dtrace). +* Documenting diagnostic capabilities and APIs in Node.js and its components. +* Exploring opportunities and gaps, discussing feature requests, and addressing conflicts in Node.js diagnostics. +* Fostering an ecosystem of diagnostics tools for Node.js. +* Defining and adding interfaces/APIs in order to allow dumps to be generated when needed. +* Defining and adding common structures to the dumps generated in order to support tools that want to introspect those dumps. + +### [Docker](https://github.com/nodejs/docker-node) + +The Docker Working Group's purpose is to build, maintain, and improve official Docker images for the Node.js project. + +Responsibilities include: + +* Keeping the official Docker images updated in line with new Node.js releases. 
+* Decide and implement image improvements and/or fixes. +* Maintain and improve the images' documentation. + +### [Evangelism](https://github.com/nodejs/evangelism) + +The Evangelism Working Group promotes the accomplishments of Node.js and lets the community know how they can get involved. + +Responsibilities include: + +* Facilitating project messaging. +* Managing official project social media. +* Handling the promotion of speakers for meetups and conferences. +* Handling the promotion of community events. +* Publishing regular update summaries and other promotional content. + +### [i18n](https://github.com/nodejs/i18n) + +The i18n Working Groups handle more than just translations. They are endpoints for community members to collaborate with each other in their language of choice. + +Each team is organized around a common spoken language. Each language community might then produce multiple localizations for various project resources. + +Responsibilities include: + +* Translating any Node.js materials they believe are relevant to their community. +* Reviewing processes for keeping translations up to date and of high quality. +* Managing and monitoring social media channels in their language. +* Promoting Node.js speakers for meetups and conferences in their language. + +Each language community maintains its own membership. 
+ +* [nodejs-ar - Arabic (العَرَبِيَّة)](https://github.com/nodejs/nodejs-ar) +* [nodejs-bg - Bulgarian (български)](https://github.com/nodejs/nodejs-bg) +* [nodejs-bn - Bengali (বাংলা)](https://github.com/nodejs/nodejs-bn) +* [nodejs-zh-CN - Chinese (简体中文)](https://github.com/nodejs/nodejs-zh-CN) +* [nodejs-cs - Czech (Čeština)](https://github.com/nodejs/nodejs-cs) +* [nodejs-da - Danish (Dansk)](https://github.com/nodejs/nodejs-da) +* [nodejs-de - German (Deutsch)](https://github.com/nodejs/nodejs-de) +* [nodejs-el - Greek (Ελληνικά)](https://github.com/nodejs/nodejs-el) +* [nodejs-es - Spanish (Español)](https://github.com/nodejs/nodejs-es) +* [nodejs-fa - Persian (فارسی)](https://github.com/nodejs/nodejs-fa) +* [nodejs-fi - Finnish (Suomi)](https://github.com/nodejs/nodejs-fi) +* [nodejs-fr - French (Français)](https://github.com/nodejs/nodejs-fr) +* [nodejs-he - Hebrew (עברית)](https://github.com/nodejs/nodejs-he) +* [nodejs-hi - Hindi (हिन्दी)](https://github.com/nodejs/nodejs-hi) +* [nodejs-hu - Hungarian (Magyar)](https://github.com/nodejs/nodejs-hu) +* [nodejs-id - Indonesian (Bahasa Indonesia)](https://github.com/nodejs/nodejs-id) +* [nodejs-it - Italian (Italiano)](https://github.com/nodejs/nodejs-it) +* [nodejs-ja - Japanese (日本語)](https://github.com/nodejs/nodejs-ja) +* [nodejs-ka - Georgian (ქართული)](https://github.com/nodejs/nodejs-ka) +* [nodejs-ko - Korean (한국어)](https://github.com/nodejs/nodejs-ko) +* [nodejs-mk - Macedonian (Македонски)](https://github.com/nodejs/nodejs-mk) +* [nodejs-ms - Malay (بهاس ملايو‎)](https://github.com/nodejs/nodejs-ms) +* [nodejs-nl - Dutch (Nederlands)](https://github.com/nodejs/nodejs-nl) +* [nodejs-no - Norwegian (Norsk)](https://github.com/nodejs/nodejs-no) +* [nodejs-pl - Polish (Język Polski)](https://github.com/nodejs/nodejs-pl) +* [nodejs-pt - Portuguese (Português)](https://github.com/nodejs/nodejs-pt) +* 
[nodejs-ro - Romanian (Română)](https://github.com/nodejs/nodejs-ro) +* [nodejs-ru - Russian (Русский)](https://github.com/nodejs/nodejs-ru) +* [nodejs-sv - Swedish (Svenska)](https://github.com/nodejs/nodejs-sv) +* [nodejs-ta - Tamil (தமிழ்)](https://github.com/nodejs/nodejs-ta) +* [nodejs-tr - Turkish (Türkçe)](https://github.com/nodejs/nodejs-tr) +* [nodejs-zh-TW - Taiwanese (繁體中文(台灣))](https://github.com/nodejs/nodejs-zh-TW) +* [nodejs-uk - Ukrainian (Українська)](https://github.com/nodejs/nodejs-uk) +* [nodejs-vi - Vietnamese (Tiếng Việt)](https://github.com/nodejs/nodejs-vi) + +### [Versión](https://github.com/nodejs/Release) + +The Release Working Group manages the release process for Node.js. + +Responsibilities include: + +* Define the release process. +* Define the content of releases. +* Generate and create releases. +* Test Releases. +* Manage the Long Term Support and Current branches including backporting changes to these branches. +* Define the policy for what gets backported to release streams + +### [Security](https://github.com/nodejs/security-wg) + +The Security Working Group manages all aspects and processes linked to Node.js security. + +Responsibilities include: + +* Define and maintain security policies and procedures for: + * the core Node.js project + * other projects maintained by the Node.js Technical Steering Committee (TSC). +* Work with the Node Security Platform to bring community vulnerability data into the foundation as a shared asset. +* Ensure the vulnerability data is updated in an efficient and timely manner. For example, ensuring there are well-documented processes for reporting vulnerabilities in community modules. +* Review and recommend processes for handling of security reports (but not the actual administration of security reports, which are reviewed by a group of people directly delegated to by the TSC). 
+* Define and maintain policies and procedures for the coordination of security concerns within the external Node.js open source ecosystem. +* Offer help to npm package maintainers to fix high-impact security bugs. +* Maintain and make available data on disclosed security vulnerabilities in: + * the core Node.js project + * other projects maintained by the Node.js Foundation technical group + * the external Node.js open source ecosystem +* Promote the improvement of security practices within the Node.js ecosystem. +* Recommend security improvements for the core Node.js project. +* Facilitate and promote the expansion of a healthy security service and product provider ecosystem. + +### [Streams](https://github.com/nodejs/readable-stream) + +The Streams Working Group is dedicated to the support and improvement of the Streams API as used in Node.js and the npm ecosystem. We seek to create a composable API that solves the problem of representing multiple occurrences of an event over time in a humane, low-overhead fashion. Improvements to the API will be driven by the needs of the ecosystem; interoperability and backwards compatibility with other solutions and prior versions are paramount in importance. + +Responsibilities include: + +* Addressing stream issues on the Node.js issue tracker. +* Authoring and editing stream documentation within the Node.js project. +* Reviewing changes to stream subclasses within the Node.js project. +* Redirecting changes to streams from the Node.js project to this project. +* Assisting in the implementation of stream providers within Node.js. +* Recommending versions of `readable-stream` to be included in Node.js. +* Messaging about the future of streams to give the community advance notice of changes. 
diff --git a/locale/es/docs/es6.md b/locale/es/docs/es6.md new file mode 100644 index 000000000000..e73cd7bb6604 --- /dev/null +++ b/locale/es/docs/es6.md @@ -0,0 +1,46 @@ +--- +title: ECMAScript 2015 (ES6) and beyond +layout: docs.hbs +--- + +# ECMAScript 2015 (ES6) and beyond + +Node.js is built against modern versions of [V8](https://v8.dev/). By keeping up-to-date with the latest releases of this engine, we ensure new features from the [JavaScript ECMA-262 specification](http://www.ecma-international.org/publications/standards/Ecma-262.htm) are brought to Node.js developers in a timely manner, as well as continued performance and stability improvements. + +All ECMAScript 2015 (ES6) features are split into three groups for **shipping**, **staged**, and **in progress** features: + +* All **shipping** features, which V8 considers stable, are turned **on by default on Node.js** and do **NOT** require any kind of runtime flag. +* **Staged** features, which are almost-completed features that are not considered stable by the V8 team, require a runtime flag: `--harmony`. +* **In progress** features can be activated individually by their respective harmony flag, although this is highly discouraged unless for testing purposes. Note: these flags are exposed by V8 and will potentially change without any deprecation notice. + +## Which features ship with which Node.js version by default? + +The website [node.green](https://node.green/) provides an excellent overview over supported ECMAScript features in various versions of Node.js, based on kangax's compat-table. + +## Which features are in progress? + +New features are constantly being added to the V8 engine. Generally speaking, expect them to land on a future Node.js release, although timing is unknown. + +You may list all the *in progress* features available on each Node.js release by grepping through the `--v8-options` argument. 
Please note that these are incomplete and possibly broken features of V8, so use them at your own risk: + +```bash +node --v8-options | grep "in progress" +``` + +## What about the performance of a particular feature? + +The V8 team is constantly working to improve the performance of new language features to eventually reach parity with their transpiled or native counterparts in EcmaScript 5 and earlier. The current progress there is tracked on the website [six-speed](https://fhinkel.github.io/six-speed), which shows the performance of ES2015 and ESNext features compared to their native ES5 counterparts. + +The work on optimizing features introduced with ES2015 and beyond is coordinated via a [performance plan](https://docs.google.com/document/d/1EA9EbfnydAmmU_lM8R_uEMQ-U_v4l9zulePSBkeYWmY), where the V8 team gathers and coordinates areas that need improvement, and design documents to tackle those problems. + +## I have my infrastructure set up to leverage the --harmony flag. Should I remove it? + +The current behaviour of the `--harmony` flag on Node.js is to enable **staged** features only. After all, it is now a synonym of `--es_staging`. As mentioned above, these are completed features that have not been considered stable yet. If you want to play safe, especially on production environments, consider removing this runtime flag until it ships by default on V8 and, consequently, on Node.js. If you keep this enabled, you should be prepared for further Node.js upgrades to break your code if V8 changes their semantics to more closely follow the standard. + +## How do I find which version of V8 ships with a particular version of Node.js? + +Node.js provides a simple way to list all dependencies and respective versions that ship with a specific binary through the `process` global object. 
In case of the V8 engine, type the following in your terminal to retrieve its version: + +```bash +node -p process.versions.v8 +``` diff --git a/locale/es/docs/guides/abi-stability.md b/locale/es/docs/guides/abi-stability.md new file mode 100644 index 000000000000..25c716db98bc --- /dev/null +++ b/locale/es/docs/guides/abi-stability.md @@ -0,0 +1,35 @@ +--- +title: ABI Stability +layout: docs.hbs +--- + +# ABI Stability + +## Introduction +An Application Binary Interface (ABI) is a way for programs to call functions and use data structures from other compiled programs. It is the compiled version of an Application Programming Interface (API). In other words, the header files describing the classes, functions, data structures, enumerations, and constants which enable an application to perform a desired task correspond by way of compilation to a set of addresses and expected parameter values and memory structure sizes and layouts with which the provider of the ABI was compiled. + +The application using the ABI must be compiled such that the available addresses, expected parameter values, and memory structure sizes and layouts agree with those with which the ABI provider was compiled. This is usually accomplished by compiling against the headers provided by the ABI provider. + +Since the provider of the ABI and the user of the ABI may be compiled at different times with different versions of the compiler, a portion of the responsibility for ensuring ABI compatibility lies with the compiler. Different versions of the compiler, perhaps provided by different vendors, must all produce the same ABI from a header file with a certain content, and must produce code for the application using the ABI that accesses the API described in a given header according to the conventions of the ABI resulting from the description in the header. Modern compilers have a fairly good track record of not breaking the ABI compatibility of the applications they compile.
+ +The remaining responsibility for ensuring ABI compatibility lies with the team maintaining the header files which provide the API that results, upon compilation, in the ABI that is to remain stable. Changes to the header files can be made, but the nature of the changes has to be closely tracked to ensure that, upon compilation, the ABI does not change in a way that will render existing users of the ABI incompatible with the new version. + +## ABI Stability in Node.js +Node.js provides header files maintained by several independent teams. For example, header files such as `node.h` and `node_buffer.h` are maintained by the Node.js team. `v8.h` is maintained by the V8 team, which, although in close co-operation with the Node.js team, is independent, and with its own schedule and priorities. Thus, the Node.js team has only partial control over the changes that are introduced in the headers the project provides. As a result, the Node.js project has adopted [semantic versioning](https://semver.org/). This ensures that the APIs provided by the project will result in a stable ABI for all minor and patch versions of Node.js released within one major version. In practice, this means that the Node.js project has committed itself to ensuring that a Node.js native addon compiled against a given major version of Node.js will load successfully when loaded by any Node.js minor or patch version within the major version against which it was compiled. + +## N-API +Demand has arisen for equipping Node.js with an API that results in an ABI that remains stable across multiple Node.js major versions. The motivation for creating such an API is as follows: + +* The JavaScript language has remained compatible with itself since its very early days, whereas the ABI of the engine executing the JavaScript code changes with every major version of Node.js. 
This means that applications consisting of Node.js packages written entirely in JavaScript need not be recompiled, reinstalled, or redeployed as a new major version of Node.js is dropped into the production environment in which such applications run. In contrast, if an application depends on a package that contains a native addon, the application has to be recompiled, reinstalled, and redeployed whenever a new major version of Node.js is introduced into the production environment. This disparity between Node.js packages containing native addons and those that are written entirely in JavaScript has added to the maintenance burden of production systems which rely on native addons. + +* Other projects have started to produce JavaScript interfaces that are essentially alternative implementations of Node.js. Since these projects are usually built on a different JavaScript engine than V8, their native addons necessarily take on a different structure and use a different API. Nevertheless, using a single API for a native addon across different implementations of the Node.js JavaScript API would allow these projects to take advantage of the ecosystem of JavaScript packages that has accrued around Node.js. + +* Node.js may contain a different JavaScript engine in the future. This means that, externally, all Node.js interfaces would remain the same, but the V8 header file would be absent. Such a step would cause the disruption of the Node.js ecosystem in general, and that of the native addons in particular, if an API that is JavaScript engine agnostic is not first provided by Node.js and adopted by native addons. + +To these ends Node.js has introduced N-API in version 8.6.0 and marked it as a stable component of the project as of Node.js 8.12.0. The API is defined in the headers [`node_api.h`][] and [`node_api_types.h`][], and provides a forward-compatibility guarantee that crosses the Node.js major version boundary.
The guarantee can be stated as follows: + +**A given version *n* of N-API will be available in the major version of Node.js in which it was published, and in all subsequent versions of Node.js, including subsequent major versions.** + +A native addon author can take advantage of the N-API forward compatibility guarantee by ensuring that the addon makes use only of APIs defined in `node_api.h` and data structures and constants defined in `node_api_types.h`. By doing so, the author facilitates adoption of their addon by indicating to production users that the maintenance burden for their application will increase no more by the addition of the native addon to their project than it would by the addition of a package written purely in JavaScript. + +N-API is versioned because new APIs are added from time to time. Unlike semantic versioning, N-API versioning is cumulative. That is, each version of N-API conveys the same meaning as a minor version in the semver system, meaning that all changes made to N-API will be backwards compatible. Additionally, new N-APIs are added under an experimental flag to give the community an opportunity to vet them in a production environment. Experimental status means that, although care has been taken to ensure that the new API will not have to be modified in an ABI-incompatible way in the future, it has not yet been sufficiently proven in production to be correct and useful as designed and, as such, may undergo ABI-incompatible changes before it is finally incorporated into a forthcoming version of N-API. That is, an experimental N-API is not yet covered by the forward compatibility guarantee. 
diff --git a/locale/es/docs/guides/anatomy-of-an-http-transaction.md b/locale/es/docs/guides/anatomy-of-an-http-transaction.md new file mode 100644 index 000000000000..da0a84d34679 --- /dev/null +++ b/locale/es/docs/guides/anatomy-of-an-http-transaction.md @@ -0,0 +1,316 @@ +--- +title: Anatomy of an HTTP Transaction +layout: docs.hbs +--- + +# Anatomy of an HTTP Transaction + +The purpose of this guide is to impart a solid understanding of the process of Node.js HTTP handling. We'll assume that you know, in a general sense, how HTTP requests work, regardless of language or programming environment. We'll also assume a bit of familiarity with Node.js [`EventEmitters`][] and [`Streams`][]. If you're not quite familiar with them, it's worth taking a quick read through the API docs for each of those. + +## Create the Server + +Any node web server application will at some point have to create a web server object. This is done by using [`createServer`][]. + +```javascript +const http = require('http'); + +const server = http.createServer((request, response) => { + // magic happens here! +}); +``` + +The function that's passed in to [`createServer`][] is called once for every HTTP request that's made against that server, so it's called the request handler. In fact, the [`Server`][] object returned by [`createServer`][] is an [`EventEmitter`][], and what we have here is just shorthand for creating a `server` object and then adding the listener later. + +```javascript +const server = http.createServer(); +server.on('request', (request, response) => { + // the same kind of magic happens here! +}); +``` + +When an HTTP request hits the server, node calls the request handler function with a few handy objects for dealing with the transaction, `request` and `response`. We'll get to those shortly. + +In order to actually serve requests, the [`listen`][] method needs to be called on the `server` object. 
In most cases, all you'll need to pass to `listen` is the port number you want the server to listen on. There are some other options too, so consult the [API reference](https://nodejs.org/api/http.html). + +## Method, URL and Headers + +When handling a request, the first thing you'll probably want to do is look at the method and URL, so that appropriate actions can be taken. Node.js makes this relatively painless by putting handy properties onto the `request` object. + +```javascript +const { method, url } = request; +``` + +> **Note:** The `request` object is an instance of [`IncomingMessage`][]. + +The `method` here will always be a normal HTTP method/verb. The `url` is the full URL without the server, protocol or port. For a typical URL, this means everything after and including the third forward slash. + +Headers are also not far away. They're in their own object on `request` called `headers`. + +```javascript +const { headers } = request; +const userAgent = headers['user-agent']; +``` + +It's important to note here that all headers are represented in lower-case only, regardless of how the client actually sent them. This simplifies the task of parsing headers for whatever purpose. + +If some headers are repeated, then their values are overwritten or joined together as comma-separated strings, depending on the header. In some cases, this can be problematic, so [`rawHeaders`][] is also available. + +## Request Body + +When receiving a `POST` or `PUT` request, the request body might be important to your application. Getting at the body data is a little more involved than accessing request headers. The `request` object that's passed in to a handler implements the [`ReadableStream`][] interface. This stream can be listened to or piped elsewhere just like any other stream. We can grab the data right out of the stream by listening to the stream's `'data'` and `'end'` events. + +The chunk emitted in each `'data'` event is a [`Buffer`][]. 
If you know it's going to be string data, the best thing to do is collect the data in an array, then at the `'end'`, concatenate and stringify it. + +```javascript +let body = []; +request.on('data', (chunk) => { + body.push(chunk); +}).on('end', () => { + body = Buffer.concat(body).toString(); + // at this point, `body` has the entire request body stored in it as a string +}); +``` + +> **Note:** This may seem a tad tedious, and in many cases, it is. Luckily, there are modules like [`concat-stream`][] and [`body`][] on [`npm`][] which can help hide away some of this logic. It's important to have a good understanding of what's going on before going down that road, and that's why you're here! + +## A Quick Thing About Errors + +Since the `request` object is a [`ReadableStream`][], it's also an [`EventEmitter`][] and behaves like one when an error happens. + +An error in the `request` stream presents itself by emitting an `'error'` event on the stream. **If you don't have a listener for that event, the error will be *thrown*, which could crash your Node.js program.** You should therefore add an `'error'` listener on your request streams, even if you just log it and continue on your way. (Though it's probably best to send some kind of HTTP error response. More on that later.) + +```javascript +request.on('error', (err) => { + // This prints the error message and stack trace to `stderr`. + console.error(err.stack); +}); +``` + +There are other ways of [handling these errors](https://nodejs.org/api/errors.html) such as other abstractions and tools, but always be aware that errors can and do happen, and you're going to have to deal with them. + +## What We've Got so Far + +At this point, we've covered creating a server, and grabbing the method, URL, headers and body out of requests. 
When we put that all together, it might look something like this: + +```javascript +const http = require('http'); + +http.createServer((request, response) => { + const { headers, method, url } = request; + let body = []; + request.on('error', (err) => { + console.error(err); + }).on('data', (chunk) => { + body.push(chunk); + }).on('end', () => { + body = Buffer.concat(body).toString(); + // At this point, we have the headers, method, url and body, and can now + // do whatever we need to in order to respond to this request. + }); +}).listen(8080); // Activates this server, listening on port 8080. +``` + +If we run this example, we'll be able to *receive* requests, but not *respond* to them. In fact, if you hit this example in a web browser, your request would time out, as nothing is being sent back to the client. + +So far we haven't touched on the `response` object at all, which is an instance of [`ServerResponse`][], which is a [`WritableStream`][]. It contains many useful methods for sending data back to the client. We'll cover that next. + +## HTTP Status Code + +If you don't bother setting it, the HTTP status code on a response will always be 200. Of course, not every HTTP response warrants this, and at some point you'll definitely want to send a different status code. To do that, you can set the `statusCode` property. + +```javascript +response.statusCode = 404; // Tell the client that the resource wasn't found. +``` + +There are some other shortcuts to this, as we'll see soon. + +## Setting Response Headers + +Headers are set through a convenient method called [`setHeader`][]. + +```javascript +response.setHeader('Content-Type', 'application/json'); +response.setHeader('X-Powered-By', 'bacon'); +``` + +When setting the headers on a response, the case is insensitive on their names. If you set a header repeatedly, the last value you set is the value that gets sent. 
+ +## Explicitly Sending Header Data + +The methods of setting the headers and status code that we've already discussed assume that you're using "implicit headers". This means you're counting on node to send the headers for you at the correct time before you start sending body data. + +If you want, you can *explicitly* write the headers to the response stream. To do this, there's a method called [`writeHead`][], which writes the status code and the headers to the stream. + +```javascript +response.writeHead(200, { + 'Content-Type': 'application/json', + 'X-Powered-By': 'bacon' +}); +``` + +Once you've set the headers (either implicitly or explicitly), you're ready to start sending response data. + +## Sending Response Body + +Since the `response` object is a [`WritableStream`][], writing a response body out to the client is just a matter of using the usual stream methods. + +```javascript +response.write(''); +response.write(''); +response.write('

Hello, World!

'); +response.write(''); +response.write(''); +response.end(); +``` + +The `end` function on streams can also take in some optional data to send as the last bit of data on the stream, so we can simplify the example above as follows. + +```javascript +response.end('

Hello, World!

'); +``` + +> **Note:** It's important to set the status and headers *before* you start writing chunks of data to the body. This makes sense, since headers come before the body in HTTP responses. + +## Another Quick Thing About Errors + +The `response` stream can also emit `'error'` events, and at some point you're going to have to deal with that as well. All of the advice for `request` stream errors still applies here. + +## Put It All Together + +Now that we've learned about making HTTP responses, let's put it all together. Building on the earlier example, we're going to make a server that sends back all of the data that was sent to us by the user. We'll format that data as JSON using `JSON.stringify`. + +```javascript +const http = require('http'); + +http.createServer((request, response) => { + const { headers, method, url } = request; + let body = []; + request.on('error', (err) => { + console.error(err); + }).on('data', (chunk) => { + body.push(chunk); + }).on('end', () => { + body = Buffer.concat(body).toString(); + // BEGINNING OF NEW STUFF + + response.on('error', (err) => { + console.error(err); + }); + + response.statusCode = 200; + response.setHeader('Content-Type', 'application/json'); + // Note: the 2 lines above could be replaced with this next one: + // response.writeHead(200, {'Content-Type': 'application/json'}) + + const responseBody = { headers, method, url, body }; + + response.write(JSON.stringify(responseBody)); + response.end(); + // Note: the 2 lines above could be replaced with this next one: + // response.end(JSON.stringify(responseBody)) + + // END OF NEW STUFF + }); +}).listen(8080); +``` + +## Echo Server Example + +Let's simplify the previous example to make a simple echo server, which just sends whatever data is received in the request right back in the response. All we need to do is grab the data from the request stream and write that data to the response stream, similar to what we did previously. 
+ +```javascript +const http = require('http'); + +http.createServer((request, response) => { + let body = []; + request.on('data', (chunk) => { + body.push(chunk); + }).on('end', () => { + body = Buffer.concat(body).toString(); + response.end(body); + }); +}).listen(8080); +``` + +Now let's tweak this. We want to only send an echo under the following conditions: + +* The request method is POST. +* The URL is `/echo`. + +In any other case, we want to simply respond with a 404. + +```javascript +const http = require('http'); + +http.createServer((request, response) => { + if (request.method === 'POST' && request.url === '/echo') { + let body = []; + request.on('data', (chunk) => { + body.push(chunk); + }).on('end', () => { + body = Buffer.concat(body).toString(); + response.end(body); + }); + } else { + response.statusCode = 404; + response.end(); + } +}).listen(8080); +``` + +> **Note:** By checking the URL in this way, we're doing a form of "routing". Other forms of routing can be as simple as `switch` statements or as complex as whole frameworks like [`express`][]. If you're looking for something that does routing and nothing else, try [`router`][]. + +Great! Now let's take a stab at simplifying this. Remember, the `request` object is a [`ReadableStream`][] and the `response` object is a [`WritableStream`][]. That means we can use [`pipe`][] to direct data from one to the other. That's exactly what we want for an echo server! + +```javascript +const http = require('http'); + +http.createServer((request, response) => { + if (request.method === 'POST' && request.url === '/echo') { + request.pipe(response); + } else { + response.statusCode = 404; + response.end(); + } +}).listen(8080); +``` + +Yay streams! + +We're not quite done yet though. As mentioned multiple times in this guide, errors can and do happen, and we need to deal with them. 
+ +To handle errors on the request stream, we'll log the error to `stderr` and send a 400 status code to indicate a `Bad Request`. In a real-world application, though, we'd want to inspect the error to figure out what the correct status code and message would be. As usual with errors, you should consult the [`Error` documentation][]. + +On the response, we'll just log the error to `stderr`. + +```javascript +const http = require('http'); + +http.createServer((request, response) => { + request.on('error', (err) => { + console.error(err); + response.statusCode = 400; + response.end(); + }); + response.on('error', (err) => { + console.error(err); + }); + if (request.method === 'POST' && request.url === '/echo') { + request.pipe(response); + } else { + response.statusCode = 404; + response.end(); + } +}).listen(8080); +``` + +We've now covered most of the basics of handling HTTP requests. At this point, you should be able to: + +* Instantiate an HTTP server with a request handler function, and have it listen on a port. +* Get headers, URL, method and body data from `request` objects. +* Make routing decisions based on URL and/or other data in `request` objects. +* Send headers, HTTP status codes and body data via `response` objects. +* Pipe data from `request` objects and to `response` objects. +* Handle stream errors in both the `request` and `response` streams. + +From these basics, Node.js HTTP servers for many typical use cases can be constructed. There are plenty of other things these APIs provide, so be sure to read through the API docs for [`EventEmitters`][], [`Streams`][], and [`HTTP`][]. 
diff --git a/locale/es/docs/guides/backpressuring-in-streams.md b/locale/es/docs/guides/backpressuring-in-streams.md new file mode 100644 index 000000000000..b3fc5c6d3375 --- /dev/null +++ b/locale/es/docs/guides/backpressuring-in-streams.md @@ -0,0 +1,449 @@ +--- +title: Backpressuring in Streams +layout: docs.hbs +--- + +# Backpressuring in Streams + +There is a general problem that occurs during data handling called [`backpressure`][] and describes a buildup of data behind a buffer during data transfer. When the receiving end of the transfer has complex operations, or is slower for whatever reason, there is a tendency for data from the incoming source to accumulate, like a clog. + +To solve this problem, there must be a delegation system in place to ensure a smooth flow of data from one source to another. Different communities have resolved this issue uniquely to their programs, Unix pipes and TCP sockets are good examples of this, and is often times referred to as _flow control_. In Node.js, streams have been the adopted solution. + +The purpose of this guide is to further detail what backpressure is, and how exactly streams address this in Node.js' source code. The second part of the guide will introduce suggested best practices to ensure your application's code is safe and optimized when implementing streams. + +We assume a little familiarity with the general definition of [`backpressure`][], [`Buffer`][], and [`EventEmitters`][] in Node.js, as well as some experience with [`Stream`][]. If you haven't read through those docs, it's not a bad idea to take a look at the API documentation first, as it will help expand your understanding while reading this guide. + +## The Problem with Data Handling + +In a computer system, data is transferred from one process to another through pipes, sockets, and signals. In Node.js, we find a similar mechanism called [`Stream`][]. Streams are great! 
They do so much for Node.js and almost every part of the internal codebase utilizes that module. As a developer, you are more than encouraged to use them too! + +```javascript +const readline = require('readline'); + +// process.stdin and process.stdout are both instances of Streams. +const rl = readline.createInterface({ + input: process.stdin, + output: process.stdout +}); + +rl.question('Why should you use streams? ', (answer) => { + console.log(`Maybe it's ${answer}, maybe it's because they are awesome! :)`); + + rl.close(); +}); +``` + +A good example of why the backpressure mechanism implemented through streams is a great optimization can be demonstrated by comparing the internal system tools from Node.js' [`Stream`][] implementation. + +In one scenario, we will take a large file (approximately ~9gb) and compress it using the familiar [`zip(1)`][] tool. + +``` +zip The.Matrix.1080p.mkv +``` + +While that will take a few minutes to complete, in another shell we may run a script that takes Node.js' module [`zlib`][], that wraps around another compression tool, [`gzip(1)`][]. + +```javascript +const gzip = require('zlib').createGzip(); +const fs = require('fs'); + +const inp = fs.createReadStream('The.Matrix.1080p.mkv'); +const out = fs.createWriteStream('The.Matrix.1080p.mkv.gz'); + +inp.pipe(gzip).pipe(out); +``` + +To test the results, try opening each compressed file. The file compressed by the [`zip(1)`][] tool will notify you the file is corrupt, whereas the compression finished by [`Stream`][] will decompress without error. + +Note: In this example, we use `.pipe()` to get the data source from one end to the other. However, notice there are no proper error handlers attached. If a chunk of data were to fail to be properly received, the `Readable` source or `gzip` stream will not be destroyed. [`pump`][] is a utility tool that would properly destroy all the streams in a pipeline if one of them fails or closes, and is a must have in this case! 
+ +[`pump`][] is only necessary for Node.js 8.x or earlier, as for Node.js 10.x or later versions, [`pipeline`][] is introduced to replace [`pump`][]. This is a module method to pipe between streams, forwarding errors, properly cleaning up, and providing a callback when the pipeline is complete. + +Here is an example of using pipeline: + +```javascript +const { pipeline } = require('stream'); +const fs = require('fs'); +const zlib = require('zlib'); + +// Use the pipeline API to easily pipe a series of streams +// together and get notified when the pipeline is fully done. +// A pipeline to gzip a potentially huge video file efficiently: + +pipeline( + fs.createReadStream('The.Matrix.1080p.mkv'), + zlib.createGzip(), + fs.createWriteStream('The.Matrix.1080p.mkv.gz'), + (err) => { + if (err) { + console.error('Pipeline failed', err); + } else { + console.log('Pipeline succeeded'); + } + } +); +``` + +You can also call [`promisify`][] on pipeline to use it with `async` / `await`: + +```javascript +const stream = require('stream'); +const fs = require('fs'); +const zlib = require('zlib'); +const util = require('util'); + +const pipeline = util.promisify(stream.pipeline); + +async function run() { + try { + await pipeline( + fs.createReadStream('The.Matrix.1080p.mkv'), + zlib.createGzip(), + fs.createWriteStream('The.Matrix.1080p.mkv.gz'), + ); + console.log('Pipeline succeeded'); + } catch (err) { + console.error('Pipeline failed', err); + } +} +``` + +## Too Much Data, Too Quickly + +There are instances where a [`Readable`][] stream might give data to the [`Writable`][] much too quickly — much more than the consumer can handle! + +When that occurs, the consumer will begin to queue all the chunks of data for later consumption. The write queue will get longer and longer, and because of this more data must be kept in memory until the entire process has completed. 
+ +Writing to a disk is a lot slower than reading from a disk, thus, when we are trying to compress a file and write it to our hard disk, backpressure will occur because the write disk will not be able to keep up with the speed from the read. + +```javascript +// Secretly the stream is saying: "whoa, whoa! hang on, this is way too much!" +// Data will begin to build up on the read-side of the data buffer as +// `write` tries to keep up with the incoming data flow. +inp.pipe(gzip).pipe(outputFile); +``` + +This is why a backpressure mechanism is important. If a backpressure system was not present, the process would use up your system's memory, effectively slowing down other processes, and monopolizing a large part of your system until completion. + +This results in a few things: + +* Slowing down all other current processes +* A very overworked garbage collector +* Memory exhaustion + +In the following examples we will take out the [return value](https://github.com/nodejs/node/blob/55c42bc6e5602e5a47fb774009cfe9289cb88e71/lib/_stream_writable.js#L239) of the `.write()` function and change it to `true`, which effectively disables backpressure support in Node.js core. In any reference to 'modified' binary, we are talking about running the `node` binary without the `return ret;` line, and instead with the replaced `return true;`. + +## Excess Drag on Garbage Collection + +Let's take a look at a quick benchmark. Using the same example from above, we ran a few time trials to get a median time for both binaries. 
+ +``` + trial (#) | `node` binary (ms) | modified `node` binary (ms) +================================================================= + 1 | 56924 | 55011 + 2 | 52686 | 55869 + 3 | 59479 | 54043 + 4 | 54473 | 55229 + 5 | 52933 | 59723 +================================================================= +average time: | 55299 | 55975 +``` + +Both take around a minute to run, so there's not much of a difference at all, but let's take a closer look to confirm whether our suspicions are correct. We use the Linux tool [`dtrace`][] to evaluate what's happening with the V8 garbage collector. + +The GC (garbage collector) measured time indicates the intervals of a full cycle of a single sweep done by the garbage collector: + +``` +approx. time (ms) | GC (ms) | modified GC (ms) +================================================= + 0 | 0 | 0 + 1 | 0 | 0 + 40 | 0 | 2 + 170 | 3 | 1 + 300 | 3 | 1 + + * * * + * * * + * * * + + 39000 | 6 | 26 + 42000 | 6 | 21 + 47000 | 5 | 32 + 50000 | 8 | 28 + 54000 | 6 | 35 +``` + +While the two processes start off the same and seem to work the GC at the same rate, it becomes evident that after a few seconds with a properly working backpressure system in place, it spreads the GC load across consistent intervals of 4-8 milliseconds until the end of the data transfer. + +However, when a backpressure system is not in place, the V8 garbage collection starts to drag out. The normal binary called the GC approximately **75** times in a minute, whereas, the modified binary fires only **36** times. + +This is the slow and gradual debt accumulating from growing memory usage. As data gets transferred, without a backpressure system in place, more memory is being used for each chunk transfer. + +The more memory that is being allocated, the more the GC has to take care of in one sweep. The bigger the sweep, the more the GC needs to decide what can be freed up, and scanning for detached pointers in a larger memory space will consume more computing power. 
+ +## Memory Exhaustion + +To determine the memory consumption of each binary, we've clocked each process with `/usr/bin/time -lp sudo ./node ./backpressure-example/zlib.js` individually. + +This is the output on the normal binary: + +``` +Respecting the return value of .write() +============================================= +real 58.88 +user 56.79 +sys 8.79 + 87810048 maximum resident set size + 0 average shared memory size + 0 average unshared data size + 0 average unshared stack size + 19427 page reclaims + 3134 page faults + 0 swaps + 5 block input operations + 194 block output operations + 0 messages sent + 0 messages received + 1 signals received + 12 voluntary context switches + 666037 involuntary context switches +``` + +The maximum byte size occupied by virtual memory turns out to be approximately 87.81 mb. + +And now changing the [return value](https://github.com/nodejs/node/blob/55c42bc6e5602e5a47fb774009cfe9289cb88e71/lib/_stream_writable.js#L239) of the [`.write()`][] function, we get: + +``` +Without respecting the return value of .write(): +================================================== +real 54.48 +user 53.15 +sys 7.43 +1524965376 maximum resident set size + 0 average shared memory size + 0 average unshared data size + 0 average unshared stack size + 373617 page reclaims + 3139 page faults + 0 swaps + 18 block input operations + 199 block output operations + 0 messages sent + 0 messages received + 1 signals received + 25 voluntary context switches + 629566 involuntary context switches +``` + +The maximum byte size occupied by virtual memory turns out to be approximately 1.52 gb. + +Without streams in place to delegate the backpressure, there is an order of magnitude greater of memory space being allocated - a huge margin of difference between the same process! + +This experiment shows how optimized and cost-effective Node.js' backpressure mechanism is for your computing system. Now, let's do a break down on how it works! 
+ +## How Does Backpressure Resolve These Issues? + +There are different functions to transfer data from one process to another. In Node.js, there is an internal built-in function called [`.pipe()`][]. There are [other packages](https://github.com/sindresorhus/awesome-nodejs#streams) out there you can use too! Ultimately though, at the basic level of this process, we have two separate components: the _source_ of the data and the _consumer_. + +When [`.pipe()`][] is called from the source, it signals to the consumer that there is data to be transferred. The pipe function helps to set up the appropriate backpressure closures for the event triggers. + +In Node.js the source is a [`Readable`][] stream and the consumer is the [`Writable`][] stream (both of these may be interchanged with a [`Duplex`][] or a [`Transform`][] stream, but that is out-of-scope for this guide). + +The moment that backpressure is triggered can be narrowed exactly to the return value of a [`Writable`][]'s [`.write()`][] function. This return value is determined by a few conditions, of course. + +In any scenario where the data buffer has exceeded the [`highWaterMark`][] or the write queue is currently busy, [`.write()`][] will return `false`. + +When a `false` value is returned, the backpressure system kicks in. It will pause the incoming [`Readable`][] stream from sending any data and wait until the consumer is ready again. Once the data buffer is emptied, a [`'drain'`][] event will be emitted and resume the incoming data flow. + +Once the queue is finished, backpressure will allow data to be sent again. The space in memory that was being used will free itself up and prepare for the next batch of data. + +This effectively allows a fixed amount of memory to be used at any given time for a [`.pipe()`][] function. There will be no memory leakage, no infinite buffering, and the garbage collector will only have to deal with one area in memory! 
+ +So, if backpressure is so important, why have you (probably) not heard of it? Well the answer is simple: Node.js does all of this automatically for you. + +That's so great! But also not so great when we are trying to understand how to implement our own custom streams. + +Note: In most machines, there is a byte size that determines when a buffer is full (which will vary across different machines). Node.js allows you to set your own custom [`highWaterMark`][], but commonly, the default is set to 16kb (16384, or 16 for objectMode streams). In instances where you might want to raise that value, go for it, but do so with caution! + +## Lifecycle of `.pipe()` + +To achieve a better understanding of backpressure, here is a flow-chart on the lifecycle of a [`Readable`][] stream being [piped](https://nodejs.org/docs/latest/api/stream.html#stream_readable_pipe_destination_options) into a [`Writable`][] stream: + +``` + +===================+ + x--> Piping functions +--> src.pipe(dest) | + x are set up during |===================| + x the .pipe method. | Event callbacks | + +===============+ x |-------------------| + | Your Data | x They exist outside | .on('close', cb) | + +=======+=======+ x the data flow, but | .on('data', cb) | + | x importantly attach | .on('drain', cb) | + | x events, and their | .on('unpipe', cb) | ++---------v---------+ x respective callbacks. | .on('error', cb) | +| Readable Stream +----+ | .on('finish', cb) | ++-^-------^-------^-+ | | .on('end', cb) | + ^ | ^ | +-------------------+ + | | | | + | ^ | | + ^ ^ ^ | +-------------------+ +=================+ + ^ | ^ +----> Writable Stream +---------> .write(chunk) | + | | | +-------------------+ +=======+=========+ + | | | | + | ^ | +------------------v---------+ + ^ | +-> if (!chunk) | Is this chunk too big? | + ^ | | emit .end(); | Is the queue busy? 
| + | | +-> else +-------+----------------+---+ + | ^ | emit .write(); | | + | ^ ^ +--v---+ +---v---+ + | | ^-----------------------------------< No | | Yes | + ^ | +------+ +---v---+ + ^ | | + | ^ emit .pause(); +=================+ | + | ^---------------^-----------------------+ return false; <-----+---+ + | +=================+ | + | | + ^ when queue is empty +============+ | + ^------------^-----------------------< Buffering | | + | |============| | + +> emit .drain(); | ^Buffer^ | | + +> emit .resume(); +------------+ | + | ^Buffer^ | | + +------------+ add chunk to queue | + | <---^---------------------< + +============+ +``` + +Note: If you are setting up a pipeline to chain together a few streams to manipulate your data, you will most likely be implementing [`Transform`][] stream. + +In this case, your output from your [`Readable`][] stream will enter in the [`Transform`][] and will pipe into the [`Writable`][]. + +```javascript +Readable.pipe(Transformable).pipe(Writable); +``` + +Backpressure will be automatically applied, but note that both the incoming and outgoing `highWaterMark` of the [`Transform`][] stream may be manipulated and will effect the backpressure system. + +## Backpressure Guidelines + +Since [Node.js v0.10](https://nodejs.org/docs/v0.10.0/), the [`Stream`][] class has offered the ability to modify the behaviour of the [`.read()`][] or [`.write()`][] by using the underscore version of these respective functions ([`._read()`][] and [`._write()`][]). + +There are guidelines documented for [implementing Readable streams](https://nodejs.org/docs/latest/api/stream.html#stream_implementing_a_readable_stream) and [implementing Writable streams](https://nodejs.org/docs/latest/api/stream.html#stream_implementing_a_writable_stream). We will assume you've read these over, and the next section will go a little bit more in-depth. + +## Rules to Abide By When Implementing Custom Streams + +The golden rule of streams is **to always respect backpressure**. 
What constitutes best practice is non-contradictory practice. So long as you are careful to avoid behaviours that conflict with internal backpressure support, you can be sure you're following good practice. + +In general, + +1. Never `.push()` if you are not asked. +2. Never call `.write()` after it returns false but wait for 'drain' instead. +3. Streams change between different Node.js versions, and the library you use. Be careful and test things. + +Note: In regards to point 3, an incredibly useful package for building browser streams is [`readable-stream`][]. Rod Vagg has written a [great blog post](https://r.va.gg/2014/06/why-i-dont-use-nodes-core-stream-module.html) describing the utility of this library. In short, it provides a type of automated graceful degradation for [`Readable`][] streams, and supports older versions of browsers and Node.js. + +## Rules specific to Readable Streams + +So far, we have taken a look at how [`.write()`][] affects backpressure and have focused much on the [`Writable`][] stream. Because of Node.js' functionality, data is technically flowing downstream from [`Readable`][] to [`Writable`][]. However, as we can observe in any transmission of data, matter, or energy, the source is just as important as the destination and the [`Readable`][] stream is vital to how backpressure is handled. + +Both these processes rely on one another to communicate effectively; if the [`Readable`][] ignores when the [`Writable`][] stream asks for it to stop sending in data, it can be just as problematic as when the [`.write()`][]'s return value is incorrect. + +So, as well as respecting the [`.write()`][] return, we must also respect the return value of [`.push()`][] used in the [`._read()`][] method. If [`.push()`][] returns a `false` value, the stream will stop reading from the source. Otherwise, it will continue without pause. 
+ +Here is an example of bad practice using [`.push()`][]: + +```javascript +// This is problematic as it completely ignores return value from push +// which may be a signal for backpressure from the destination stream! +class MyReadable extends Readable { + _read(size) { + let chunk; + while (null !== (chunk = getNextChunk())) { + this.push(chunk); + } + } +} +``` + +Additionally, from outside the custom stream, there are pitfalls for ignoring backpressure. In this counter-example of good practice, the application's code forces data through whenever it is available (signaled by the [`'data'` event][]): + +```javascript +// This ignores the backpressure mechanisms Node.js has set in place, +// and unconditionally pushes through data, regardless if the +// destination stream is ready for it or not. +readable.on('data', (data) => + writable.write(data) +); +``` + +## Rules specific to Writable Streams + +Recall that a [`.write()`][] may return true or false dependent on some conditions. Luckily for us, when building our own [`Writable`][] stream, the [`stream state machine`][] will handle our callbacks and determine when to handle backpressure and optimize the flow of data for us. + +However, when we want to use a [`Writable`][] directly, we must respect the [`.write()`][] return value and pay close attention to these conditions: + +* If the write queue is busy, [`.write()`][] will return false. +* If the data chunk is too large, [`.write()`][] will return false (the limit is indicated by the variable, [`highWaterMark`][]). +```javascript +// This writable is invalid because of the async nature of JavaScript callbacks. +// Without a return statement for each callback prior to the last, +// there is a great chance multiple callbacks will be called. 
+class MyWritable extends Writable { + _write(chunk, encoding, callback) { + if (chunk.toString().indexOf('a') >= 0) + callback(); + else if (chunk.toString().indexOf('b') >= 0) + callback(); + callback(); + } +} + +// The proper way to write this would be: + if (chunk.contains('a')) + return callback(); + if (chunk.contains('b')) + return callback(); + callback(); +``` + +There are also some things to look out for when implementing [`._writev()`][]. The function is coupled with [`.cork()`][], but there is a common mistake when writing: + +```javascript +// Using .uncork() twice here makes two calls on the C++ layer, rendering the +// cork/uncork technique useless. +ws.cork(); +ws.write('hello '); +ws.write('world '); +ws.uncork(); + +ws.cork(); +ws.write('from '); +ws.write('Matteo'); +ws.uncork(); + +// The correct way to write this is to utilize process.nextTick(), which fires +// on the next event loop. +ws.cork(); +ws.write('hello '); +ws.write('world '); +process.nextTick(doUncork, ws); + +ws.cork(); +ws.write('from '); +ws.write('Matteo'); +process.nextTick(doUncork, ws); + +// As a global function. +function doUncork(stream) { + stream.uncork(); +} +``` + +[`.cork()`][] can be called as many times we want, we just need to be careful to call [`.uncork()`][] the same amount of times to make it flow again. + +## Conclusion + +Streams are an often used module in Node.js. They are important to the internal structure, and for developers, to expand and connect across the Node.js modules ecosystem. + +Hopefully, you will now be able to troubleshoot, safely code your own [`Writable`][] and [`Readable`][] streams with backpressure in mind, and share your knowledge with colleagues and friends. + +Be sure to read up more on [`Stream`][] for other API functions to help improve and unleash your streaming capabilities when building an application with Node.js. 
diff --git a/locale/es/docs/guides/blocking-vs-non-blocking.md b/locale/es/docs/guides/blocking-vs-non-blocking.md new file mode 100644 index 000000000000..579d2c912e5c --- /dev/null +++ b/locale/es/docs/guides/blocking-vs-non-blocking.md @@ -0,0 +1,103 @@ +--- +title: Overview of Blocking vs Non-Blocking +layout: docs.hbs +--- + +# Overview of Blocking vs Non-Blocking + +This overview covers the difference between **blocking** and **non-blocking** calls in Node.js. This overview will refer to the event loop and libuv but no prior knowledge of those topics is required. Readers are assumed to have a basic understanding of the JavaScript language and Node.js [callback pattern](/en/knowledge/getting-started/control-flow/what-are-callbacks/). + +> "I/O" refers primarily to interaction with the system's disk and network supported by [libuv](https://libuv.org/). + +## Blocking + +**Blocking** is when the execution of additional JavaScript in the Node.js process must wait until a non-JavaScript operation completes. This happens because the event loop is unable to continue running JavaScript while a **blocking** operation is occurring. + +In Node.js, JavaScript that exhibits poor performance due to being CPU intensive rather than waiting on a non-JavaScript operation, such as I/O, isn't typically referred to as **blocking**. Synchronous methods in the Node.js standard library that use libuv are the most commonly used **blocking** operations. Native modules may also have **blocking** methods. + +All of the I/O methods in the Node.js standard library provide asynchronous versions, which are **non-blocking**, and accept callback functions. Some methods also have **blocking** counterparts, which have names that end with `Sync`. + +## Comparing Code + +**Blocking** methods execute **synchronously** and **non-blocking** methods execute **asynchronously**. 
+ +Using the File System module as an example, this is a **synchronous** file read: + +```js +const fs = require('fs'); +const data = fs.readFileSync('/file.md'); // blocks here until file is read +``` + +And here is an equivalent **asynchronous** example: + +```js +const fs = require('fs'); +fs.readFile('/file.md', (err, data) => { + if (err) throw err; +}); +``` + +The first example appears simpler than the second but has the disadvantage of the second line **blocking** the execution of any additional JavaScript until the entire file is read. Note that in the synchronous version if an error is thrown it will need to be caught or the process will crash. In the asynchronous version, it is up to the author to decide whether an error should throw as shown. + +Let's expand our example a little bit: + +```js +const fs = require('fs'); +const data = fs.readFileSync('/file.md'); // blocks here until file is read +console.log(data); +moreWork(); // will run after console.log +``` + +And here is a similar, but not equivalent asynchronous example: + +```js +const fs = require('fs'); +fs.readFile('/file.md', (err, data) => { + if (err) throw err; + console.log(data); +}); +moreWork(); // will run before console.log +``` + +In the first example above, `console.log` will be called before `moreWork()`. In the second example `fs.readFile()` is **non-blocking** so JavaScript execution can continue and `moreWork()` will be called first. The ability to run `moreWork()` without waiting for the file read to complete is a key design choice that allows for higher throughput. + +## Concurrency and Throughput + +JavaScript execution in Node.js is single threaded, so concurrency refers to the event loop's capacity to execute JavaScript callback functions after completing other work. Any code that is expected to run in a concurrent manner must allow the event loop to continue running as non-JavaScript operations, like I/O, are occurring. 
+ +As an example, let's consider a case where each request to a web server takes 50ms to complete and 45ms of that 50ms is database I/O that can be done asynchronously. Choosing **non-blocking** asynchronous operations frees up that 45ms per request to handle other requests. This is a significant difference in capacity just by choosing to use **non-blocking** methods instead of **blocking** methods. + +The event loop is different than models in many other languages where additional threads may be created to handle concurrent work. + +## Dangers of Mixing Blocking and Non-Blocking Code + +There are some patterns that should be avoided when dealing with I/O. Let's look at an example: + +```js +const fs = require('fs'); +fs.readFile('/file.md', (err, data) => { + if (err) throw err; + console.log(data); +}); +fs.unlinkSync('/file.md'); +``` + +In the above example, `fs.unlinkSync()` is likely to be run before `fs.readFile()`, which would delete `file.md` before it is actually read. A better way to write this, which is completely **non-blocking** and guaranteed to execute in the correct order is: + +```js +const fs = require('fs'); +fs.readFile('/file.md', (readFileErr, data) => { + if (readFileErr) throw readFileErr; + console.log(data); + fs.unlink('/file.md', (unlinkErr) => { + if (unlinkErr) throw unlinkErr; + }); +}); +``` + +The above places a **non-blocking** call to `fs.unlink()` within the callback of `fs.readFile()` which guarantees the correct order of operations. 
+ +## Additional Resources + +* [libuv](https://libuv.org/) +* [About Node.js](/en/about/) diff --git a/locale/es/docs/guides/buffer-constructor-deprecation.md b/locale/es/docs/guides/buffer-constructor-deprecation.md new file mode 100644 index 000000000000..8f5611e2e430 --- /dev/null +++ b/locale/es/docs/guides/buffer-constructor-deprecation.md @@ -0,0 +1,220 @@ +--- +title: Porting to the Buffer.from()/Buffer.alloc() API +layout: docs.hbs +--- + +# Porting to the `Buffer.from()`/`Buffer.alloc()` API + +## Overview + +This guide explains how to migrate to safe `Buffer` constructor methods. The migration fixes the following deprecation warning: + +
+The Buffer() and new Buffer() constructors are not recommended for use due to security and usability concerns. Please use the new Buffer.alloc(), Buffer.allocUnsafe(), or Buffer.from() construction methods instead. +
+ +* [Variant 1: Drop support for Node.js ≤ 4.4.x and 5.0.0 — 5.9.x](#variant-1) (*recommended*) +* [Variant 2: Use a polyfill](#variant-2) +* [Variant 3: Manual detection, with safeguards](#variant-3) + +### Finding problematic bits of code using `grep` + +Just run `grep -nrE '[^a-zA-Z](Slow)?Buffer\s*\(' --exclude-dir node_modules`. + +It will find all the potentially unsafe places in your own code (with some considerably unlikely exceptions). + +### Finding problematic bits of code using Node.js 8 + +If you’re using Node.js ≥ 8.0.0 (which is recommended), Node.js exposes multiple options that help with finding the relevant pieces of code: + +* `--trace-warnings` will make Node.js show a stack trace for this warning and other warnings that are printed by Node.js. +* `--trace-deprecation` does the same thing, but only for deprecation warnings. +* `--pending-deprecation` will show more types of deprecation warnings. In particular, it will show the `Buffer()` deprecation warning, even on Node.js 8. + +You can set these flags using environment variables: + +```bash +$ export NODE_OPTIONS='--trace-warnings --pending-deprecation' +$ cat example.js +'use strict'; +const foo = new Buffer('foo'); +$ node example.js +(node:7147) [DEP0005] DeprecationWarning: The Buffer() and new Buffer() constructors are not recommended for use due to security and usability concerns. Please use the new Buffer.alloc(), Buffer.allocUnsafe(), or Buffer.from() construction methods instead. + at showFlaggedDeprecation (buffer.js:127:13) + at new Buffer (buffer.js:148:3) + at Object. (/path/to/example.js:2:13) + [... more stack trace lines ...] +``` + +### Finding problematic bits of code using linters + +ESLint rules [no-buffer-constructor](https://eslint.org/docs/rules/no-buffer-constructor) or [node/no-deprecated-api](https://github.com/mysticatea/eslint-plugin-node/blob/master/docs/rules/no-deprecated-api.md) also find calls to deprecated `Buffer()` API. 
Those rules are included in some presets. + +There is a drawback, though, that it doesn't always [work correctly](https://github.com/chalker/safer-buffer#why-not-safe-buffer) when `Buffer` is overridden e.g. with a polyfill, so recommended is a combination of this and some other method described above. + +## Variant 1: Drop support for Node.js ≤ 4.4.x and 5.0.0 — 5.9.x + +This is the recommended solution nowadays that would imply only minimal overhead. + +The Node.js 5.x release line has been unsupported since July 2016, and the Node.js 4.x release line reaches its End of Life in April 2018 (→ [Schedule](https://github.com/nodejs/Release#release-schedule)). This means that these versions of Node.js will *not* receive any updates, even in case of security issues, so using these release lines should be avoided, if at all possible. + +What you would do in this case is to convert all `new Buffer()` or `Buffer()` calls to use `Buffer.alloc()` or `Buffer.from()`, in the following way: + +* For `new Buffer(number)`, replace it with `Buffer.alloc(number)`. +* For `new Buffer(string)` (or `new Buffer(string, encoding)`), replace it with `Buffer.from(string)` (or `Buffer.from(string, encoding)`). +* For all other combinations of arguments (these are much rarer), also replace `new Buffer(...arguments)` with `Buffer.from(...arguments)`. + +Note that `Buffer.alloc()` is also _faster_ on the current Node.js versions than `new Buffer(size).fill(0)`, which is what you would otherwise need to ensure zero-filling. + +Enabling ESLint rule [no-buffer-constructor](https://eslint.org/docs/rules/no-buffer-constructor) or [node/no-deprecated-api](https://github.com/mysticatea/eslint-plugin-node/blob/master/docs/rules/no-deprecated-api.md) is recommended to avoid accidental unsafe `Buffer` API usage. 
+ +There is also a [JSCodeshift codemod](https://github.com/joyeecheung/node-dep-codemod#dep005) for automatically migrating `Buffer` constructors to `Buffer.alloc()` or `Buffer.from()`. Note that it currently only works with cases where the arguments are literals or where the constructor is invoked with two arguments. + +_If you currently support those older Node.js versions and dropping support for them is not possible, or if you support older branches of your packages, consider using [Variant 2](#variant-2) or [Variant 3](#variant-3) on older branches, so people using those older branches will also receive the fix. That way, you will eradicate potential issues caused by unguarded `Buffer` API usage and your users will not observe a runtime deprecation warning when running your code on Node.js 10._ + +## Variant 2: Use a polyfill + +There are three different polyfills available: + +* **[safer-buffer](https://www.npmjs.com/package/safer-buffer)** is a drop-in replacement for the entire `Buffer` API, that will _throw_ when using `new Buffer()`. + + You would take exactly the same steps as in [Variant 1](#variant-1), but with a polyfill `const Buffer = require('safer-buffer').Buffer` in all files where you use the new `Buffer` API. + + Do not use the old `new Buffer()` API. In any files where the line above is added, using old `new Buffer()` API will _throw_. + +* **[buffer-from](https://www.npmjs.com/package/buffer-from) and/or [buffer-alloc](https://www.npmjs.com/package/buffer-alloc)** are [ponyfills](https://ponyfill.com/) for their respective part of the `Buffer` API. You only need to add the package(s) corresponding to the API you are using. + + You would import the module needed with an appropriate name, e.g. `const bufferFrom = require('buffer-from')` and then use that instead of the call to `new Buffer()`, e.g. `new Buffer('test')` becomes `bufferFrom('test')`. 
+ + A downside with this approach is slightly more code changes to migrate off them (as you would be using e.g. `Buffer.from()` under a different name). + +* **[safe-buffer](https://www.npmjs.com/package/safe-buffer)** is also a drop-in replacement for the entire `Buffer` API, but using `new Buffer()` will still work as before. + + A downside to this approach is that it will allow you to also use the older `new Buffer()` API in your code, which is problematic since it can cause issues in your code, and will start emitting runtime deprecation warnings starting with Node.js 10 ([read more here](https://github.com/chalker/safer-buffer#why-not-safe-buffer)). + +Note that in either case, it is important that you also remove all calls to the old `Buffer` API manually — just throwing in `safe-buffer` doesn't fix the problem by itself, it just provides a polyfill for the new API. I have seen people doing that mistake. + +Enabling ESLint rule [no-buffer-constructor](https://eslint.org/docs/rules/no-buffer-constructor) or [node/no-deprecated-api](https://github.com/mysticatea/eslint-plugin-node/blob/master/docs/rules/no-deprecated-api.md) is recommended. + +_Don't forget to drop the polyfill usage once you drop support for Node.js < 4.5.0._ + +## Variant 3 — Manual detection, with safeguards + +This is useful if you create `Buffer` instances in only a few places (e.g. one), or you have your own wrapper around them. + +### `Buffer(0)` + +This special case for creating empty buffers can be safely replaced with `Buffer.concat([])`, which returns the same result all the way down to Node.js 0.8.x. 
+ +### `Buffer(notNumber)` + +Before: + +```js +const buf = new Buffer(notNumber, encoding); +``` + +After: + +```js +let buf; +if (Buffer.from && Buffer.from !== Uint8Array.from) { + buf = Buffer.from(notNumber, encoding); +} else { + if (typeof notNumber === 'number') { + throw new Error('The "size" argument must be not of type number.'); + } + buf = new Buffer(notNumber, encoding); +} +``` + +`encoding` is optional. + +Note that the `typeof notNumber` before `new Buffer()` is required (for cases when `notNumber` argument is not hard-coded) and _is not caused by the deprecation of `Buffer` constructor_ — it's exactly _why_ the `Buffer` constructor is deprecated. Ecosystem packages lacking this type-check caused numerous security issues — situations when unsanitized user input could end up in the `Buffer(arg)` create problems ranging from DoS to leaking sensitive information to the attacker from the process memory. + +When `notNumber` argument is hardcoded (e.g. literal `"abc"` or `[0,1,2]`), the `typeof` check can be omitted. + +Also, note that using TypeScript does not fix this problem for you — when libs written in `TypeScript` are used from JS, or when user input ends up there — it behaves exactly as pure JS, as all type checks are translation-time only and are not present in the actual JS code which TS compiles to. + +### `Buffer(number)` + +For Node.js 0.10.x (and below) support: + +```js +var buf; +if (Buffer.alloc) { + buf = Buffer.alloc(number); +} else { + buf = new Buffer(number); + buf.fill(0); +} +``` + +Otherwise (Node.js ≥ 0.12.x): + +```js +const buf = Buffer.alloc ? Buffer.alloc(number) : new Buffer(number).fill(0); +``` + +## Regarding `Buffer.allocUnsafe()` + +Be extra cautious when using `Buffer.allocUnsafe()`: + +* Don't use it if you don't have a good reason to + * e.g. 
you probably won't ever see a performance difference for small buffers, in fact, those might be even faster with `Buffer.alloc()`, + * if your code is not in the hot code path — you also probably won't notice a difference, + * keep in mind that zero-filling minimizes the potential risks. +* If you use it, make sure that you never return the buffer in a partially-filled state, + * if you are writing to it sequentially — always truncate it to the actual written length + +Errors in handling buffers allocated with `Buffer.allocUnsafe()` could result in various issues, ranged from undefined behavior of your code to sensitive data (user input, passwords, certs) leaking to the remote attacker. + +_Note that the same applies to `new Buffer()` usage without zero-filling, depending on the Node.js version (and lacking type checks also adds DoS to the list of potential problems)._ + +## FAQ + +### What is wrong with the + +`Buffer` constructor? + +The `Buffer` constructor could be used to create a buffer in many different ways: + +* `new Buffer(42)` creates a `Buffer` of 42 bytes. Before Node.js 8, this buffer contained *arbitrary memory* for performance reasons, which could include anything ranging from program source code to passwords and encryption keys. +* `new Buffer('abc')` creates a `Buffer` that contains the UTF-8-encoded version of the string `'abc'`. A second argument could specify another encoding: for example, `new Buffer(string, 'base64')` could be used to convert a Base64 string into the original sequence of bytes that it represents. +* There are several other combinations of arguments. + +This meant that in code like `var buffer = new Buffer(foo);`, *it is not possible to tell what exactly the contents of the generated buffer are* without knowing the type of `foo`. + +Sometimes, the value of `foo` comes from an external source. 
For example, this function could be exposed as a service on a web server, converting a UTF-8 string into its Base64 form: + +```js +function stringToBase64(req, res) { + // The request body should have the format of `{ string: 'foobar' }`. + const rawBytes = new Buffer(req.body.string); + const encoded = rawBytes.toString('base64'); + res.end({ encoded }); +} +``` + +Note that this code does *not* validate the type of `req.body.string`: + +* `req.body.string` is expected to be a string. If this is the case, all goes well. +* `req.body.string` is controlled by the client that sends the request. +* If `req.body.string` is the *number* `50`, the `rawBytes` would be `50` bytes: + * Before Node.js 8, the content would be uninitialized + * After Node.js 8, the content would be `50` bytes with the value `0` + +Because of the missing type check, an attacker could intentionally send a number as part of the request. Using this, they can either: + +* Read uninitialized memory. This **will** leak passwords, encryption keys and other kinds of sensitive information. (Information leak) +* Force the program to allocate a large amount of memory. For example, when specifying `500000000` as the input value, each request will allocate 500MB of memory. This can be used to either exhaust the memory available of a program completely and make it crash, or slow it down significantly. (Denial of Service) + +Both of these scenarios are considered serious security issues in a real-world web server context. + +When using `Buffer.from(req.body.string)` instead, passing a number will always throw an exception instead, giving a controlled behavior that can always be handled by the program. + +### The + +`Buffer()` constructor has been deprecated for a while. Is this really an issue? + +Surveys of code in the `npm` ecosystem have shown that the `Buffer()` constructor is still widely used. This includes new code, and overall usage of such code has actually been *increasing*. 
diff --git a/locale/es/docs/guides/debugging-getting-started.md b/locale/es/docs/guides/debugging-getting-started.md new file mode 100644 index 000000000000..5abb1fa1c270 --- /dev/null +++ b/locale/es/docs/guides/debugging-getting-started.md @@ -0,0 +1,189 @@ +--- +title: Debugging - Getting Started +layout: docs.hbs +--- + +# Debugging Guide + +This guide will help you get started debugging your Node.js apps and scripts. + +## Enable Inspector + +When started with the `--inspect` switch, a Node.js process listens for a debugging client. By default, it will listen at host and port 127.0.0.1:9229. Each process is also assigned a unique [UUID](https://tools.ietf.org/html/rfc4122). + +Inspector clients must know and specify host address, port, and UUID to connect. A full URL will look something like `ws://127.0.0.1:9229/0f2c936f-b1cd-4ac9-aab3-f63b0f33d55e`. + +Node.js will also start listening for debugging messages if it receives a `SIGUSR1` signal. (`SIGUSR1` is not available on Windows.) In Node.js 7 and earlier, this activates the legacy Debugger API. In Node.js 8 and later, it will activate the Inspector API. + +--- +## Security Implications + +Since the debugger has full access to the Node.js execution environment, a malicious actor able to connect to this port may be able to execute arbitrary code on behalf of the Node.js process. It is important to understand the security implications of exposing the debugger port on public and private networks. + +### Exposing the debug port publicly is unsafe + +If the debugger is bound to a public IP address, or to 0.0.0.0, any clients that can reach your IP address will be able to connect to the debugger without any restriction and will be able to run arbitrary code. + +By default `node --inspect` binds to 127.0.0.1. You explicitly need to provide a public IP address or 0.0.0.0, etc., if you intend to allow external connections to the debugger. Doing so may expose you to a potentially significant security threat. 
We suggest you ensure appropriate firewalls and access controls are in place to prevent a security exposure. + +See the section on '[Enabling remote debugging scenarios](#enabling-remote-debugging-scenarios)' for some advice on how to safely allow remote debugger clients to connect. + +### Local applications have full access to the inspector + +Even if you bind the inspector port to 127.0.0.1 (the default), any applications running locally on your machine will have unrestricted access. This is by design to allow local debuggers to be able to attach conveniently. + +### Browsers, WebSockets and same-origin policy + +Websites open in a web-browser can make WebSocket and HTTP requests under the browser security model. An initial HTTP connection is necessary to obtain a unique debugger session id. The same-origin-policy prevents websites from being able to make this HTTP connection. For additional security against [DNS rebinding attacks](https://en.wikipedia.org/wiki/DNS_rebinding), Node.js verifies that the 'Host' headers for the connection either specify an IP address or `localhost` or `localhost6` precisely. + +These security policies disallow connecting to a remote debug server by specifying the hostname. You can work-around this restriction by specifying either the IP address or by using ssh tunnels as described below. + +## Inspector Clients + +Several commercial and open source tools can connect to the Node.js Inspector. Basic info on these follows: + +### [node-inspect](https://github.com/nodejs/node-inspect) + +* CLI Debugger supported by the Node.js Foundation which uses the [Inspector Protocol](https://chromedevtools.github.io/debugger-protocol-viewer/v8/). +* A version is bundled with Node.js and can be used with `node inspect myscript.js`. +* The latest version can also be installed independently (e.g. `npm install -g node-inspect`) and used with `node-inspect myscript.js`. 
+ +### [Chrome DevTools](https://github.com/ChromeDevTools/devtools-frontend) 55+, [Microsoft Edge](https://www.microsoftedgeinsider.com) + +* **Option 1**: Open `chrome://inspect` in a Chromium-based browser or `edge://inspect` in Edge. Click the Configure button and ensure your target host and port are listed. +* **Option 2**: Copy the `devtoolsFrontendUrl` from the output of `/json/list` (see above) or the --inspect hint text and paste into Chrome. + +### [Visual Studio Code](https://github.com/microsoft/vscode) 1.10+ + +* In the Debug panel, click the settings icon to open `.vscode/launch.json`. Select "Node.js" for initial setup. + +### [Visual Studio](https://github.com/Microsoft/nodejstools) 2017 + +* Choose "Debug > Start Debugging" from the menu or hit F5. +* [Detailed instructions](https://github.com/Microsoft/nodejstools/wiki/Debugging). + +### [JetBrains WebStorm](https://www.jetbrains.com/webstorm/) 2017.1+ and other JetBrains IDEs + +* Create a new Node.js debug configuration and hit Debug. `--inspect` will be used by default for Node.js 7+. To disable uncheck `js.debugger.node.use.inspect` in the IDE Registry. + +### [chrome-remote-interface](https://github.com/cyrus-and/chrome-remote-interface) + +* Library to ease connections to Inspector Protocol endpoints. + +### [Gitpod](https://www.gitpod.io) + +* Start a Node.js debug configuration from the `Debug` view or hit `F5`. [Detailed instructions](https://medium.com/gitpod/debugging-node-js-applications-in-theia-76c94c76f0a1) + +### [Eclipse IDE](https://eclipse.org/eclipseide) with Eclipse Wild Web Developer extension + +* From a .js file, choose "Debug As... > Node program", or +* Create a Debug Configuration to attach debugger to running Node.js application (already started with `--inspect`). + +--- + +## Command-line options + +The following table lists the impact of various runtime flags on debugging: + +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FlagMeaning
--inspect +
    +
  • Enable inspector agent
  • +
  • Listen on default address and port (127.0.0.1:9229)
  • +
+
--inspect=[host:port] +
    +
  • Enable inspector agent
  • +
  • Bind to address or hostname host (default: 127.0.0.1)
  • +
  • Listen on port port (default: 9229)
  • +
+
--inspect-brk +
    +
  • Enable inspector agent
  • +
  • Listen on default address and port (127.0.0.1:9229)
  • +
  • Break before user code starts
  • +
+
--inspect-brk=[host:port] +
    +
  • Enable inspector agent
  • +
  • Bind to address or hostname host (default: 127.0.0.1)
  • +
  • Listen on port port (default: 9229)
  • +
  • Break before user code starts
  • +
+
node inspect script.js +
    +
  • Spawn child process to run user's script under --inspect flag; + and use main process to run CLI debugger.
  • +
+
node inspect --port=xxxx script.js +
    +
  • Spawn child process to run user's script under --inspect flag; + and use main process to run CLI debugger.
  • +
  • Listen on port port (default: 9229)
  • +
+
+ +--- + +## Enabling remote debugging scenarios + +We recommend that you never have the debugger listen on a public IP address. If you need to allow remote debugging connections we recommend the use of ssh tunnels instead. We provide the following example for illustrative purposes only. Please understand the security risk of allowing remote access to a privileged service before proceeding. + +Let's say you are running Node.js on a remote machine, remote.example.com, that you want to be able to debug. On that machine, you should start the node process with the inspector listening only to localhost (the default). + +```bash +node --inspect server.js +``` + +Now, on your local machine from where you want to initiate a debug client connection, you can setup an ssh tunnel: + +```bash +ssh -L 9221:localhost:9229 user@remote.example.com +``` + +This starts a ssh tunnel session where a connection to port 9221 on your local machine will be forwarded to port 9229 on remote.example.com. You can now attach a debugger such as Chrome DevTools or Visual Studio Code to localhost:9221, which should be able to debug as if the Node.js application was running locally. + +--- + +## Legacy Debugger + +**The legacy debugger has been deprecated as of Node.js 7.7.0. Please use `--inspect` and Inspector instead.** + +When started with the **--debug** or **--debug-brk** switches in version 7 and earlier, Node.js listens for debugging commands defined by the discontinued V8 Debugging Protocol on a TCP port, by default `5858`. Any debugger client which speaks this protocol can connect to and debug the running process; a couple popular ones are listed below. + +The V8 Debugging Protocol is no longer maintained or documented. + +### [Built-in Debugger](https://nodejs.org/dist/latest-v6.x/docs/api/debugger.html) + +Start `node debug script_name.js` to start your script under the builtin command-line debugger. 
Your script starts in another Node.js process started with the `--debug-brk` option, and the initial Node.js process runs the `_debugger.js` script and connects to your target. + +### [node-inspector](https://github.com/node-inspector/node-inspector) + +Debug your Node.js app with Chrome DevTools by using an intermediary process which translates the Inspector Protocol used in Chromium to the V8 Debugger protocol used in Node.js. + + diff --git a/locale/es/docs/guides/diagnostics-flamegraph.md b/locale/es/docs/guides/diagnostics-flamegraph.md new file mode 100644 index 000000000000..678d5040802a --- /dev/null +++ b/locale/es/docs/guides/diagnostics-flamegraph.md @@ -0,0 +1,121 @@ +--- +title: Diagnostics - Flame Graphs +layout: docs.hbs +--- + +# Flame Graphs + +## What's a flame graph useful for? + +Flame graphs are a way of visualizing CPU time spent in functions. They can help you pin down where you spend too much time doing synchronous operations. + +## How to create a flame graph + +You might have heard creating a flame graph for Node.js is difficult, but that's not true (anymore). Solaris vms are no longer needed for flame graphs! + +Flame graphs are generated from `perf` output, which is not a node-specific tool. While it's the most powerful way to visualize CPU time spent, it may have issues with how JavaScript code is optimized in Node.js 8 and above. See [perf output issues](#perf-output-issues) section below. + +### Use a pre-packaged tool + +If you want a single step that produces a flame graph locally, try [0x](https://www.npmjs.com/package/0x) + +For diagnosing production deployments, read these notes: [0x production servers](https://github.com/davidmarkclements/0x/blob/master/docs/production-servers.md) + +### Create a flame graph with system perf tools + +The purpose of this guide is to show steps involved in creating a flame graph and keep you in control of each step. 
+ +If you want to understand each step better, take a look at the sections that follow where we go into more detail. + +Now let's get to work. + +1. Install `perf` (usually available through the linux-tools-common package if not already installed) +2. try running `perf` - it might complain about missing kernel modules, install them too +3. run node with perf enabled (see [perf output issues](#perf-output-issues) for tips specific to Node.js versions) + + ```bash + perf record -e cycles:u -g -- node --perf-basic-prof app.js + ``` + +4. disregard warnings unless they're saying you can't run perf due to missing packages; you may get some warnings about not being able to access kernel module samples which you're not after anyway. +5. Run `perf script > perfs.out` to generate the data file you'll visualize in a moment. It's useful to [apply some cleanup](#filtering-out-node-js-internal-functions) for a more readable graph +6. install stackvis if not yet installed `npm i -g stackvis` +7. run `stackvis perf < perfs.out > flamegraph.htm` + +Now open the flame graph file in your favorite browser and watch it burn. It's color-coded so you can focus on the most saturated orange bars first. They're likely to represent CPU heavy functions. + +Worth mentioning - if you click an element of a flame graph a zoom-in of its surroundings will get displayed above the graph. + +### Using `perf` to sample a running process + +This is great for recording flame graph data from an already running process that you don't want to interrupt. Imagine a production process with a hard to reproduce issue. + +```bash +perf record -F99 -p `pgrep -n node` -g -- sleep 3 +``` + +Wait, what is that `sleep 3` for? It's there to keep the perf running - despite `-p` option pointing to a different pid, the command needs to be executed on a process and end with it. perf runs for the life of the command you pass to it, whether or not you're actually profiling that command. 
`sleep 3` ensures that perf runs for 3 seconds. + +Why is `-F` (profiling frequency) set to 99? It's a reasonable default. You can adjust if you want. `-F99` tells perf to take 99 samples per second, for more precision increase the value. Lower values should produce less output with less precise results. Precision you need depends on how long your CPU intensive functions really run. If you're looking for the reason of a noticeable slowdown, 99 frames per second should be more than enough. + +After you get that 3 second perf record, proceed with generating the flame graph with the last two steps from above. + +### Filtering out Node.js internal functions + +Usually you just want to look at the performance of your own calls, so filtering out Node.js and V8 internal functions can make the graph much easier to read. You can clean up your perf file with: + +```bash +sed -i \ + -e "/( __libc_start| LazyCompile | v8::internal::| Builtin:| Stub:| LoadIC:|\[unknown\]| LoadPolymorphicIC:)/d" \ + -e 's/ LazyCompile:[*~]\?/ /' \ + perfs.out +``` + +If you read your flame graph and it seems odd, as if something is missing in the key function taking up most time, try generating your flame graph without the filters - maybe you got a rare case of an issue with Node.js itself. + +### Node.js's profiling options + +`--perf-basic-prof-only-functions` and `--perf-basic-prof` are the two that are useful for debugging your JavaScript code. Other options are used for profiling Node.js itself, which is outside the scope of this guide. + +`--perf-basic-prof-only-functions` produces less output, so it's the option with least overhead. + +### Why do I need them at all? + +Well, without these options you'll still get a flame graph, but with most bars labeled `v8::Function::Call`. 
+ +## `perf` output issues + +### Node.js 8.x V8 pipeline changes + +Node.js 8.x and above ships with new optimizations to JavaScript compilation pipeline in V8 engine which makes function names/references unreachable for perf sometimes. (It's called Turbofan) + +The result is you might not get your function names right in the flame graph. + +You'll notice `ByteCodeHandler:` where you'd expect function names. + +[0x](https://www.npmjs.com/package/0x) has some mitigations for that built in. + +For details see: + +* https://github.com/nodejs/benchmarking/issues/168 +* https://github.com/nodejs/diagnostics/issues/148#issuecomment-369348961 + +### Node.js 10+ + +Node.js 10.x addresses the issue with Turbofan using the `--interpreted-frames-native-stack` flag. + +Run `node --interpreted-frames-native-stack --perf-basic-prof-only-functions` to get function names in the flame graph regardless of which pipeline V8 used to compile your JavaScript. + +### Broken labels in the flame graph + +If you're seeing labels looking like this + +``` +node`_ZN2v88internal11interpreter17BytecodeGenerator15VisitStatementsEPNS0_8ZoneListIPNS0_9StatementEEE +``` + +it means the Linux perf you're using was not compiled with demangle support, see https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1396654 for example + +## Examples + +Practice capturing flame graphs yourself with [a flame graph exercise](https://github.com/naugtur/node-example-flamegraph)! diff --git a/locale/es/docs/guides/domain-postmortem.md b/locale/es/docs/guides/domain-postmortem.md new file mode 100644 index 000000000000..243b24a9e760 --- /dev/null +++ b/locale/es/docs/guides/domain-postmortem.md @@ -0,0 +1,338 @@ +--- +title: Domain Module Postmortem +layout: docs.hbs +--- + +# Domain Module Postmortem + +## Usability Issues + +### Implicit Behavior + +It's possible for a developer to create a new domain and then simply run `domain.enter()`. 
Which then acts as a catch-all for any exception in the future that couldn't be observed by the thrower. Allowing a module author to intercept the exceptions of unrelated code in a different module. Preventing the originator of the code from knowing about its own exceptions. + +Here's an example of how one indirectly linked module can affect another: + +```js +// module a.js +const b = require('./b'); +const c = require('./c'); + +// module b.js +const d = require('domain').create(); +d.on('error', () => { /* silence everything */ }); +d.enter(); + +// module c.js +const dep = require('some-dep'); +dep.method(); // Uh-oh! This method doesn't actually exist. +``` + +Since module `b` enters the domain but never exits, any uncaught exception will be swallowed. Leaving module `c` in the dark as to why it didn't run the entire script. Leaving a potentially partially populated `module.exports`. Doing this is not the same as listening for `'uncaughtException'`. As the latter is explicitly meant to globally catch errors. The other issue is that domains are processed prior to any `'uncaughtException'` handlers, and prevent them from running. + +Another issue is that domains route errors automatically if no `'error'` handler was set on the event emitter. There is no opt-in mechanism for this, and it automatically propagates across the entire asynchronous chain. This may seem useful at first, but once asynchronous calls are two or more modules deep and one of them doesn't include an error handler the creator of the domain will suddenly be catching unexpected exceptions, and the thrower's exception will go unnoticed by the author. 
+ +The following is a simple example of how a missing `'error'` handler allows the active domain to hijack the error: + +```js +const domain = require('domain'); +const net = require('net'); +const d = domain.create(); +d.on('error', (err) => console.error(err.message)); + +d.run(() => net.createServer((c) => { + c.end(); + c.write('bye'); +}).listen(8000)); +``` + +Even manually removing the connection via `d.remove(c)` does not prevent the connection's error from being automatically intercepted. + +Failures that plagues both error routing and exception handling are the inconsistencies in how errors are bubbled. The following is an example of how nested domains will and won't bubble the exception based on when they happen: + +```js +const domain = require('domain'); +const net = require('net'); +const d = domain.create(); +d.on('error', () => console.error('d intercepted an error')); + +d.run(() => { + const server = net.createServer((c) => { + const e = domain.create(); // No 'error' handler being set. + e.run(() => { + // This will not be caught by d's error handler. + setImmediate(() => { + throw new Error('thrown from setImmediate'); + }); + // Though this one will bubble to d's error handler. + throw new Error('immediately thrown'); + }); + }).listen(8080); +}); +``` + +It may be expected that nested domains always remain nested, and will always propagate the exception up the domain stack. Or that exceptions will never automatically bubble. Unfortunately both these situations occur, leading to potentially confusing behavior that may even be prone to difficult to debug timing conflicts. + +### API Gaps + +While APIs based on using `EventEmitter` can use `bind()` and errback style callbacks can use `intercept()`, alternative APIs that implicitly bind to the active domain must be executed inside of `run()`. Meaning if module authors wanted to support domains using a mechanism alternative to those mentioned they must manually implement domain support themselves. 
Instead of being able to leverage the implicit mechanisms already in place. + +### Error Propagation + +Propagating errors across nested domains is not straightforward, if even possible. Existing documentation shows a simple example of how to `close()` an `http` server if there is an error in the request handler. What it does not explain is how to close the server if the request handler creates another domain instance for another async request. Using the following as a simple example of the failure of error propagation: + +```js +const d1 = domain.create(); +d1.foo = true; // custom member to make more visible in console +d1.on('error', (er) => { /* handle error */ }); + +d1.run(() => setTimeout(() => { + const d2 = domain.create(); + d2.bar = 43; + d2.on('error', (er) => console.error(er.message, domain._stack)); + d2.run(() => { + setTimeout(() => { + setTimeout(() => { + throw new Error('outer'); + }); + throw new Error('inner'); + }); + }); +})); +``` + +Even in the case that the domain instances are being used for local storage so access to resources is made available there is still no way to allow the error to continue propagating from `d2` back to `d1`. Quick inspection may tell us that simply throwing from `d2`'s domain `'error'` handler would allow `d1` to then catch the exception and execute its own error handler. Though that is not the case. Upon inspection of `domain._stack` you'll see that the stack only contains `d2`. + +This may be considered a failing of the API, but even if it did operate in this way there is still the issue of transmitting the fact that a branch in the asynchronous execution has failed, and that all further operations in that branch must cease. In the example of the http request handler, if we fire off several asynchronous requests and each one then `write()`'s data back to the client many more errors will arise from attempting to `write()` to a closed handle. More on this in _Resource Cleanup on Exception_. 
+ +### Resource Cleanup on Exception + +The following script contains a more complex example of properly cleaning up in a small resource dependency tree in the case that an exception occurs in a given connection or any of its dependencies. Breaking down the script into its basic operations: + +```js +'use strict'; + +const domain = require('domain'); +const EE = require('events'); +const fs = require('fs'); +const net = require('net'); +const util = require('util'); +const print = process._rawDebug; + +const pipeList = []; +const FILENAME = '/tmp/tmp.tmp'; +const PIPENAME = '/tmp/node-domain-example-'; +const FILESIZE = 1024; +let uid = 0; + +// Setting up temporary resources +const buf = Buffer.alloc(FILESIZE); +for (let i = 0; i < buf.length; i++) + buf[i] = ((Math.random() * 1e3) % 78) + 48; // Basic ASCII +fs.writeFileSync(FILENAME, buf); + +function ConnectionResource(c) { + EE.call(this); + this._connection = c; + this._alive = true; + this._domain = domain.create(); + this._id = Math.random().toString(32).substr(2).substr(0, 8) + (++uid); + + this._domain.add(c); + this._domain.on('error', () => { + this._alive = false; + }); +} +util.inherits(ConnectionResource, EE); + +ConnectionResource.prototype.end = function end(chunk) { + this._alive = false; + this._connection.end(chunk); + this.emit('end'); +}; + +ConnectionResource.prototype.isAlive = function isAlive() { + return this._alive; +}; + +ConnectionResource.prototype.id = function id() { + return this._id; +}; + +ConnectionResource.prototype.write = function write(chunk) { + this.emit('data', chunk); + return this._connection.write(chunk); +}; + +// Example begin +net.createServer((c) => { + const cr = new ConnectionResource(c); + + const d1 = domain.create(); + fs.open(FILENAME, 'r', d1.intercept((fd) => { + streamInParts(fd, cr, 0); + })); + + pipeData(cr); + + c.on('close', () => cr.end()); +}).listen(8080); + +function streamInParts(fd, cr, pos) { + const d2 = domain.create(); + const alive = true; 
+ d2.on('error', (er) => { + print('d2 error:', er.message); + cr.end(); + }); + fs.read(fd, Buffer.alloc(10), 0, 10, pos, d2.intercept((bRead, buf) => { + if (!cr.isAlive()) { + return fs.close(fd); + } + if (cr._connection.bytesWritten < FILESIZE) { + // Documentation says callback is optional, but doesn't mention that if + // the write fails an exception will be thrown. + const goodtogo = cr.write(buf); + if (goodtogo) { + setTimeout(() => streamInParts(fd, cr, pos + bRead), 1000); + } else { + cr._connection.once('drain', () => streamInParts(fd, cr, pos + bRead)); + } + return; + } + cr.end(buf); + fs.close(fd); + })); +} + +function pipeData(cr) { + const pname = PIPENAME + cr.id(); + const ps = net.createServer(); + const d3 = domain.create(); + const connectionList = []; + d3.on('error', (er) => { + print('d3 error:', er.message); + cr.end(); + }); + d3.add(ps); + ps.on('connection', (conn) => { + connectionList.push(conn); + conn.on('data', () => {}); // don't care about incoming data. 
+ conn.on('close', () => { + connectionList.splice(connectionList.indexOf(conn), 1); + }); + }); + cr.on('data', (chunk) => { + for (let i = 0; i < connectionList.length; i++) { + connectionList[i].write(chunk); + } + }); + cr.on('end', () => { + for (let i = 0; i < connectionList.length; i++) { + connectionList[i].end(); + } + ps.close(); + }); + pipeList.push(pname); + ps.listen(pname); +} + +process.on('SIGINT', () => process.exit()); +process.on('exit', () => { + try { + for (let i = 0; i < pipeList.length; i++) { + fs.unlinkSync(pipeList[i]); + } + fs.unlinkSync(FILENAME); + } catch (e) { } +}); + +``` + +* When a new connection happens, concurrently: + * Open a file on the file system + * Open Pipe to unique socket +* Read a chunk of the file asynchronously +* Write chunk to both the TCP connection and any listening sockets +* If any of these resources error, notify all other attached resources that they need to clean up and shutdown + +As we can see from this example a lot more must be done to properly clean up resources when something fails than what can be done strictly through the domain API. All that domains offer is an exception aggregation mechanism. Even the potentially useful ability to propagate data with the domain is easily countered, in this example, by passing the needed resources as a function argument. + +One problem domains perpetuated was the supposed simplicity of being able to continue execution, contrary to what the documentation stated, of the application despite an unexpected exception. This example demonstrates the fallacy behind that idea. + +Attempting proper resource cleanup on unexpected exception becomes more complex as the application itself grows in complexity. This example only has 3 basic resources in play, and all of them with a clear dependency path. If an application uses something like shared resources or resource reuse the ability to cleanup, and properly test that cleanup has been done, grows greatly. 
+ +In the end, in terms of handling errors, domains aren't much more than a glorified `'uncaughtException'` handler. Except with more implicit and unobservable behavior by third-parties. + +### Resource Propagation + +Another use case for domains was to use it to propagate data along asynchronous data paths. One problematic point is the ambiguity of when to expect the correct domain when there are multiple in the stack (which must be assumed if the async stack works with other modules). Also the conflict between being able to depend on a domain for error handling while also having it available to retrieve the necessary data. + +The following is a involved example demonstrating the failing using domains to propagate data along asynchronous stacks: + +```js +const domain = require('domain'); +const net = require('net'); + +const server = net.createServer((c) => { + // Use a domain to propagate data across events within the + // connection so that we don't have to pass arguments + // everywhere. + const d = domain.create(); + d.data = { connection: c }; + d.add(c); + // Mock class that does some useless async data transformation + // for demonstration purposes. + const ds = new DataStream(dataTransformed); + c.on('data', (chunk) => ds.data(chunk)); +}).listen(8080, () => console.log('listening on 8080')); + +function dataTransformed(chunk) { + // FAIL! Because the DataStream instance also created a + // domain we have now lost the active domain we had + // hoped to use. + domain.active.data.connection.write(chunk); +} + +function DataStream(cb) { + this.cb = cb; + // DataStream wants to use domains for data propagation too! + // Unfortunately this will conflict with any domain that + // already exists. + this.domain = domain.create(); + this.domain.data = { inst: this }; +} + +DataStream.prototype.data = function data(chunk) { + // This code is self contained, but pretend it's a complex + // operation that crosses at least one other module. 
So + // passing along "this", etc., is not easy. + this.domain.run(() => { + // Simulate an async operation that does the data transform. + setImmediate(() => { + for (let i = 0; i < chunk.length; i++) + chunk[i] = ((chunk[i] + Math.random() * 100) % 96) + 33; + // Grab the instance from the active domain and use that + // to call the user's callback. + const self = domain.active.data.inst; + self.cb(chunk); + }); + }); +}; +``` + +The above shows that it is difficult to have more than one asynchronous API attempt to use domains to propagate data. This example could possibly be fixed by assigning `parent: domain.active` in the `DataStream` constructor. Then restoring it via `domain.active = domain.active.data.parent` just before the user's callback is called. Also the instantiation of `DataStream` in the `'connection'` callback must be run inside `d.run()`, instead of simply using `d.add(c)`, otherwise there will be no active domain. + +In short, for this to have a prayer of a chance usage would need to strictly adhere to a set of guidelines that would be difficult to enforce or test. + +## Performance Issues + +A significant deterrent from using domains is the overhead. Using node's built-in http benchmark, `http_simple.js`, without domains it can handle over 22,000 requests/second. Whereas if it's run with `NODE_USE_DOMAINS=1` that number drops down to under 17,000 requests/second. In this case there is only a single global domain. If we edit the benchmark so the http request callback creates a new domain instance performance drops further to 15,000 requests/second. + +While this probably wouldn't affect a server only serving a few hundred or even a thousand requests per second, the amount of overhead is directly proportional to the number of asynchronous requests made. So if a single connection needs to connect to several other services all of those will contribute to the overall latency of delivering the final product to the client. 
+ +Using `AsyncWrap` and tracking the number of times `init`/`pre`/`post`/`destroy` are called in the mentioned benchmark we find that the sum of all events called is over 170,000 times per second. This means even adding 1 microsecond overhead per call for any type of setup or tear down will result in a 17% performance loss. Granted, this is for the optimized scenario of the benchmark, but I believe this demonstrates the necessity for a mechanism such as domain to be as cheap to run as possible. + +## Looking Ahead + +The domain module has been soft deprecated since Dec 2014, but has not yet been removed because node offers no alternative functionality at the moment. As of this writing there is ongoing work building out the `AsyncWrap` API and a proposal for Zones being prepared for the TC39. At such time there is suitable functionality to replace domains it will undergo the full deprecation cycle and eventually be removed from core. diff --git a/locale/es/docs/guides/dont-block-the-event-loop.md b/locale/es/docs/guides/dont-block-the-event-loop.md new file mode 100644 index 000000000000..539fafc8372d --- /dev/null +++ b/locale/es/docs/guides/dont-block-the-event-loop.md @@ -0,0 +1,385 @@ +--- +title: Don't Block the Event Loop (or the Worker Pool) +layout: docs.hbs +--- + +# Don't Block the Event Loop (or the Worker Pool) + +## Should you read this guide? +If you're writing anything more complicated than a brief command-line script, reading this should help you write higher-performance, more-secure applications. + +This document is written with Node.js servers in mind, but the concepts apply to complex Node.js applications as well. Where OS-specific details vary, this document is Linux-centric. + +## Summary +Node.js runs JavaScript code in the Event Loop (initialization and callbacks), and offers a Worker Pool to handle expensive tasks like file I/O. Node.js scales well, sometimes better than more heavyweight approaches like Apache. 
The secret to the scalability of Node.js is that it uses a small number of threads to handle many clients. If Node.js can make do with fewer threads, then it can spend more of your system's time and memory working on clients rather than on paying space and time overheads for threads (memory, context-switching). But because Node.js has only a few threads, you must structure your application to use them wisely. + +Here's a good rule of thumb for keeping your Node.js server speedy: *Node.js is fast when the work associated with each client at any given time is "small"*. + +This applies to callbacks on the Event Loop and tasks on the Worker Pool. + +## Why should I avoid blocking the Event Loop and the Worker Pool? +Node.js uses a small number of threads to handle many clients. In Node.js there are two types of threads: one Event Loop (aka the main loop, main thread, event thread, etc.), and a pool of `k` Workers in a Worker Pool (aka the threadpool). + +If a thread is taking a long time to execute a callback (Event Loop) or a task (Worker), we call it "blocked". While a thread is blocked working on behalf of one client, it cannot handle requests from any other clients. This provides two motivations for blocking neither the Event Loop nor the Worker Pool: + +1. Performance: If you regularly perform heavyweight activity on either type of thread, the *throughput* (requests/second) of your server will suffer. +2. Security: If it is possible that for certain input one of your threads might block, a malicious client could submit this "evil input", make your threads block, and keep them from working on other clients. This would be a [Denial of Service](https://en.wikipedia.org/wiki/Denial-of-service_attack) attack. + +## A quick review of Node + +Node.js uses the Event-Driven Architecture: it has an Event Loop for orchestration and a Worker Pool for expensive tasks. + +### What code runs on the Event Loop? 
+When they begin, Node.js applications first complete an initialization phase, `require`'ing modules and registering callbacks for events. Node.js applications then enter the Event Loop, responding to incoming client requests by executing the appropriate callback. This callback executes synchronously, and may register asynchronous requests to continue processing after it completes. The callbacks for these asynchronous requests will also be executed on the Event Loop. + +The Event Loop will also fulfill the non-blocking asynchronous requests made by its callbacks, e.g., network I/O. + +In summary, the Event Loop executes the JavaScript callbacks registered for events, and is also responsible for fulfilling non-blocking asynchronous requests like network I/O. + +### What code runs on the Worker Pool? +The Worker Pool of Node.js is implemented in libuv ([docs](http://docs.libuv.org/en/v1.x/threadpool.html)), which exposes a general task submission API. + +Node.js uses the Worker Pool to handle "expensive" tasks. This includes I/O for which an operating system does not provide a non-blocking version, as well as particularly CPU-intensive tasks. + +These are the Node.js module APIs that make use of this Worker Pool: + +1. I/O-intensive + 1. [DNS](https://nodejs.org/api/dns.html): `dns.lookup()`, `dns.lookupService()`. + 2. [File System](https://nodejs.org/api/fs.html#fs_threadpool_usage): All file system APIs except `fs.FSWatcher()` and those that are explicitly synchronous use libuv's threadpool. +2. CPU-intensive + 1. [Crypto](https://nodejs.org/api/crypto.html): `crypto.pbkdf2()`, `crypto.scrypt()`, `crypto.randomBytes()`, `crypto.randomFill()`, `crypto.generateKeyPair()`. + 2. [Zlib](https://nodejs.org/api/zlib.html#zlib_threadpool_usage): All zlib APIs except those that are explicitly synchronous use libuv's threadpool. + +In many Node.js applications, these APIs are the only sources of tasks for the Worker Pool. 
Applications and modules that use a [C++ add-on](https://nodejs.org/api/addons.html) can submit other tasks to the Worker Pool. + +For the sake of completeness, we note that when you call one of these APIs from a callback on the Event Loop, the Event Loop pays some minor setup costs as it enters the Node.js C++ bindings for that API and submits a task to the Worker Pool. These costs are negligible compared to the overall cost of the task, which is why the Event Loop is offloading it. When submitting one of these tasks to the Worker Pool, Node.js provides a pointer to the corresponding C++ function in the Node.js C++ bindings. + +### How does Node.js decide what code to run next? +Abstractly, the Event Loop and the Worker Pool maintain queues for pending events and pending tasks, respectively. + +In truth, the Event Loop does not actually maintain a queue. Instead, it has a collection of file descriptors that it asks the operating system to monitor, using a mechanism like [epoll](http://man7.org/linux/man-pages/man7/epoll.7.html) (Linux), [kqueue](https://developer.apple.com/library/content/documentation/Darwin/Conceptual/FSEvents_ProgGuide/KernelQueues/KernelQueues.html) (OSX), event ports (Solaris), or [IOCP](https://msdn.microsoft.com/en-us/library/windows/desktop/aa365198.aspx) (Windows). These file descriptors correspond to network sockets, any files it is watching, and so on. When the operating system says that one of these file descriptors is ready, the Event Loop translates it to the appropriate event and invokes the callback(s) associated with that event. You can learn more about this process [here](https://www.youtube.com/watch?v=P9csgxBgaZ8). + +In contrast, the Worker Pool uses a real queue whose entries are tasks to be processed. A Worker pops a task from this queue and works on it, and when finished the Worker raises an "At least one task is finished" event for the Event Loop. + +### What does this mean for application design? 
+In a one-thread-per-client system like Apache, each pending client is assigned its own thread. If a thread handling one client blocks, the operating system will interrupt it and give another client a turn. The operating system thus ensures that clients that require a small amount of work are not penalized by clients that require more work. + +Because Node.js handles many clients with few threads, if a thread blocks handling one client's request, then pending client requests may not get a turn until the thread finishes its callback or task. *The fair treatment of clients is thus the responsibility of your application*. This means that you shouldn't do too much work for any client in any single callback or task. + +This is part of why Node.js can scale well, but it also means that you are responsible for ensuring fair scheduling. The next sections talk about how to ensure fair scheduling for the Event Loop and for the Worker Pool. + +## Don't block the Event Loop +The Event Loop notices each new client connection and orchestrates the generation of a response. All incoming requests and outgoing responses pass through the Event Loop. This means that if the Event Loop spends too long at any point, all current and new clients will not get a turn. + +You should make sure you never block the Event Loop. In other words, each of your JavaScript callbacks should complete quickly. This of course also applies to your `await`'s, your `Promise.then`'s, and so on. + +A good way to ensure this is to reason about the ["computational complexity"](https://en.wikipedia.org/wiki/Time_complexity) of your callbacks. If your callback takes a constant number of steps no matter what its arguments are, then you'll always give every pending client a fair turn. If your callback takes a different number of steps depending on its arguments, then you should think about how long the arguments might be. + +Example 1: A constant-time callback. 
+ +```javascript +app.get('/constant-time', (req, res) => { + res.sendStatus(200); +}); +``` + +Example 2: An `O(n)` callback. This callback will run quickly for small `n` and more slowly for large `n`. + +```javascript +app.get('/countToN', (req, res) => { + let n = req.query.n; + + // n iterations before giving someone else a turn + for (let i = 0; i < n; i++) { + console.log(`Iter ${i}`); + } + + res.sendStatus(200); +}); +``` + +Example 3: An `O(n^2)` callback. This callback will still run quickly for small `n`, but for large `n` it will run much more slowly than the previous `O(n)` example. + +```javascript +app.get('/countToN2', (req, res) => { + let n = req.query.n; + + // n^2 iterations before giving someone else a turn + for (let i = 0; i < n; i++) { + for (let j = 0; j < n; j++) { + console.log(`Iter ${i}.${j}`); + } + } + + res.sendStatus(200); +}); +``` + +### How careful should you be? +Node.js uses the Google V8 engine for JavaScript, which is quite fast for many common operations. Exceptions to this rule are regexps and JSON operations, discussed below. + +However, for complex tasks you should consider bounding the input and rejecting inputs that are too long. That way, even if your callback has large complexity, by bounding the input you ensure the callback cannot take more than the worst-case time on the longest acceptable input. You can then evaluate the worst-case cost of this callback and determine whether its running time is acceptable in your context. + +### Blocking the Event Loop: REDOS +One common way to block the Event Loop disastrously is by using a "vulnerable" [regular expression](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions). + +#### Avoiding vulnerable regular expressions +A regular expression (regexp) matches an input string against a pattern. We usually think of a regexp match as requiring a single pass through the input string --- `O(n)` time where `n` is the length of the input string. 
In many cases, a single pass is indeed all it takes. Unfortunately, in some cases the regexp match might require an exponential number of trips through the input string --- `O(2^n)` time. An exponential number of trips means that if the engine requires `x` trips to determine a match, it will need `2*x` trips if we add only one more character to the input string. Since the number of trips is linearly related to the time required, the effect of this evaluation will be to block the Event Loop. + +A *vulnerable regular expression* is one on which your regular expression engine might take exponential time, exposing you to [REDOS](https://www.owasp.org/index.php/Regular_expression_Denial_of_Service_-_ReDoS) on "evil input". Whether or not your regular expression pattern is vulnerable (i.e. the regexp engine might take exponential time on it) is actually a difficult question to answer, and varies depending on whether you're using Perl, Python, Ruby, Java, JavaScript, etc., but here are some rules of thumb that apply across all of these languages: + +1. Avoid nested quantifiers like `(a+)*`. V8's regexp engine can handle some of these quickly, but others are vulnerable. +2. Avoid OR's with overlapping clauses, like `(a|a)*`. Again, these are sometimes-fast. +3. Avoid using backreferences, like `(a.*) \1`. No regexp engine can guarantee evaluating these in linear time. +4. If you're doing a simple string match, use `indexOf` or the local equivalent. It will be cheaper and will never take more than `O(n)`. + +If you aren't sure whether your regular expression is vulnerable, remember that Node.js generally doesn't have trouble reporting a *match* even for a vulnerable regexp and a long input string. The exponential behavior is triggered when there is a mismatch but Node.js can't be certain until it tries many paths through the input string. 
+ +#### A REDOS example +Here is an example vulnerable regexp exposing its server to REDOS: + +```javascript +app.get('/redos-me', (req, res) => { + let filePath = req.query.filePath; + + // REDOS + if (filePath.match(/(\/.+)+$/)) { + console.log('valid path'); + } + else { + console.log('invalid path'); + } + + res.sendStatus(200); +}); +``` + +The vulnerable regexp in this example is a (bad!) way to check for a valid path on Linux. It matches strings that are a sequence of "/"-delimited names, like "/a/b/c". It is dangerous because it violates rule 1: it has a doubly-nested quantifier. + +If a client queries with filePath `///.../\n` (100 /'s followed by a newline character that the regexp's "." won't match), then the Event Loop will take effectively forever, blocking the Event Loop. This client's REDOS attack causes all other clients not to get a turn until the regexp match finishes. + +For this reason, you should be leery of using complex regular expressions to validate user input. + +#### Anti-REDOS Resources +There are some tools to check your regexps for safety, like + +* [safe-regex](https://github.com/substack/safe-regex) +* [rxxr2](http://www.cs.bham.ac.uk/~hxt/research/rxxr2/). However, neither of these will catch all vulnerable regexps. + +Another approach is to use a different regexp engine. You could use the [node-re2](https://github.com/uhop/node-re2) module, which uses Google's blazing-fast [RE2](https://github.com/google/re2) regexp engine. But be warned, RE2 is not 100% compatible with V8's regexps, so check for regressions if you swap in the node-re2 module to handle your regexps. And particularly complicated regexps are not supported by node-re2. + +If you're trying to match something "obvious", like a URL or a file path, find an example in a [regexp library](http://www.regexlib.com) or use an npm module, e.g. [ip-regex](https://www.npmjs.com/package/ip-regex). 
+ +### Blocking the Event Loop: Node.js core modules +Several Node.js core modules have synchronous expensive APIs, including: + +* [Encryption](https://nodejs.org/api/crypto.html) +* [Compression](https://nodejs.org/api/zlib.html) +* [File system](https://nodejs.org/api/fs.html) +* [Child process](https://nodejs.org/api/child_process.html) + +These APIs are expensive, because they involve significant computation (encryption, compression), require I/O (file I/O), or potentially both (child process). These APIs are intended for scripting convenience, but are not intended for use in the server context. If you execute them on the Event Loop, they will take far longer to complete than a typical JavaScript instruction, blocking the Event Loop. + +In a server, *you should not use the following synchronous APIs from these modules*: + +* Encryption: + * `crypto.randomBytes` (synchronous version) + * `crypto.randomFillSync` + * `crypto.pbkdf2Sync` + * You should also be careful about providing large input to the encryption and decryption routines. +* Compression: + * `zlib.inflateSync` + * `zlib.deflateSync` +* File system: + * Do not use the synchronous file system APIs. For example, if the file you access is in a [distributed file system](https://en.wikipedia.org/wiki/Clustered_file_system#Distributed_file_systems) like [NFS](https://en.wikipedia.org/wiki/Network_File_System), access times can vary widely. +* Child process: + * `child_process.spawnSync` + * `child_process.execSync` + * `child_process.execFileSync` + +This list is reasonably complete as of Node.js v9. + +### Blocking the Event Loop: JSON DOS +`JSON.parse` and `JSON.stringify` are other potentially expensive operations. While these are `O(n)` in the length of the input, for large `n` they can take surprisingly long. + +If your server manipulates JSON objects, particularly those from a client, you should be cautious about the size of the objects or strings you work with on the Event Loop. 
+ +Example: JSON blocking. We create an object `obj` of size 2^21 and `JSON.stringify` it, run `indexOf` on the string, and then JSON.parse it. The `JSON.stringify`'d string is 50MB. It takes 0.7 seconds to stringify the object, 0.03 seconds to indexOf on the 50MB string, and 1.3 seconds to parse the string. + +```javascript +var obj = { a: 1 }; +var niter = 20; + +var before, str, pos, res, took; + +for (var i = 0; i < niter; i++) { + obj = { obj1: obj, obj2: obj }; // Doubles in size each iter +} + +before = process.hrtime(); +str = JSON.stringify(obj); +took = process.hrtime(before); +console.log('JSON.stringify took ' + took); + +before = process.hrtime(); +pos = str.indexOf('nomatch'); +took = process.hrtime(before); +console.log('Pure indexof took ' + took); + +before = process.hrtime(); +res = JSON.parse(str); +took = process.hrtime(before); +console.log('JSON.parse took ' + took); +``` + +There are npm modules that offer asynchronous JSON APIs. See for example: + +* [JSONStream](https://www.npmjs.com/package/JSONStream), which has stream APIs. +* [Big-Friendly JSON](https://www.npmjs.com/package/bfj), which has stream APIs as well as asynchronous versions of the standard JSON APIs using the partitioning-on-the-Event-Loop paradigm outlined below. + +### Complex calculations without blocking the Event Loop +Suppose you want to do complex calculations in JavaScript without blocking the Event Loop. You have two options: partitioning or offloading. + +#### Partitioning +You could *partition* your calculations so that each runs on the Event Loop but regularly yields (gives turns to) other pending events. In JavaScript it's easy to save the state of an ongoing task in a closure, as shown in example 2 below. + +For a simple example, suppose you want to compute the average of the numbers `1` to `n`. 
+ +Example 1: Un-partitioned average, costs `O(n)` + +```javascript +for (let i = 0; i < n; i++) + sum += i; +let avg = sum / n; +console.log('avg: ' + avg); +``` + +Example 2: Partitioned average, each of the `n` asynchronous steps costs `O(1)`. + +```javascript +function asyncAvg(n, avgCB) { + // Save ongoing sum in JS closure. + var sum = 0; + function help(i, cb) { + sum += i; + if (i == n) { + cb(sum); + return; + } + + // "Asynchronous recursion". + // Schedule next operation asynchronously. + setImmediate(help.bind(null, i+1, cb)); + } + + // Start the helper, with CB to call avgCB. + help(1, function(sum){ + var avg = sum/n; + avgCB(avg); + }); +} + +asyncAvg(n, function(avg){ + console.log('avg of 1-n: ' + avg); +}); +``` + +You can apply this principle to array iterations and so forth. + +#### Offloading +If you need to do something more complex, partitioning is not a good option. This is because partitioning uses only the Event Loop, and you won't benefit from multiple cores almost certainly available on your machine. *Remember, the Event Loop should orchestrate client requests, not fulfill them itself.* For a complicated task, move the work off of the Event Loop onto a Worker Pool. + +##### How to offload +You have two options for a destination Worker Pool to which to offload work. + +1. You can use the built-in Node.js Worker Pool by developing a [C++ addon](https://nodejs.org/api/addons.html). On older versions of Node, build your C++ addon using [NAN](https://github.com/nodejs/nan), and on newer versions use [N-API](https://nodejs.org/api/n-api.html). [node-webworker-threads](https://www.npmjs.com/package/webworker-threads) offers a JavaScript-only way to access the Node.js Worker Pool. +2. You can create and manage your own Worker Pool dedicated to computation rather than the Node.js I/O-themed Worker Pool. 
The most straightforward way to do this is using [Child Process](https://nodejs.org/api/child_process.html) or [Cluster](https://nodejs.org/api/cluster.html).
+
+You should *not* simply create a [Child Process](https://nodejs.org/api/child_process.html) for every client. You can receive client requests more quickly than you can create and manage children, and your server might become a [fork bomb](https://en.wikipedia.org/wiki/Fork_bomb).
+
+##### Downside of offloading
+The downside of the offloading approach is that it incurs overhead in the form of *communication costs*. Only the Event Loop is allowed to see the "namespace" (JavaScript state) of your application. From a Worker, you cannot manipulate a JavaScript object in the Event Loop's namespace. Instead, you have to serialize and deserialize any objects you wish to share. Then the Worker can operate on its own copy of these object(s) and return the modified object (or a "patch") to the Event Loop.
+
+For serialization concerns, see the section on JSON DOS.
+
+##### Some suggestions for offloading
+You may wish to distinguish between CPU-intensive and I/O-intensive tasks because they have markedly different characteristics.
+
+A CPU-intensive task only makes progress when its Worker is scheduled, and the Worker must be scheduled onto one of your machine's [logical cores](https://nodejs.org/api/os.html#os_os_cpus). If you have 4 logical cores and 5 Workers, one of these Workers cannot make progress. As a result, you are paying overhead (memory and scheduling costs) for this Worker and getting no return for it.
+
+I/O-intensive tasks involve querying an external service provider (DNS, file system, etc.) and waiting for its response. While a Worker with an I/O-intensive task is waiting for its response, it has nothing else to do and can be de-scheduled by the operating system, giving another Worker a chance to submit their request. 
Thus, *I/O-intensive tasks will be making progress even while the associated thread is not running*. External service providers like databases and file systems have been highly optimized to handle many pending requests concurrently. For example, a file system will examine a large set of pending write and read requests to merge conflicting updates and to retrieve files in an optimal order (e.g. see [these slides](http://researcher.ibm.com/researcher/files/il-AVISHAY/01-block_io-v1.3.pdf)). + +If you rely on only one Worker Pool, e.g. the Node.js Worker Pool, then the differing characteristics of CPU-bound and I/O-bound work may harm your application's performance. + +For this reason, you might wish to maintain a separate Computation Worker Pool. + +#### Offloading: conclusions +For simple tasks, like iterating over the elements of an arbitrarily long array, partitioning might be a good option. If your computation is more complex, offloading is a better approach: the communication costs, i.e. the overhead of passing serialized objects between the Event Loop and the Worker Pool, are offset by the benefit of using multiple cores. + +However, if your server relies heavily on complex calculations, you should think about whether Node.js is really a good fit. Node.js excels for I/O-bound work, but for expensive computation it might not be the best option. + +If you take the offloading approach, see the section on not blocking the Worker Pool. + +## Don't block the Worker Pool +Node.js has a Worker Pool composed of `k` Workers. If you are using the Offloading paradigm discussed above, you might have a separate Computational Worker Pool, to which the same principles apply. In either case, let us assume that `k` is much smaller than the number of clients you might be handling concurrently. This is in keeping with the "one thread for many clients" philosophy of Node.js, the secret to its scalability. 
+
+As discussed above, each Worker completes its current Task before proceeding to the next one on the Worker Pool queue.
+
+Now, there will be variation in the cost of the Tasks required to handle your clients' requests. Some Tasks can be completed quickly (e.g. reading short or cached files, or producing a small number of random bytes), and others will take longer (e.g. reading larger or uncached files, or generating more random bytes). Your goal should be to *minimize the variation in Task times*, and you should use *Task partitioning* to accomplish this.
+
+### Minimizing the variation in Task times
+If a Worker's current Task is much more expensive than other Tasks, then it will be unavailable to work on other pending Tasks. In other words, *each relatively long Task effectively decreases the size of the Worker Pool by one until it is completed*. This is undesirable because, up to a point, the more Workers in the Worker Pool, the greater the Worker Pool throughput (tasks/second) and thus the greater the server throughput (client requests/second). One client with a relatively expensive Task will decrease the throughput of the Worker Pool, in turn decreasing the throughput of the server.
+
+To avoid this, you should try to minimize variation in the length of Tasks you submit to the Worker Pool. While it is appropriate to treat the external systems accessed by your I/O requests (DB, FS, etc.) as black boxes, you should be aware of the relative cost of these I/O requests, and should avoid submitting requests you can expect to be particularly long.
+
+Two examples should illustrate the possible variation in task times.
+
+#### Variation example: Long-running file system reads
+Suppose your server must read files in order to handle some client requests. After consulting the Node.js [File system](https://nodejs.org/api/fs.html) APIs, you opted to use `fs.readFile()` for simplicity. 
However, `fs.readFile()` is ([currently](https://github.com/nodejs/node/pull/17054)) not partitioned: it submits a single `fs.read()` Task spanning the entire file. If you read shorter files for some users and longer files for others, `fs.readFile()` may introduce significant variation in Task lengths, to the detriment of Worker Pool throughput. + +For a worst-case scenario, suppose an attacker can convince your server to read an *arbitrary* file (this is a [directory traversal vulnerability](https://www.owasp.org/index.php/Path_Traversal)). If your server is running Linux, the attacker can name an extremely slow file: [`/dev/random`](http://man7.org/linux/man-pages/man4/random.4.html). For all practical purposes, `/dev/random` is infinitely slow, and every Worker asked to read from `/dev/random` will never finish that Task. An attacker then submits `k` requests, one for each Worker, and no other client requests that use the Worker Pool will make progress. + +#### Variation example: Long-running crypto operations +Suppose your server generates cryptographically secure random bytes using [`crypto.randomBytes()`](https://nodejs.org/api/crypto.html#crypto_crypto_randombytes_size_callback). `crypto.randomBytes()` is not partitioned: it creates a single `randomBytes()` Task to generate as many bytes as you requested. If you create fewer bytes for some users and more bytes for others, `crypto.randomBytes()` is another source of variation in Task lengths. + +### Task partitioning +Tasks with variable time costs can harm the throughput of the Worker Pool. To minimize variation in Task times, as far as possible you should *partition* each Task into comparable-cost sub-Tasks. When each sub-Task completes it should submit the next sub-Task, and when the final sub-Task completes it should notify the submitter. + +To continue the `fs.readFile()` example, you should instead use `fs.read()` (manual partitioning) or `ReadStream` (automatically partitioned). 
+ +The same principle applies to CPU-bound tasks; the `asyncAvg` example might be inappropriate for the Event Loop, but it is well suited to the Worker Pool. + +When you partition a Task into sub-Tasks, shorter Tasks expand into a small number of sub-Tasks, and longer Tasks expand into a larger number of sub-Tasks. Between each sub-Task of a longer Task, the Worker to which it was assigned can work on a sub-Task from another, shorter, Task, thus improving the overall Task throughput of the Worker Pool. + +Note that the number of sub-Tasks completed is not a useful metric for the throughput of the Worker Pool. Instead, concern yourself with the number of *Tasks* completed. + +### Avoiding Task partitioning +Recall that the purpose of Task partitioning is to minimize the variation in Task times. If you can distinguish between shorter Tasks and longer Tasks (e.g. summing an array vs. sorting an array), you could create one Worker Pool for each class of Task. Routing shorter Tasks and longer Tasks to separate Worker Pools is another way to minimize Task time variation. + +In favor of this approach, partitioning Tasks incurs overhead (the costs of creating a Worker Pool Task representation and of manipulating the Worker Pool queue), and avoiding partitioning saves you the costs of additional trips to the Worker Pool. It also keeps you from making mistakes in partitioning your Tasks. + +The downside of this approach is that Workers in all of these Worker Pools will incur space and time overheads and will compete with each other for CPU time. Remember that each CPU-bound Task makes progress only while it is scheduled. As a result, you should only consider this approach after careful analysis. + +### Worker Pool: conclusions +Whether you use only the Node.js Worker Pool or maintain separate Worker Pool(s), you should optimize the Task throughput of your Pool(s). + +To do this, minimize the variation in Task times by using Task partitioning. 
+ +## The risks of npm modules +While the Node.js core modules offer building blocks for a wide variety of applications, sometimes something more is needed. Node.js developers benefit tremendously from the [npm ecosystem](https://www.npmjs.com/), with hundreds of thousands of modules offering functionality to accelerate your development process. + +Remember, however, that the majority of these modules are written by third-party developers and are generally released with only best-effort guarantees. A developer using an npm module should be concerned about two things, though the latter is frequently forgotten. + +1. Does it honor its APIs? +2. Might its APIs block the Event Loop or a Worker? Many modules make no effort to indicate the cost of their APIs, to the detriment of the community. + +For simple APIs you can estimate the cost of the APIs; the cost of string manipulation isn't hard to fathom. But in many cases it's unclear how much an API might cost. + +*If you are calling an API that might do something expensive, double-check the cost. Ask the developers to document it, or examine the source code yourself (and submit a PR documenting the cost).* + +Remember, even if the API is asynchronous, you don't know how much time it might spend on a Worker or on the Event Loop in each of its partitions. For example, suppose in the `asyncAvg` example given above, each call to the helper function summed *half* of the numbers rather than one of them. Then this function would still be asynchronous, but the cost of each partition would be `O(n)`, not `O(1)`, making it much less safe to use for arbitrary values of `n`. + +## Conclusion +Node.js has two types of threads: one Event Loop and `k` Workers. The Event Loop is responsible for JavaScript callbacks and non-blocking I/O, and a Worker executes tasks corresponding to C++ code that completes an asynchronous request, including blocking I/O and CPU-intensive work. 
Both types of threads work on no more than one activity at a time. If any callback or task takes a long time, the thread running it becomes *blocked*. If your application makes blocking callbacks or tasks, this can lead to degraded throughput (clients/second) at best, and complete denial of service at worst. + +To write a high-throughput, more DoS-proof web server, you must ensure that on benign and on malicious input, neither your Event Loop nor your Workers will block. diff --git a/locale/es/docs/guides/event-loop-timers-and-nexttick.md b/locale/es/docs/guides/event-loop-timers-and-nexttick.md new file mode 100644 index 000000000000..c1105b3a73e9 --- /dev/null +++ b/locale/es/docs/guides/event-loop-timers-and-nexttick.md @@ -0,0 +1,335 @@ +--- +title: The Node.js Event Loop, Timers, and process.nextTick() +layout: docs.hbs +--- + +# The Node.js Event Loop, Timers, and `process.nextTick()` + +## What is the Event Loop? + +The event loop is what allows Node.js to perform non-blocking I/O operations — despite the fact that JavaScript is single-threaded — by offloading operations to the system kernel whenever possible. + +Since most modern kernels are multi-threaded, they can handle multiple operations executing in the background. When one of these operations completes, the kernel tells Node.js so that the appropriate callback may be added to the **poll** queue to eventually be executed. We'll explain this in further detail later in this topic. + +## Event Loop Explained + +When Node.js starts, it initializes the event loop, processes the provided input script (or drops into the [REPL](https://nodejs.org/api/repl.html#repl_repl), which is not covered in this document) which may make async API calls, schedule timers, or call `process.nextTick()`, then begins processing the event loop. + +The following diagram shows a simplified overview of the event loop's order of operations. 
+ +``` + ┌───────────────────────────┐ +┌─>│ timers │ +│ └─────────────┬─────────────┘ +│ ┌─────────────┴─────────────┐ +│ │ pending callbacks │ +│ └─────────────┬─────────────┘ +│ ┌─────────────┴─────────────┐ +│ │ idle, prepare │ +│ └─────────────┬─────────────┘ ┌───────────────┐ +│ ┌─────────────┴─────────────┐ │ incoming: │ +│ │ poll │<─────┤ connections, │ +│ └─────────────┬─────────────┘ │ data, etc. │ +│ ┌─────────────┴─────────────┐ └───────────────┘ +│ │ check │ +│ └─────────────┬─────────────┘ +│ ┌─────────────┴─────────────┐ +└──┤ close callbacks │ + └───────────────────────────┘ +``` + +*note: each box will be referred to as a "phase" of the event loop.* + +Each phase has a FIFO queue of callbacks to execute. While each phase is special in its own way, generally, when the event loop enters a given phase, it will perform any operations specific to that phase, then execute callbacks in that phase's queue until the queue has been exhausted or the maximum number of callbacks has executed. When the queue has been exhausted or the callback limit is reached, the event loop will move to the next phase, and so on. + +Since any of these operations may schedule _more_ operations and new events processed in the **poll** phase are queued by the kernel, poll events can be queued while polling events are being processed. As a result, long running callbacks can allow the poll phase to run much longer than a timer's threshold. See the [**timers**](#timers) and [**poll**](#poll) sections for more details. + +_**NOTE:** There is a slight discrepancy between the Windows and the Unix/Linux implementation, but that's not important for this demonstration. The most important parts are here. There are actually seven or eight steps, but the ones we care about — ones that Node.js actually uses - are those above._ + +## Phases Overview + +* **timers**: this phase executes callbacks scheduled by `setTimeout()` and `setInterval()`. 
+* **pending callbacks**: executes I/O callbacks deferred to the next loop iteration. +* **idle, prepare**: only used internally. +* **poll**: retrieve new I/O events; execute I/O related callbacks (almost all with the exception of close callbacks, the ones scheduled by timers, and `setImmediate()`); node will block here when appropriate. +* **check**: `setImmediate()` callbacks are invoked here. +* **close callbacks**: some close callbacks, e.g. `socket.on('close', ...)`. + +Between each run of the event loop, Node.js checks if it is waiting for any asynchronous I/O or timers and shuts down cleanly if there are not any. + +## Phases in Detail + +### timers + +A timer specifies the **threshold** _after which_ a provided callback _may be executed_ rather than the **exact** time a person _wants it to be executed_. Timers callbacks will run as early as they can be scheduled after the specified amount of time has passed; however, Operating System scheduling or the running of other callbacks may delay them. + +_**Note**: Technically, the [**poll** phase](#poll) controls when timers are executed._ + +For example, say you schedule a timeout to execute after a 100 ms threshold, then your script starts asynchronously reading a file which takes 95 ms: + +```js +const fs = require('fs'); + +function someAsyncOperation(callback) { + // Assume this takes 95ms to complete + fs.readFile('/path/to/file', callback); +} + +const timeoutScheduled = Date.now(); + +setTimeout(() => { + const delay = Date.now() - timeoutScheduled; + + console.log(`${delay}ms have passed since I was scheduled`); +}, 100); + +// do someAsyncOperation which takes 95 ms to complete +someAsyncOperation(() => { + const startCallback = Date.now(); + + // do something that will take 10ms... 
+ while (Date.now() - startCallback < 10) { + // do nothing + } +}); +``` + +When the event loop enters the **poll** phase, it has an empty queue (`fs.readFile()` has not completed), so it will wait for the number of ms remaining until the soonest timer's threshold is reached. While it is waiting 95 ms pass, `fs.readFile()` finishes reading the file and its callback which takes 10 ms to complete is added to the **poll** queue and executed. When the callback finishes, there are no more callbacks in the queue, so the event loop will see that the threshold of the soonest timer has been reached then wrap back to the **timers** phase to execute the timer's callback. In this example, you will see that the total delay between the timer being scheduled and its callback being executed will be 105ms. + +Note: To prevent the **poll** phase from starving the event loop, [libuv](https://libuv.org/) (the C library that implements the Node.js event loop and all of the asynchronous behaviors of the platform) also has a hard maximum (system dependent) before it stops polling for more events. + +### pending callbacks + +This phase executes callbacks for some system operations such as types of TCP errors. For example if a TCP socket receives `ECONNREFUSED` when attempting to connect, some \*nix systems want to wait to report the error. This will be queued to execute in the **pending callbacks** phase. + +### poll + +The **poll** phase has two main functions: + +1. Calculating how long it should block and poll for I/O, then +2. Processing events in the **poll** queue. + +When the event loop enters the **poll** phase _and there are no timers scheduled_, one of two things will happen: + +* _If the **poll** queue **is not empty**_, the event loop will iterate through its queue of callbacks executing them synchronously until either the queue has been exhausted, or the system-dependent hard limit is reached. 
+ +* _If the **poll** queue **is empty**_, one of two more things will happen: + * If scripts have been scheduled by `setImmediate()`, the event loop will end the **poll** phase and continue to the **check** phase to execute those scheduled scripts. + + * If scripts **have not** been scheduled by `setImmediate()`, the event loop will wait for callbacks to be added to the queue, then execute them immediately. + +Once the **poll** queue is empty the event loop will check for timers _whose time thresholds have been reached_. If one or more timers are ready, the event loop will wrap back to the **timers** phase to execute those timers' callbacks. + +### check + +This phase allows a person to execute callbacks immediately after the **poll** phase has completed. If the **poll** phase becomes idle and scripts have been queued with `setImmediate()`, the event loop may continue to the **check** phase rather than waiting. + +`setImmediate()` is actually a special timer that runs in a separate phase of the event loop. It uses a libuv API that schedules callbacks to execute after the **poll** phase has completed. + +Generally, as the code is executed, the event loop will eventually hit the **poll** phase where it will wait for an incoming connection, request, etc. However, if a callback has been scheduled with `setImmediate()` and the **poll** phase becomes idle, it will end and continue to the **check** phase rather than waiting for **poll** events. + +### close callbacks + +If a socket or handle is closed abruptly (e.g. `socket.destroy()`), the `'close'` event will be emitted in this phase. Otherwise it will be emitted via `process.nextTick()`. + +## `setImmediate()` vs `setTimeout()` + +`setImmediate()` and `setTimeout()` are similar, but behave in different ways depending on when they are called. + +* `setImmediate()` is designed to execute a script once the current **poll** phase completes. 
+* `setTimeout()` schedules a script to be run after a minimum threshold in ms has elapsed. + +The order in which the timers are executed will vary depending on the context in which they are called. If both are called from within the main module, then timing will be bound by the performance of the process (which can be impacted by other applications running on the machine). + +For example, if we run the following script which is not within an I/O cycle (i.e. the main module), the order in which the two timers are executed is non-deterministic, as it is bound by the performance of the process: + +```js +// timeout_vs_immediate.js +setTimeout(() => { + console.log('timeout'); +}, 0); + +setImmediate(() => { + console.log('immediate'); +}); +``` + +``` +$ node timeout_vs_immediate.js +timeout +immediate + +$ node timeout_vs_immediate.js +immediate +timeout +``` + +However, if you move the two calls within an I/O cycle, the immediate callback is always executed first: + +```js +// timeout_vs_immediate.js +const fs = require('fs'); + +fs.readFile(__filename, () => { + setTimeout(() => { + console.log('timeout'); + }, 0); + setImmediate(() => { + console.log('immediate'); + }); +}); +``` + +``` +$ node timeout_vs_immediate.js +immediate +timeout + +$ node timeout_vs_immediate.js +immediate +timeout +``` + +The main advantage to using `setImmediate()` over `setTimeout()` is `setImmediate()` will always be executed before any timers if scheduled within an I/O cycle, independently of how many timers are present. + +## `process.nextTick()` + +### Understanding `process.nextTick()` + +You may have noticed that `process.nextTick()` was not displayed in the diagram, even though it's a part of the asynchronous API. This is because `process.nextTick()` is not technically part of the event loop. Instead, the `nextTickQueue` will be processed after the current operation is completed, regardless of the current phase of the event loop. 
Here, an *operation* is defined as a transition from the underlying C/C++ handler, and handling the JavaScript that needs to be executed. + +Looking back at our diagram, any time you call `process.nextTick()` in a given phase, all callbacks passed to `process.nextTick()` will be resolved before the event loop continues. This can create some bad situations because **it allows you to "starve" your I/O by making recursive `process.nextTick()` calls**, which prevents the event loop from reaching the **poll** phase. + +### Why would that be allowed? + +Why would something like this be included in Node.js? Part of it is a design philosophy where an API should always be asynchronous even where it doesn't have to be. Take this code snippet for example: + +```js +function apiCall(arg, callback) { + if (typeof arg !== 'string') + return process.nextTick(callback, + new TypeError('argument should be string')); +} +``` + +The snippet does an argument check and if it's not correct, it will pass the error to the callback. The API updated fairly recently to allow passing arguments to `process.nextTick()` allowing it to take any arguments passed after the callback to be propagated as the arguments to the callback so you don't have to nest functions. + +What we're doing is passing an error back to the user but only *after* we have allowed the rest of the user's code to execute. By using `process.nextTick()` we guarantee that `apiCall()` always runs its callback *after* the rest of the user's code and *before* the event loop is allowed to proceed. To achieve this, the JS call stack is allowed to unwind then immediately execute the provided callback which allows a person to make recursive calls to `process.nextTick()` without reaching a `RangeError: Maximum call stack size exceeded from v8`. + +This philosophy can lead to some potentially problematic situations. 
Take this snippet for example: + +```js +let bar; + +// this has an asynchronous signature, but calls callback synchronously +function someAsyncApiCall(callback) { callback(); } + +// the callback is called before `someAsyncApiCall` completes. +someAsyncApiCall(() => { + // since someAsyncApiCall hasn't completed, bar hasn't been assigned any value + console.log('bar', bar); // undefined +}); + +bar = 1; +``` + +The user defines `someAsyncApiCall()` to have an asynchronous signature, but it actually operates synchronously. When it is called, the callback provided to `someAsyncApiCall()` is called in the same phase of the event loop because `someAsyncApiCall()` doesn't actually do anything asynchronously. As a result, the callback tries to reference `bar` even though it may not have that variable in scope yet, because the script has not been able to run to completion. + +By placing the callback in a `process.nextTick()`, the script still has the ability to run to completion, allowing all the variables, functions, etc., to be initialized prior to the callback being called. It also has the advantage of not allowing the event loop to continue. It may be useful for the user to be alerted to an error before the event loop is allowed to continue. Here is the previous example using `process.nextTick()`: + +```js +let bar; + +function someAsyncApiCall(callback) { + process.nextTick(callback); +} + +someAsyncApiCall(() => { + console.log('bar', bar); // 1 +}); + +bar = 1; +``` + +Here's another real world example: + +```js +const server = net.createServer(() => {}).listen(8080); + +server.on('listening', () => {}); +``` + +When only a port is passed, the port is bound immediately. So, the `'listening'` callback could be called immediately. The problem is that the `.on('listening')` callback will not have been set by that time. + +To get around this, the `'listening'` event is queued in a `nextTick()` to allow the script to run to completion. 
This allows the user to set any event handlers they want.
+
+## `process.nextTick()` vs `setImmediate()`
+
+We have two calls that are similar as far as users are concerned, but their names are confusing.
+
+* `process.nextTick()` fires immediately on the same phase
+* `setImmediate()` fires on the following iteration or 'tick' of the event loop
+
+In essence, the names should be swapped. `process.nextTick()` fires more immediately than `setImmediate()`, but this is an artifact of the past which is unlikely to change. Making this switch would break a large percentage of the packages on npm. Every day more new modules are being added, which means every day we wait, more potential breakages occur. While they are confusing, the names themselves won't change.
+
+*We recommend developers use `setImmediate()` in all cases because it's easier to reason about.*
+
+## Why use `process.nextTick()`?
+
+There are two main reasons:
+
+1. Allow users to handle errors, clean up any then-unneeded resources, or perhaps try the request again before the event loop continues.
+
+2. At times it's necessary to allow a callback to run after the call stack has unwound but before the event loop continues.
+
+One example is to match the user's expectations. Simple example:
+
+```js
+const server = net.createServer();
+server.on('connection', (conn) => { });
+
+server.listen(8080);
+server.on('listening', () => { });
+```
+
+Say that `listen()` is run at the beginning of the event loop, but the listening callback is placed in a `setImmediate()`. Unless a hostname is passed, binding to the port will happen immediately. For the event loop to proceed, it must hit the **poll** phase, which means there is a non-zero chance that a connection could have been received allowing the connection event to be fired before the listening event. 
+ +Another example is running a function constructor that was to, say, inherit from `EventEmitter` and it wanted to call an event within the constructor: + +```js +const EventEmitter = require('events'); +const util = require('util'); + +function MyEmitter() { + EventEmitter.call(this); + this.emit('event'); +} +util.inherits(MyEmitter, EventEmitter); + +const myEmitter = new MyEmitter(); +myEmitter.on('event', () => { + console.log('an event occurred!'); +}); +``` + +You can't emit an event from the constructor immediately because the script will not have processed to the point where the user assigns a callback to that event. So, within the constructor itself, you can use `process.nextTick()` to set a callback to emit the event after the constructor has finished, which provides the expected results: + +```js +const EventEmitter = require('events'); +const util = require('util'); + +function MyEmitter() { + EventEmitter.call(this); + + // use nextTick to emit the event once a handler is assigned + process.nextTick(() => { + this.emit('event'); + }); +} +util.inherits(MyEmitter, EventEmitter); + +const myEmitter = new MyEmitter(); +myEmitter.on('event', () => { + console.log('an event occurred!'); +}); +``` diff --git a/locale/es/docs/guides/getting-started-guide.md b/locale/es/docs/guides/getting-started-guide.md new file mode 100644 index 000000000000..feac4e929f45 --- /dev/null +++ b/locale/es/docs/guides/getting-started-guide.md @@ -0,0 +1,29 @@ +--- +title: Getting Started Guide +layout: docs.hbs +--- + +# How do I start with Node.js after I installed it? + +Once we have installed Node.js, let's build our first web server. 
Create a file named `app.js` containing the following contents: + +```javascript +const http = require('http'); + +const hostname = '127.0.0.1'; +const port = 3000; + +const server = http.createServer((req, res) => { + res.statusCode = 200; + res.setHeader('Content-Type', 'text/plain'); + res.end('Hola Mundo'); +}); + +server.listen(port, hostname, () => { + console.log(`El servidor se está ejecutando en http://${hostname}:${port}/`); +}); +``` + +Now, run your web server using `node app.js`. Visit `http://localhost:3000` and you will see a message saying "Hello World". + +Refer to the [Introduction to Node.js](https://nodejs.dev/) for a more comprehensive guide to getting started with Node.js. diff --git a/locale/es/docs/guides/index.md b/locale/es/docs/guides/index.md new file mode 100644 index 000000000000..ad352ee74bac --- /dev/null +++ b/locale/es/docs/guides/index.md @@ -0,0 +1,32 @@ +--- +title: Guides +layout: docs.hbs +--- + +# Guides + +## General + +* [Getting Started Guide](/en/docs/guides/getting-started-guide/) +* [Debugging - Getting Started](/en/docs/guides/debugging-getting-started/) +* [Easy profiling for Node.js Applications](/en/docs/guides/simple-profiling/) +* [Diagnostics - Flame Graphs](/en/docs/guides/diagnostics-flamegraph/) +* [Dockerizing a Node.js web app](/en/docs/guides/nodejs-docker-webapp/) +* [Migrating to safe Buffer constructors](/en/docs/guides/buffer-constructor-deprecation/) + +## Node.js core concepts + +* [Introduction to Node.js](https://nodejs.dev/) +* [Overview of Blocking vs Non-Blocking](/en/docs/guides/blocking-vs-non-blocking/) +* [The Node.js Event Loop, Timers, and `process.nextTick()`](/en/docs/guides/event-loop-timers-and-nexttick/) +* [Don't Block the Event Loop (or the Worker Pool)](/en/docs/guides/dont-block-the-event-loop/) +* [Timers in Node.js](/en/docs/guides/timers-in-node/) + +## Module-related guides + +* [Anatomy of an HTTP Transaction](/en/docs/guides/anatomy-of-an-http-transaction/) +* [Working with 
Different Filesystems](/en/docs/guides/working-with-different-filesystems/) +* [Backpressuring in Streams](/en/docs/guides/backpressuring-in-streams/) +* [Domain Module Postmortem](/en/docs/guides/domain-postmortem/) +* [How to publish N-API package](/en/docs/guides/publishing-napi-modules/) +* [ABI Stability](/en/docs/guides/abi-stability/) diff --git a/locale/es/docs/guides/nodejs-docker-webapp.md b/locale/es/docs/guides/nodejs-docker-webapp.md new file mode 100644 index 000000000000..27219bd037f4 --- /dev/null +++ b/locale/es/docs/guides/nodejs-docker-webapp.md @@ -0,0 +1,237 @@ +--- +title: Dockerizing a Node.js web app +layout: docs.hbs +--- + +# Dockerizing a Node.js web app + +The goal of this example is to show you how to get a Node.js application into a Docker container. The guide is intended for development, and *not* for a production deployment. The guide also assumes you have a working [Docker installation](https://docs.docker.com/engine/installation/) and a basic understanding of how a Node.js application is structured. + +In the first part of this guide we will create a simple web application in Node.js, then we will build a Docker image for that application, and lastly we will instantiate a container from that image. + +Docker allows you to package an application with its environment and all of its dependencies into a "box", called a container. Usually, a container consists of an application running in a stripped-to-basics version of a Linux operating system. An image is the blueprint for a container, a container is a running instance of an image. + +## Create the Node.js app + +First, create a new directory where all the files would live. 
In this directory create a `package.json` file that describes your app and its dependencies: + +```json +{ + "name": "docker_web_app", + "version": "1.0.0", + "description": "Node.js on Docker", + "author": "First Last ", + "main": "server.js", + "scripts": { + "start": "node server.js" + }, + "dependencies": { + "express": "^4.16.1" + } +} +``` + +With your new `package.json` file, run `npm install`. If you are using `npm` version 5 or later, this will generate a `package-lock.json` file which will be copied to your Docker image. + +Then, create a `server.js` file that defines a web app using the [Express.js](https://expressjs.com/) framework: + +```javascript +'use strict'; + +const express = require('express'); + +// Constants +const PORT = 8080; +const HOST = '0.0.0.0'; + +// App +const app = express(); +app.get('/', (req, res) => { + res.send('Hello World'); +}); + +app.listen(PORT, HOST); +console.log(`Running on http://${HOST}:${PORT}`); +``` + +In the next steps, we'll look at how you can run this app inside a Docker container using the official Docker image. First, you'll need to build a Docker image of your app. + +## Creating a Dockerfile + +Create an empty file called `Dockerfile`: + +```markup +touch Dockerfile +``` + +Open the `Dockerfile` in your favorite text editor + +The first thing we need to do is define from what image we want to build from. Here we will use the latest LTS (long term support) version `10` of `node` available from the [Docker Hub](https://hub.docker.com/): + +```docker +FROM node:10 +``` + +Next we create a directory to hold the application code inside the image, this will be the working directory for your application: + +```docker +# Create app directory +WORKDIR /usr/src/app +``` + +This image comes with Node.js and NPM already installed so the next thing we need to do is to install your app dependencies using the `npm` binary. 
Please note that if you are using `npm` version 4 or earlier, a `package-lock.json` file will *not* be generated.
+ +EXPOSE 8080 +CMD [ "node", "server.js" ] +``` + +## .dockerignore file + +Create a `.dockerignore` file in the same directory as your `Dockerfile` with following content: + +``` +node_modules +npm-debug.log +``` + +This will prevent your local modules and debug logs from being copied onto your Docker image and possibly overwriting modules installed within your image. + +## Building your image + +Go to the directory that has your `Dockerfile` and run the following command to build the Docker image. The `-t` flag lets you tag your image so it's easier to find later using the `docker images` command: + +```bash +docker build -t /node-web-app . +``` + +Your image will now be listed by Docker: + +```bash +$ docker images + +# Example +REPOSITORY TAG ID CREATED +node 10 1934b0b038d1 5 days ago +/node-web-app latest d64d3505b0d2 1 minute ago +``` + +## Run the image + +Running your image with `-d` runs the container in detached mode, leaving the container running in the background. The `-p` flag redirects a public port to a private port inside the container. Run the image you previously built: + +```bash +docker run -p 49160:8080 -d /node-web-app +``` + +Print the output of your app: + +```bash +# Get container ID +$ docker ps + +# Print app output +$ docker logs + +# Example +Running on http://localhost:8080 +``` + +If you need to go inside the container you can use the `exec` command: + +```bash +# Enter the container +$ docker exec -it /bin/bash +``` + +## Test + +To test your app, get the port of your app that Docker mapped: + +```bash +$ docker ps + +# Example +ID IMAGE COMMAND ... PORTS +ecce33b30ebf /node-web-app:latest npm start ... 49160->8080 +``` + +In the example above, Docker mapped the `8080` port inside of the container to the port `49160` on your machine. 
+ +Now you can call your app using `curl` (install if needed via: `sudo apt-get +install curl`): + +```bash +$ curl -i localhost:49160 + +HTTP/1.1 200 OK +X-Powered-By: Express +Content-Type: text/html; charset=utf-8 +Content-Length: 12 +ETag: W/"c-M6tWOb/Y57lesdjQuHeB1P/qTV0" +Date: Mon, 13 Nov 2017 20:53:59 GMT +Connection: keep-alive + +Hello world +``` + +We hope this tutorial helped you get up and running a simple Node.js application on Docker. + +You can find more information about Docker and Node.js on Docker in the following places: + +* [Official Node.js Docker Image](https://hub.docker.com/_/node/) +* [Node.js Docker Best Practices Guide](https://github.com/nodejs/docker-node/blob/master/docs/BestPractices.md) +* [Official Docker documentation](https://docs.docker.com/) +* [Docker Tag on Stack Overflow](https://stackoverflow.com/questions/tagged/docker) +* [Docker Subreddit](https://reddit.com/r/docker) diff --git a/locale/es/docs/guides/publishing-napi-modules.md b/locale/es/docs/guides/publishing-napi-modules.md new file mode 100644 index 000000000000..d78432a4305d --- /dev/null +++ b/locale/es/docs/guides/publishing-napi-modules.md @@ -0,0 +1,37 @@ +--- +title: How to publish N-API package +layout: docs.hbs +--- + +# To publish N-API version of a package alongside a non-N-API version + +The following steps are illustrated using the package `iotivity-node`: + +* First, publish the non-N-API version: + * Update the version in `package.json`. For `iotivity-node`, the version becomes `1.2.0-2`. + * Go through the release checklist (ensure tests/demos/docs are OK) + * `npm publish` +* Then, publish the N-API version: + * Update the version in `package.json`. In the case of `iotivity-node`, the version becomes `1.2.0-3`. For versioning, we recommend following the pre-release version scheme as described by [semver.org](https://semver.org/#spec-item-9) e.g. `1.2.0-napi`. 
+ * Go through the release checklist (ensure tests/demos/docs are OK) + * `npm publish --tag n-api` + +In this example, tagging the release with `n-api` has ensured that, although version 1.2.0-3 is later than the non-N-API published version (1.2.0-2), it will not be installed if someone chooses to install `iotivity-node` by simply running `npm install iotivity-node`. This will install the non-N-API version by default. The user will have to run `npm install iotivity-node@n-api` to receive the N-API version. For more information on using tags with npm check out ["Using dist-tags"](https://docs.npmjs.com/getting-started/using-tags). + +# To introduce a dependency on an N-API version of a package + +To add the N-API version of `iotivity-node` as a dependency, the `package.json` will look like this: + +```json +"dependencies": { + "iotivity-node": "n-api" +} +``` + +**Note:** As explained in ["Using dist-tags"](https://docs.npmjs.com/getting-started/using-tags), unlike regular versions, tagged versions cannot be addressed by version ranges such as `"^2.0.0"` inside `package.json`. The reason for this is that the tag refers to exactly one version. So, if the package maintainer chooses to tag a later version of the package using the same tag, `npm update` will receive the later version. This should be acceptable given the currently experimental nature of N-API. 
To depend on an N-API-enabled version other than the latest published, the `package.json` dependency will have to refer to the exact version like the following: + +```json +"dependencies": { + "iotivity-node": "1.2.0-3" +} +``` diff --git a/locale/es/docs/guides/simple-profiling.md b/locale/es/docs/guides/simple-profiling.md new file mode 100644 index 000000000000..aa0392569fb7 --- /dev/null +++ b/locale/es/docs/guides/simple-profiling.md @@ -0,0 +1,217 @@ +--- +title: Easy profiling for Node.js Applications +layout: docs.hbs +--- + +# Easy profiling for Node.js Applications + +There are many third party tools available for profiling Node.js applications but, in many cases, the easiest option is to use the Node.js built in profiler. The built in profiler uses the [profiler inside V8](https://v8.dev/docs/profile) which samples the stack at regular intervals during program execution. It records the results of these samples, along with important optimization events such as jit compiles, as a series of ticks: + +``` +code-creation,LazyCompile,0,0x2d5000a337a0,396,"bp native array.js:1153:16",0x289f644df68,~ +code-creation,LazyCompile,0,0x2d5000a33940,716,"hasOwnProperty native v8natives.js:198:30",0x289f64438d0,~ +code-creation,LazyCompile,0,0x2d5000a33c20,284,"ToName native runtime.js:549:16",0x289f643bb28,~ +code-creation,Stub,2,0x2d5000a33d40,182,"DoubleToIStub" +code-creation,Stub,2,0x2d5000a33e00,507,"NumberToStringStub" +``` + +In the past, you needed the V8 source code to be able to interpret the ticks. Luckily, tools have been introduced since Node.js 4.4.0 that facilitate the consumption of this information without separately building V8 from source. Let's see how the built-in profiler can help provide insight into application performance. + +To illustrate the use of the tick profiler, we will work with a simple Express application. 
Our application will have two handlers, one for adding new users to our system: + +```javascript +app.get('/newUser', (req, res) => { + let username = req.query.username || ''; + const password = req.query.password || ''; + + username = username.replace(/[!@#$%^&*]/g, ''); + + if (!username || !password || users[username]) { + return res.sendStatus(400); + } + + const salt = crypto.randomBytes(128).toString('base64'); + const hash = crypto.pbkdf2Sync(password, salt, 10000, 512, 'sha512'); + + users[username] = { salt, hash }; + + res.sendStatus(200); +}); +``` + +and another for validating user authentication attempts: + +```javascript +app.get('/auth', (req, res) => { + let username = req.query.username || ''; + const password = req.query.password || ''; + + username = username.replace(/[!@#$%^&*]/g, ''); + + if (!username || !password || !users[username]) { + return res.sendStatus(400); + } + + const { salt, hash } = users[username]; + const encryptHash = crypto.pbkdf2Sync(password, salt, 10000, 512, 'sha512'); + + if (crypto.timingSafeEqual(hash, encryptHash)) { + res.sendStatus(200); + } else { + res.sendStatus(401); + } +}); +``` + +*Please note that these are NOT recommended handlers for authenticating users in your Node.js applications and are used purely for illustration purposes. You should not be trying to design your own cryptographic authentication mechanisms in general. It is much better to use existing, proven authentication solutions.* + +Now assume that we've deployed our application and users are complaining about high latency on requests. 
We can easily run the app with the built in profiler: + +``` +NODE_ENV=production node --prof app.js +``` + +and put some load on the server using `ab` (ApacheBench): + +``` +curl -X GET "http://localhost:8080/newUser?username=matt&password=password" +ab -k -c 20 -n 250 "http://localhost:8080/auth?username=matt&password=password" +``` + +and get an ab output of: + +``` +Concurrency Level: 20 +Time taken for tests: 46.932 seconds +Complete requests: 250 +Failed requests: 0 +Keep-Alive requests: 250 +Total transferred: 50250 bytes +HTML transferred: 500 bytes +Requests per second: 5.33 [#/sec] (mean) +Time per request: 3754.556 [ms] (mean) +Time per request: 187.728 [ms] (mean, across all concurrent requests) +Transfer rate: 1.05 [Kbytes/sec] received + +... + +Percentage of the requests served within a certain time (ms) + 50% 3755 + 66% 3804 + 75% 3818 + 80% 3825 + 90% 3845 + 95% 3858 + 98% 3874 + 99% 3875 + 100% 4225 (longest request) +``` + +From this output, we see that we're only managing to serve about 5 requests per second and that the average request takes just under 4 seconds round trip. In a real world example, we could be doing lots of work in many functions on behalf of a user request but even in our simple example, time could be lost compiling regular expressions, generating random salts, generating unique hashes from user passwords, or inside the Express framework itself. + +Since we ran our application using the `--prof` option, a tick file was generated in the same directory as your local run of the application. It should have the form `isolate-0xnnnnnnnnnnnn-v8.log` (where `n` is a digit). + +In order to make sense of this file, we need to use the tick processor bundled with the Node.js binary. To run the processor, use the `--prof-process` flag: + +``` +node --prof-process isolate-0xnnnnnnnnnnnn-v8.log > processed.txt +``` + +Opening processed.txt in your favorite text editor will give you a few different types of information. 
The file is broken up into sections which are again broken up by language. First, we look at the summary section and see: + +``` + [Summary]: + ticks total nonlib name + 79 0.2% 0.2% JavaScript + 36703 97.2% 99.2% C++ + 7 0.0% 0.0% GC + 767 2.0% Shared libraries + 215 0.6% Unaccounted +``` + +This tells us that 97% of all samples gathered occurred in C++ code and that when viewing other sections of the processed output we should pay most attention to work being done in C++ (as opposed to JavaScript). With this in mind, we next find the [C++] section which contains information about which C++ functions are taking the most CPU time and see: + +``` + [C++]: + ticks total nonlib name + 19557 51.8% 52.9% node::crypto::PBKDF2(v8::FunctionCallbackInfo const&) + 4510 11.9% 12.2% _sha1_block_data_order + 3165 8.4% 8.6% _malloc_zone_malloc +``` + +We see that the top 3 entries account for 72.1% of CPU time taken by the program. From this output, we immediately see that at least 51.8% of CPU time is taken up by a function called PBKDF2 which corresponds to our hash generation from a user's password. However, it may not be immediately obvious how the lower two entries factor into our application (or if it is we will pretend otherwise for the sake of example). To better understand the relationship between these functions, we will next look at the [Bottom up (heavy) profile] section which provides information about the primary callers of each function. 
Examining this section, we find: + +``` + ticks parent name + 19557 51.8% node::crypto::PBKDF2(v8::FunctionCallbackInfo const&) + 19557 100.0% v8::internal::Builtins::~Builtins() + 19557 100.0% LazyCompile: ~pbkdf2 crypto.js:557:16 + + 4510 11.9% _sha1_block_data_order + 4510 100.0% LazyCompile: *pbkdf2 crypto.js:557:16 + 4510 100.0% LazyCompile: *exports.pbkdf2Sync crypto.js:552:30 + + 3165 8.4% _malloc_zone_malloc + 3161 99.9% LazyCompile: *pbkdf2 crypto.js:557:16 + 3161 100.0% LazyCompile: *exports.pbkdf2Sync crypto.js:552:30 +``` + +Parsing this section takes a little more work than the raw tick counts above. Within each of the "call stacks" above, the percentage in the parent column tells you the percentage of samples for which the function in the row above was called by the function in the current row. For example, in the middle "call stack" above for _sha1_block_data_order, we see that `_sha1_block_data_order` occurred in 11.9% of samples, which we knew from the raw counts above. However, here, we can also tell that it was always called by the pbkdf2 function inside the Node.js crypto module. We see that similarly, `_malloc_zone_malloc` was called almost exclusively by the same pbkdf2 function. Thus, using the information in this view, we can tell that our hash computation from the user's password accounts not only for the 51.8% from above but also for all CPU time in the top 3 most sampled functions since the calls to `_sha1_block_data_order` and `_malloc_zone_malloc` were made on behalf of the pbkdf2 function. + +At this point, it is very clear that the password based hash generation should be the target of our optimization. Thankfully, you've fully internalized the [benefits of asynchronous programming](https://nodesource.com/blog/why-asynchronous) and you realize that the work to generate a hash from the user's password is being done in a synchronous way and thus tying down the event loop. 
This prevents us from working on other incoming requests while computing a hash. + +To remedy this issue, you make a small modification to the above handlers to use the asynchronous version of the pbkdf2 function: + +```javascript +app.get('/auth', (req, res) => { + let username = req.query.username || ''; + const password = req.query.password || ''; + + username = username.replace(/[!@#$%^&*]/g, ''); + + if (!username || !password || !users[username]) { + return res.sendStatus(400); + } + + crypto.pbkdf2(password, users[username].salt, 10000, 512, 'sha512', (err, hash) => { + if (users[username].hash.toString() === hash.toString()) { + res.sendStatus(200); + } else { + res.sendStatus(401); + } + }); +}); +``` + +A new run of the ab benchmark above with the asynchronous version of your app yields: + +``` +Concurrency Level: 20 +Time taken for tests: 12.846 seconds +Complete requests: 250 +Failed requests: 0 +Keep-Alive requests: 250 +Total transferred: 50250 bytes +HTML transferred: 500 bytes +Requests per second: 19.46 [#/sec] (mean) +Time per request: 1027.689 [ms] (mean) +Time per request: 51.384 [ms] (mean, across all concurrent requests) +Transfer rate: 3.82 [Kbytes/sec] received + +... + +Percentage of the requests served within a certain time (ms) + 50% 1018 + 66% 1035 + 75% 1041 + 80% 1043 + 90% 1049 + 95% 1063 + 98% 1070 + 99% 1071 + 100% 1079 (longest request) +``` + +Yay! Your app is now serving about 20 requests per second, roughly 4 times more than it was with the synchronous hash generation. Additionally, the average latency is down from the 4 seconds before to just over 1 second. + +Hopefully, through the performance investigation of this (admittedly contrived) example, you've seen how the V8 tick processor can help you gain a better understanding of the performance of your Node.js applications. 
diff --git a/locale/es/docs/guides/timers-in-node.md b/locale/es/docs/guides/timers-in-node.md new file mode 100644 index 000000000000..4cf765124e51 --- /dev/null +++ b/locale/es/docs/guides/timers-in-node.md @@ -0,0 +1,125 @@ +--- +title: Timers in Node.js +layout: docs.hbs +--- + +# Timers in Node.js and beyond + +The Timers module in Node.js contains functions that execute code after a set period of time. Timers do not need to be imported via `require()`, since all the methods are available globally to emulate the browser JavaScript API. To fully understand when timer functions will be executed, it's a good idea to read up on the Node.js [Event Loop](/en/docs/guides/event-loop-timers-and-nexttick/). + +## Controlling the Time Continuum with Node.js + +The Node.js API provides several ways of scheduling code to execute at some point after the present moment. The functions below may seem familiar, since they are available in most browsers, but Node.js actually provides its own implementation of these methods. Timers integrate very closely with the system, and despite the fact that the API mirrors the browser API, there are some differences in implementation. + +### "When I say so" Execution ~ *`setTimeout()`* + +`setTimeout()` can be used to schedule code execution after a designated amount of milliseconds. This function is similar to [`window.setTimeout()`](https://developer.mozilla.org/en-US/docs/Web/API/WindowTimers/setTimeout) from the browser JavaScript API, however a string of code cannot be passed to be executed. + +`setTimeout()` accepts a function to execute as its first argument and the millisecond delay defined as a number as the second argument. Additional arguments may also be included and these will be passed on to the function. 
Here is an example of that: + +```js +function myFunc(arg) { + console.log(`arg was => ${arg}`); +} + +setTimeout(myFunc, 1500, 'funky'); +``` + +The above function `myFunc()` will execute as close to 1500 milliseconds (or 1.5 seconds) as possible due to the call of `setTimeout()`. + +The timeout interval that is set cannot be relied upon to execute after that *exact* number of milliseconds. This is because other executing code that blocks or holds onto the event loop will push the execution of the timeout back. The *only* guarantee is that the timeout will not execute *sooner* than the declared timeout interval. + +`setTimeout()` returns a `Timeout` object that can be used to reference the timeout that was set. This returned object can be used to cancel the timeout ( see `clearTimeout()` below) as well as change the execution behavior (see `unref()` below). + +### "Right after this" Execution ~ *`setImmediate()`* + +`setImmediate()` will execute code at the end of the current event loop cycle. This code will execute *after* any I/O operations in the current event loop and *before* any timers scheduled for the next event loop. This code execution could be thought of as happening "right after this", meaning any code following the `setImmediate()` function call will execute before the `setImmediate()` function argument. + +The first argument to `setImmediate()` will be the function to execute. Any subsequent arguments will be passed to the function when it is executed. 
Here's an example: + +```js +console.log('before immediate'); + +setImmediate((arg) => { + console.log(`executing immediate: ${arg}`); +}, 'so immediate'); + +console.log('after immediate'); +``` + +The above function passed to `setImmediate()` will execute after all runnable code has executed, and the console output will be: + +``` +before immediate +after immediate +executing immediate: so immediate +``` + +`setImmediate()` returns an `Immediate` object, which can be used to cancel the scheduled immediate (see `clearImmediate()` below). + +Note: Don't get `setImmediate()` confused with `process.nextTick()`. There are some major ways they differ. The first is that `process.nextTick()` will run *before* any `Immediate`s that are set as well as before any scheduled I/O. The second is that `process.nextTick()` is non-clearable, meaning once code has been scheduled to execute with `process.nextTick()`, the execution cannot be stopped, just like with a normal function. Refer to [this guide](/en/docs/guides/event-loop-timers-and-nexttick/#process-nexttick) to better understand the operation of `process.nextTick()`. + +### "Infinite Loop" Execution ~ *`setInterval()`* + +If there is a block of code that should execute multiple times, `setInterval()` can be used to execute that code. `setInterval()` takes a function argument that will run an infinite number of times with a given millisecond delay as the second argument. Just like `setTimeout()`, additional arguments can be added beyond the delay, and these will be passed on to the function call. Also like `setTimeout()`, the delay cannot be guaranteed because of operations that may hold on to the event loop, and therefore should be treated as an approximate delay. 
See the below example: + +```js +function intervalFunc() { + console.log('Cant stop me now!'); +} + +setInterval(intervalFunc, 1500); +``` + +In the above example, `intervalFunc()` will execute about every 1500 milliseconds, or 1.5 seconds, until it is stopped (see below). + +Just like `setTimeout()`, `setInterval()` also returns a `Timeout` object which can be used to reference and modify the interval that was set. + +## Clearing the Future + +What can be done if a `Timeout` or `Immediate` object needs to be cancelled? `setTimeout()`, `setImmediate()`, and `setInterval()` return a timer object that can be used to reference the set `Timeout` or `Immediate` object. By passing said object into the respective `clear` function, execution of that object will be halted completely. The respective functions are `clearTimeout()`, `clearImmediate()`, and `clearInterval()`. See the example below for an example of each: + +```js +const timeoutObj = setTimeout(() => { + console.log('timeout beyond time'); +}, 1500); + +const immediateObj = setImmediate(() => { + console.log('immediately executing immediate'); +}); + +const intervalObj = setInterval(() => { + console.log('interviewing the interval'); +}, 500); + +clearTimeout(timeoutObj); +clearImmediate(immediateObj); +clearInterval(intervalObj); +``` + +## Leaving Timeouts Behind + +Remember that `Timeout` objects are returned by `setTimeout` and `setInterval`. The `Timeout` object provides two functions intended to augment `Timeout` behavior with `unref()` and `ref()`. If there is a `Timeout` object scheduled using a `set` function, `unref()` can be called on that object. This will change the behavior slightly, and not call the `Timeout` object *if it is the last code to execute*. The `Timeout` object will not keep the process alive, waiting to execute. 
+ +In similar fashion, a `Timeout` object that has had `unref()` called on it can remove that behavior by calling `ref()` on that same `Timeout` object, which will then ensure its execution. Be aware, however, that this does not *exactly* restore the initial behavior for performance reasons. See below for examples of both: + +```js +const timerObj = setTimeout(() => { + console.log('will i run?'); +}); + +// if left alone, this statement will keep the above +// timeout from running, since the timeout will be the only +// thing keeping the program from exiting +timerObj.unref(); + +// we can bring it back to life by calling ref() inside +// an immediate +setImmediate(() => { + timerObj.ref(); +}); +``` + +## Further Down the Event Loop + +There's much more to the Event Loop and Timers than this guide has covered. To learn more about the internals of the Node.js Event Loop and how Timers operate during execution, check out this Node.js guide: [The Node.js Event Loop, Timers, and process.nextTick()](/en/docs/guides/event-loop-timers-and-nexttick/). diff --git a/locale/es/docs/guides/working-with-different-filesystems.md b/locale/es/docs/guides/working-with-different-filesystems.md new file mode 100644 index 000000000000..f4b875c0da31 --- /dev/null +++ b/locale/es/docs/guides/working-with-different-filesystems.md @@ -0,0 +1,90 @@ +--- +title: Working with Different Filesystems +layout: docs.hbs +--- + +# Working with Different Filesystems + +Node.js exposes many features of the filesystem. But not all filesystems are alike. The following are suggested best practices to keep your code simple and safe when working with different filesystems. + +## Filesystem Behavior + +Before you can work with a filesystem, you need to know how it behaves. 
Different filesystems behave differently and have more or less features than others: case sensitivity, case insensitivity, case preservation, Unicode form preservation, timestamp resolution, extended attributes, inodes, Unix permissions, alternate data streams etc. + +Be wary of inferring filesystem behavior from `process.platform`. For example, do not assume that because your program is running on Darwin that you are therefore working on a case-insensitive filesystem (HFS+), as the user may be using a case-sensitive filesystem (HFSX). Similarly, do not assume that because your program is running on Linux that you are therefore working on a filesystem which supports Unix permissions and inodes, as you may be on a particular external drive, USB or network drive which does not. + +The operating system may not make it easy to infer filesystem behavior, but all is not lost. Instead of keeping a list of every known filesystem and behavior (which is always going to be incomplete), you can probe the filesystem to see how it actually behaves. The presence or absence of certain features which are easy to probe, are often enough to infer the behavior of other features which are more difficult to probe. + +Remember that some users may have different filesystems mounted at various paths in the working tree. + +## Avoid a Lowest Common Denominator Approach + +You might be tempted to make your program act like a lowest common denominator filesystem, by normalizing all filenames to uppercase, normalizing all filenames to NFC Unicode form, and normalizing all file timestamps to say 1-second resolution. This would be the lowest common denominator approach. + +Do not do this. You would only be able to interact safely with a filesystem which has the exact same lowest common denominator characteristics in every respect. You would be unable to work with more advanced filesystems in the way that users expect, and you would run into filename or timestamp collisions. 
You would most certainly lose and corrupt user data through a series of complicated dependent events, and you would create bugs that would be difficult if not impossible to solve. + +What happens when you later need to support a filesystem that only has 2-second or 24-hour timestamp resolution? What happens when the Unicode standard advances to include a slightly different normalization algorithm (as has happened in the past)? + +A lowest common denominator approach would tend to try to create a portable program by using only "portable" system calls. This leads to programs that are leaky and not in fact portable. + +## Adopt a Superset Approach + +Make the best use of each platform you support by adopting a superset approach. For example, a portable backup program should sync btimes (the created time of a file or folder) correctly between Windows systems, and should not destroy or alter btimes, even though btimes are not supported on Linux systems. The same portable backup program should sync Unix permissions correctly between Linux systems, and should not destroy or alter Unix permissions, even though Unix permissions are not supported on Windows systems. + +Handle different filesystems by making your program act like a more advanced filesystem. Support a superset of all possible features: case-sensitivity, case-preservation, Unicode form sensitivity, Unicode form preservation, Unix permissions, high-resolution nanosecond timestamps, extended attributes etc. + +Once you have case-preservation in your program, you can always implement case-insensitivity if you need to interact with a case-insensitive filesystem. But if you forego case-preservation in your program, you cannot interact safely with a case-preserving filesystem. The same is true for Unicode form preservation and timestamp resolution preservation. + +If a filesystem provides you with a filename in a mix of lowercase and uppercase, then keep the filename in the exact case given. 
If a filesystem provides you with a filename in mixed Unicode form or NFC or NFD (or NFKC or NFKD), then keep the filename in the exact byte sequence given. If a filesystem provides you with a millisecond timestamp, then keep the timestamp in millisecond resolution. + +When you work with a lesser filesystem, you can always downsample appropriately, with comparison functions as required by the behavior of the filesystem on which your program is running. If you know that the filesystem does not support Unix permissions, then you should not expect to read the same Unix permissions you write. If you know that the filesystem does not preserve case, then you should be prepared to see `ABC` in a directory listing when your program creates `abc`. But if you know that the filesystem does preserve case, then you should consider `ABC` to be a different filename to `abc`, when detecting file renames or if the filesystem is case-sensitive. + +## Case Preservation + +You may create a directory called `test/abc` and be surprised to see sometimes that `fs.readdir('test')` returns `['ABC']`. This is not a bug in Node. Node.js returns the filename as the filesystem stores it, and not all filesystems support case-preservation. Some filesystems convert all filenames to uppercase (or lowercase). + +## Unicode Form Preservation + +*Case preservation and Unicode form preservation are similar concepts. To understand why Unicode form should be preserved, make sure that you first understand why case should be preserved. Unicode form preservation is just as simple when understood correctly.* + +Unicode can encode the same characters using several different byte sequences. Several strings may look the same, but have different byte sequences. When working with UTF-8 strings, be careful that your expectations are in line with how Unicode works. 
Just as you would not expect all UTF-8 characters to encode to a single byte, you should not expect several UTF-8 strings that look the same to the human eye to have the same byte representation. This may be an expectation that you can have of ASCII, but not of UTF-8. + +You may create a directory called `test/café` (NFC Unicode form with byte sequence `<63 61 66 c3 a9>` and `string.length === 5`) and be surprised to see sometimes that `fs.readdir('test')` returns `['café']` (NFD Unicode form with byte sequence `<63 61 66 65 cc 81>` and `string.length === 6`). This is not a bug in Node. Node.js returns the filename as the filesystem stores it, and not all filesystems support Unicode form preservation. + +HFS+, for example, will normalize all filenames to a form almost always the same as NFD form. Do not expect HFS+ to behave the same as NTFS or EXT4 and vice-versa. Do not try to change data permanently through normalization as a leaky abstraction to paper over Unicode differences between filesystems. This would create problems without solving any. Rather, preserve Unicode form and use normalization as a comparison function only. + +## Unicode Form Insensitivity + +Unicode form insensitivity and Unicode form preservation are two different filesystem behaviors often mistaken for each other. Just as case-insensitivity has sometimes been incorrectly implemented by permanently normalizing filenames to uppercase when storing and transmitting filenames, so Unicode form insensitivity has sometimes been incorrectly implemented by permanently normalizing filenames to a certain Unicode form (NFD in the case of HFS+) when storing and transmitting filenames. It is possible and much better to implement Unicode form insensitivity without sacrificing Unicode form preservation, by using Unicode normalization for comparison only. 
+ +## Comparing Different Unicode Forms + +Node.js provides `string.normalize('NFC' / 'NFD')` which you can use to normalize a UTF-8 string to either NFC or NFD. You should never store the output from this function but only use it as part of a comparison function to test whether two UTF-8 strings would look the same to the user. + +You can use `string1.normalize('NFC') === string2.normalize('NFC')` or `string1.normalize('NFD') === string2.normalize('NFD')` as your comparison function. Which form you use does not matter. + +Normalization is fast but you may want to use a cache as input to your comparison function to avoid normalizing the same string many times over. If the string is not present in the cache then normalize it and cache it. Be careful not to store or persist the cache, use it only as a cache. + +Note that using `normalize()` requires that your version of Node.js include ICU (otherwise `normalize()` will just return the original string). If you download the latest version of Node.js from the website then it will include ICU. + +## Timestamp Resolution + +You may set the `mtime` (the modified time) of a file to `1444291759414` (millisecond resolution) and be surprised to see sometimes that `fs.stat` returns the new mtime as `1444291759000` (1-second resolution) or `1444291758000` (2-second resolution). This is not a bug in Node. Node.js returns the timestamp as the filesystem stores it, and not all filesystems support nanosecond, millisecond or 1-second timestamp resolution. Some filesystems even have very coarse resolution for the atime timestamp in particular, e.g. 24 hours for some FAT filesystems. + +## Do Not Corrupt Filenames and Timestamps Through Normalization + +Filenames and timestamps are user data. 
Just as you would never automatically rewrite user file data to uppercase the data or normalize `CRLF` to `LF` line-endings, so you should never change, interfere or corrupt filenames or timestamps through case / Unicode form / timestamp normalization. Normalization should only ever be used for comparison, never for altering data. + +Normalization is effectively a lossy hash code. You can use it to test for certain kinds of equivalence (e.g. do several strings look the same even though they have different byte sequences) but you can never use it as a substitute for the actual data. Your program should pass on filename and timestamp data as is. + +Your program can create new data in NFC (or in any combination of Unicode form it prefers) or with a lowercase or uppercase filename, or with a 2-second resolution timestamp, but your program should not corrupt existing user data by imposing case / Unicode form / timestamp normalization. Rather, adopt a superset approach and preserve case, Unicode form and timestamp resolution in your program. That way, you will be able to interact safely with filesystems which do the same. + +## Use Normalization Comparison Functions Appropriately + +Make sure that you use case / Unicode form / timestamp comparison functions appropriately. Do not use a case-insensitive filename comparison function if you are working on a case-sensitive filesystem. Do not use a Unicode form insensitive comparison function if you are working on a Unicode form sensitive filesystem (e.g. NTFS and most Linux filesystems which preserve both NFC and NFD or mixed Unicode forms). Do not compare timestamps at 2-second resolution if you are working on a nanosecond timestamp resolution filesystem. + +## Be Prepared for Slight Differences in Comparison Functions + +Be careful that your comparison functions match those of the filesystem (or probe the filesystem if possible to see how it would actually compare). 
Case-insensitivity for example is more complex than a simple `toLowerCase()` comparison. In fact, `toUpperCase()` is usually better than `toLowerCase()` (since it handles certain foreign language characters differently). But better still would be to probe the filesystem since every filesystem has its own case comparison table baked in. + +As an example, Apple's HFS+ normalizes filenames to NFD form but this NFD form is actually an older version of the current NFD form and may sometimes be slightly different from the latest Unicode standard's NFD form. Do not expect HFS+ NFD to be exactly the same as Unicode NFD all the time. diff --git a/locale/es/docs/index.md b/locale/es/docs/index.md index 85ca60bd6521..d632b742672f 100644 --- a/locale/es/docs/index.md +++ b/locale/es/docs/index.md @@ -15,9 +15,7 @@ Existen diferentes tipos de documentación disponible en este sitio: ## Referencia de la API -La [Referencia de la API](https://nodejs.org/api/) proporciona información detallada sobre una función ó un objeto en Node.js. Esta -documentación indica que argumentos acepta un método, el valor que retorna este método y qué errores pueden estar -relacionados al mismo. También indica qué métodos están disponibles para las diferentes versiones de Node.js +La [Referencia de la API](https://nodejs.org/api/) proporciona información detallada sobre una función ó un objeto en Node.js. Esta documentación indica que argumentos acepta un método, el valor que retorna este método y qué errores pueden estar relacionados al mismo. También indica qué métodos están disponibles para las diferentes versiones de Node.js También describe los módulos incluidos que proporciona Node.js, pero no documenta los módulos que proporciona la comunidad. 
@@ -43,9 +41,7 @@ También describe los módulos incluidos que proporciona Node.js, pero no docume ## Funcionalidades de ES6 -La [sección de ES6](/en/docs/es6/) describe los tres grupos de funcionalidades de ES6, y detalla qué -funcionalidades están activadas por defecto en Node.js, junto con enlaces explicativos. También muestra cómo encontrar -qué versión de V8 usa una versión particular de Node.js. +La [sección de ES6](/en/docs/es6/) describe los tres grupos de funcionalidades de ES6, y detalla qué funcionalidades están activadas por defecto en Node.js, junto con enlaces explicativos. También muestra cómo encontrar qué versión de V8 usa una versión particular de Node.js. ## Guías diff --git a/locale/es/docs/meta/topics/dependencies.md b/locale/es/docs/meta/topics/dependencies.md new file mode 100644 index 000000000000..db12c22a2ace --- /dev/null +++ b/locale/es/docs/meta/topics/dependencies.md @@ -0,0 +1,78 @@ +--- +title: Dependencies +layout: docs.hbs +--- + +# Dependencies + +There are several dependencies that Node.js relies on to work the way it does. + +* [Libraries](#libraries) + * [V8](#v8) + * [libuv](#libuv) + * [llhttp](#llhttp) + * [c-ares](#c-ares) + * [OpenSSL](#openssl) + * [zlib](#zlib) +* [Tools](#tools) + * [npm](#npm) + * [gyp](#gyp) + * [gtest](#gtest) + +## Libraries + +### V8 + +The V8 library provides Node.js with a JavaScript engine, which Node.js controls via the V8 C++ API. V8 is maintained by Google, for use in Chrome. + +* [Documentation](https://v8.dev/docs) + +### libuv + +Another important dependency is libuv, a C library that is used to abstract non-blocking I/O operations to a consistent interface across all supported platforms. It provides mechanisms to handle file system, DNS, network, child processes, pipes, signal handling, polling and streaming. It also includes a thread pool for offloading work for some things that can't be done asynchronously at the operating system level. 
+ +* [Documentation](http://docs.libuv.org/) + +### llhttp + +HTTP parsing is handled by a lightweight TypeScript and C library called llhttp. It is designed to not make any syscalls or allocations, so it has a very small per-request memory footprint. + +* [Documentation](https://github.com/nodejs/llhttp) + +### c-ares + +For some asynchronous DNS requests, Node.js uses a C library called c-ares. It is exposed through the DNS module in JavaScript as the `resolve()` family of functions. The `lookup()` function, which is what the rest of core uses, makes use of threaded `getaddrinfo(3)` calls in libuv. The reason for this is that c-ares supports /etc/hosts, /etc/resolv.conf and /etc/svc.conf, but not things like mDNS. + +* [Documentation](https://c-ares.haxx.se/docs.html) + +### OpenSSL + +OpenSSL is used extensively in both the `tls` and `crypto` modules. It provides battle-tested implementations of many cryptographic functions that the modern web relies on for security. + +* [Documentation](https://www.openssl.org/docs/) + +### zlib + +For fast compression and decompression, Node.js relies on the industry-standard zlib library, also known for its use in gzip and libpng. Node.js uses zlib to create sync, async and streaming compression and decompression interfaces. + +* [Documentation](https://www.zlib.net/manual.html) + +## Tools + +### npm + +Node.js is all about modularity, and with that comes the need for a quality package manager; for this purpose, npm was made. With npm comes the largest selection of community-created packages of any programming ecosystem, which makes building Node.js apps quick and easy. + +* [Documentation](https://docs.npmjs.com/) + +### gyp + +The build system is handled by gyp, a python-based project generator copied from V8. It can generate project files for use with build systems across many platforms. 
Node.js requires a build system because large parts of it — and its dependencies — are written in languages that require compilation. + +* [Documentation](https://gyp.gsrc.io/docs/UserDocumentation.md) + +### gtest + +Native code can be tested using gtest, which is taken from Chromium. It allows testing C/C++ without needing an existing node executable to bootstrap from. + +* [Documentation](https://code.google.com/p/googletest/wiki/V1_7_Documentation) diff --git a/locale/es/download/current.md b/locale/es/download/current.md index c2a3cfa5e0ae..4ec85fbabd09 100644 --- a/locale/es/download/current.md +++ b/locale/es/download/current.md @@ -3,32 +3,33 @@ layout: download-current.hbs title: Descarga download: Descarga downloads: - headline: Descargas - lts: LTS - current: Actual - tagline-current: Últimas características - tagline-lts: Recomendado para la mayoría - display-hint: Mostrar descargas para - intro: > - Descargue el código fuente de Node.js o un instalador pre-compilado para su plataforma, y comience a desarrollar hoy. - currentVersion: Versión actual - buildInstructions: Compilando Node.js desde el código fuente en las plataformas soportadas - WindowsInstaller: Windows Installer - WindowsBinary: Windows Binary - MacOSInstaller: macOS Installer - MacOSBinary: macOS Binary - LinuxBinaries: Linux Binaries - SourceCode: Source Code + headline: Descargas + lts: LTS + current: Actual + tagline-current: Últimas características + tagline-lts: Recomendado para la mayoría + display-hint: Mostrar descargas para + intro: > + Descargue el código fuente de Node.js o un instalador pre-compilado para su plataforma, y comience a desarrollar hoy. 
+ currentVersion: Versión actual + buildInstructions: Compilando Node.js desde el código fuente en las plataformas soportadas + WindowsInstaller: Windows Installer + WindowsBinary: Windows Binary + MacOSInstaller: macOS Installer + MacOSBinary: macOS Binary + LinuxBinaries: Linux Binaries + SourceCode: Source Code additional: - headline: Plataformas adicionales - intro: > - Miembros de la comunidad de Node.js proveén paquetes pre-compilados de forma no oficial para plataformas adicionales no soportadas por el equipo central de Node.js que pueden no estar al mismo nivel de las versiones actuales oficiales de Node.js. - platform: Plataforma - provider: Proveedor - SmartOSBinaries: SmartOS Binaries - DockerImage: Docker Image - officialDockerImage: Official Node.js Docker Image - LinuxPowerSystems: Linux on Power LE Systems - LinuxSystemZ: Linux on System z - AIXPowerSystems: AIX on Power Systems + headline: Plataformas adicionales + intro: > + Miembros de la comunidad de Node.js proveén paquetes pre-compilados de forma no oficial para plataformas adicionales no soportadas por el equipo central de Node.js que pueden no estar al mismo nivel de las versiones actuales oficiales de Node.js. 
+ platform: Plataforma + provider: Proveedor + SmartOSBinaries: SmartOS Binaries + DockerImage: Docker Image + officialDockerImage: Official Node.js Docker Image + LinuxPowerSystems: Linux on Power LE Systems + LinuxSystemZ: Linux on System z + AIXPowerSystems: AIX on Power Systems --- + diff --git a/locale/es/download/index.md b/locale/es/download/index.md index 509cce6eb15b..abcadc775049 100644 --- a/locale/es/download/index.md +++ b/locale/es/download/index.md @@ -3,32 +3,33 @@ layout: download.hbs title: Descarga download: Descarga downloads: - headline: Descargas - lts: LTS - current: Actual - tagline-current: Últimas características - tagline-lts: Recomendado para la mayoría - display-hint: Mostrar descargas para - intro: > - Descargue el código fuente de Node.js o un instalador pre-compilado para su plataforma, y comience a desarrollar hoy. - currentVersion: Versión actual - buildInstructions: Compilando Node.js desde el código fuente en las plataformas soportadas - WindowsInstaller: Windows Installer - WindowsBinary: Windows Binary - MacOSInstaller: macOS Installer - MacOSBinary: macOS Binary - LinuxBinaries: Linux Binaries - SourceCode: Source Code + headline: Descargas + lts: LTS + current: Actual + tagline-current: Últimas características + tagline-lts: Recomendado para la mayoría + display-hint: Mostrar descargas para + intro: > + Descargue el código fuente de Node.js o un instalador pre-compilado para su plataforma, y comience a desarrollar hoy. 
+ currentVersion: Versión actual + buildInstructions: Compilando Node.js desde el código fuente en las plataformas soportadas + WindowsInstaller: Windows Installer + WindowsBinary: Windows Binary + MacOSInstaller: macOS Installer + MacOSBinary: macOS Binary + LinuxBinaries: Linux Binaries + SourceCode: Source Code additional: - headline: Plataformas adicionales - intro: > - Miembros de la comunidad de Node.js proveén paquetes pre-compilados de forma no oficial para plataformas adicionales no soportadas por el equipo central de Node.js que pueden no estar al mismo nivel de las versiones actuales oficiales de Node.js. - platform: Plataforma - provider: Proveedor - SmartOSBinaries: SmartOS Binaries - DockerImage: Docker Image - officialDockerImage: Official Node.js Docker Image - LinuxPowerSystems: Linux on Power LE Systems - LinuxSystemZ: Linux on System z - AIXPowerSystems: AIX on Power Systems + headline: Plataformas adicionales + intro: > + Miembros de la comunidad de Node.js proveén paquetes pre-compilados de forma no oficial para plataformas adicionales no soportadas por el equipo central de Node.js que pueden no estar al mismo nivel de las versiones actuales oficiales de Node.js. 
+ platform: Plataforma + provider: Proveedor + SmartOSBinaries: SmartOS Binaries + DockerImage: Docker Image + officialDockerImage: Official Node.js Docker Image + LinuxPowerSystems: Linux on Power LE Systems + LinuxSystemZ: Linux on System z + AIXPowerSystems: AIX on Power Systems --- + diff --git a/locale/es/download/package-manager.md b/locale/es/download/package-manager.md index 767e2f2d5fd4..f6f36e9f7959 100644 --- a/locale/es/download/package-manager.md +++ b/locale/es/download/package-manager.md @@ -11,13 +11,16 @@ title: Instalando Node.js usando un gestor de paquetes * [Android](#android) * [Arch Linux](#arch-linux) -* [Distribuciones de Linux basadas en Debian y Ubuntu, Enterprise Linux/Fedora y Snap](#distribuciones-de-linux-basadas-en-debian-y-ubuntu-enterprise-linux-fedora-y-snap) -* [FreeBSD y OpenBSD](#freebsd-y-openbsd) +* [Distribuciones de Linux basadas en Debian y Ubuntu, Enterprise Linux/Fedora y Snap](#debian-and-ubuntu-based-linux-distributions-enterprise-linux-fedora-and-snap-packages) +* [FreeBSD y OpenBSD](#freebsd) * [Gentoo](#gentoo) -* [NetBSD](#netbsd) -* [openSUSE y SLE](#opensuse-y-sle) -* [macOS](#macos) -* [SmartOS y illumos](#smartos-y-illumos) +* [NetBSD](#ibm-i) +* [openSUSE y SLE](#netbsd) +* [macOS](#nvm) +* [SmartOS y illumos](#openbsd) +* [Solus](#opensuse-and-sle) +* [Void Linux](#macos) +* [Windows](#smartos-and-illumos) * [Solus](#solus) * [Void Linux](#void-linux) * [Windows](#windows) @@ -52,63 +55,68 @@ pacman -S nodejs npm Node.js está disponible mediante el sistema de ports. 
+Versiones de desarrollo también están disponibles usando ports + ```bash /usr/ports/www/node ``` -Versiones de desarrollo también están disponibles usando ports +Ó paquetes en FreeBSD: ```bash cd /usr/ports/www/node-devel/ && make install clean ``` -Ó paquetes en FreeBSD: +## Gentoo + +Usando [pkg-ng](https://wiki.freebsd.org/pkgng) en FreeBSD ```bash pkg_add -r node-devel ``` -Usando [pkg-ng](https://wiki.freebsd.org/pkgng) en FreeBSD +## NetBSD + +Ó versiones de desarrollo: + +Node.js está disponible en el árbol de portage. ```bash pkg install node ``` -Ó versiones de desarrollo: +Node.js está disponible en el árbol de pkgsrc: + +## openSUSE y SLE + +Ó instale un paquete binario (si está disponible para su plataforma) usando pkgin: ```bash pkg install node-devel ``` -## Gentoo - -Node.js está disponible en el árbol de portage. +[Descargue Node.js mediante openSUSE one-click](http://software.opensuse.org/download.html?project=devel%3Alanguages%3Anodejs&package=nodejs). ```bash emerge nodejs ``` -## NetBSD +## macOS +Paquetes RPM disponibles para: openSUSE 11.4, 12.1, 12.2, 12.3, 13.1, Factory y Tumbleweed; SLE 11 (con las variaciones SP1/SP2/SP3). -Node.js está disponible en el árbol de pkgsrc: +Ejemplo de instalación en openSUSE 13.1: ```bash cd /usr/pkgsrc/lang/nodejs && make install ``` -Ó instale un paquete binario (si está disponible para su plataforma) usando pkgin: +Simplemente descargue el [Instalador para macOS](https://nodejs.org/es/#home-downloadhead) directamente desde el sitio web de [nodejs.org](https://nodejs.org/). ```bash pkgin -y install nodejs ``` -## openSUSE y SLE - -[Descargue Node.js mediante openSUSE one-click](http://software.opensuse.org/download.html?project=devel%3Alanguages%3Anodejs&package=nodejs). - -Paquetes RPM disponibles para: openSUSE 11.4, 12.1, 12.2, 12.3, 13.1, Factory y Tumbleweed; SLE 11 (con las variaciones SP1/SP2/SP3). 
- -Ejemplo de instalación en openSUSE 13.1: +Once the official release is out you will want to uninstall the version built from source: ```bash sudo zypper ar \ @@ -117,25 +125,30 @@ sudo zypper ar \ sudo zypper in nodejs nodejs-devel ``` -## macOS - -Simplemente descargue el [Instalador para macOS](https://nodejs.org/es/#home-downloadhead) directamente desde el sitio web de [nodejs.org](https://nodejs.org/). +## SmartOS y illumos -_Si usted quiere descargar el paquete con bash:_ +Usando **[Homebrew](https://brew.sh/)**: ```bash curl "https://nodejs.org/dist/latest/node-${VERSION:-$(wget -qO- https://nodejs.org/dist/latest/ | sed -nE 's|.*>node-(.*)\.pkg.*|\1|p')}.pkg" > "$HOME/Downloads/node-latest.pkg" && sudo installer -store -pkg "$HOME/Downloads/node-latest.pkg" -target "/" ``` -### Alternativas - -Usando **[Homebrew](https://brew.sh/)**: +Usando **[MacPorts](https://www.macports.org/)**: ```bash brew install node ``` -Usando **[MacPorts](https://www.macports.org/)**: +## Solus + +Usando **[pkgsrc](https://pkgsrc.joyent.com/install-on-osx/)**: + +* **openSUSE Leap 42.2**: `nodejs4` +* **openSUSE Leap 42.3**: `nodejs4`, `nodejs6` +* **openSUSE Tumbleweed**: `nodejs4`, `nodejs6`, `nodejs8` +* **SUSE Linux Enterprise Server (SLES) 12**: `nodejs4`, `nodejs6` (The "Web and Scripting Module" must be [added before installing](https://www.suse.com/documentation/sles-12/book_sle_deployment/data/sec_add-ons_extensions.html).) 
+ +Instale el paquete binario: ```bash port install nodejs @@ -144,45 +157,71 @@ port install nodejs port install nodejs7 ``` -Usando **[pkgsrc](https://pkgsrc.joyent.com/install-on-osx/)**: +## Void Linux -Instale el paquete binario: +Ó compílelo manualmente desde pkgsrc: + +_If you want to download the package with bash:_ ```bash pkgin -y install nodejs ``` -Ó compílelo manualmente desde pkgsrc: +### Alternativas + +Ó compilarlo manualmente desde pkgsrc: ```bash cd pkgsrc/lang/nodejs && bmake install ``` -## SmartOS y illumos - -Las imágenes de SmartOS vienen con pkgsrc pre-instalado. En otras distribuciones de illumos, primero instale **[pkgsrc](https://pkgsrc.joyent.com/install-on-illumos/)**, luego usted puede instalar el paquete binario de la manera usual: +Solus provides Node.js in its main repository. ```bash pkgin -y install nodejs ``` -Ó compilarlo manualmente desde pkgsrc: +Void Linux incluye Node.js estable en el repositorio principal. + +Simplemente descargue el [Instalador para Windows](https://nodejs.org/es/#home-downloadhead) directamente desde el sitio web de [nodejs.org](https://nodejs.org/). ```bash cd pkgsrc/lang/nodejs && bmake install ``` +Usando **[Chocolatey](https://chocolatey.org/)**: + +```bash +sudo eopkg install nodejs +``` + +## Windows + +Usando **[Scoop](https://scoop.sh/)**: + +```bash +xbps-install -Sy nodejs +``` + +Usando **[Chocolatey](https://chocolatey.org/)**: + +```bash +cinst nodejs +# ó para una instalación completa con npm +cinst nodejs.install +``` + ## Solus Solus provides Node.js in its main repository. ```bash -sudo eopkg install nodejs +scoop install nodejs ``` ## Void Linux -Void Linux incluye Node.js estable en el repositorio principal. +Void Linux ships Node.js stable in the main repository. 
```bash xbps-install -Sy nodejs @@ -190,19 +229,19 @@ xbps-install -Sy nodejs ## Windows -Simplemente descargue el [Instalador para Windows](https://nodejs.org/es/#home-downloadhead) directamente desde el sitio web de [nodejs.org](https://nodejs.org/). +Simply download the [Windows Installer](https://nodejs.org/en/#home-downloadhead) directly from the [nodejs.org](https://nodejs.org/) web site. ### Alternativas -Usando **[Chocolatey](https://chocolatey.org/)**: +Using **[Chocolatey](https://chocolatey.org/)**: ```bash cinst nodejs -# ó para una instalación completa con npm +# or for full install with npm cinst nodejs.install ``` -Usando **[Scoop](https://scoop.sh/)**: +Using **[Scoop](https://scoop.sh/)**: ```bash scoop install nodejs diff --git a/locale/es/get-involved/code-and-learn.md b/locale/es/get-involved/code-and-learn.md new file mode 100644 index 000000000000..78944e2973d5 --- /dev/null +++ b/locale/es/get-involved/code-and-learn.md @@ -0,0 +1,24 @@ +--- +title: Code + Learn +layout: contribute.hbs +--- + +# Code + Learn + +Code & Learn events allow you to get started (or go further) with Node.js core contributions. Experienced contributors help guide you through your first (or second or third or fourth) commit to Node.js core. They also are available to provide impromptu guided tours through specific areas of Node.js core source code. 
+ +* [Moscow, Russia on November 6, 2019](https://medium.com/piterjs/announcement-node-js-code-learn-in-moscow-fd997241c77) +* Shanghai, China at [COSCon](https://bagevent.com/event/5744455): November 3, 2019 +* Medellin, Colombia in June 21st & 22nd [NodeConfCo](https://colombia.nodeconf.com/) +* [Saint-Petersburg, Russia on May 26](https://medium.com/piterjs/code-learn-ce20d330530f) +* Bangalore, India at [Node.js - Code & Learn Meetup](https://www.meetup.com/Polyglot-Languages-Runtimes-Java-JVM-nodejs-Swift/events/256057028/): November 17, 2018 +* Kilkenny, Ireland at [NodeConfEU](https://www.nodeconf.eu/): November 4, 2018 +* Vancouver, BC at [Node Interactive](https://events.linuxfoundation.org/events/node-js-interactive-2018/): October 12, 2018 +* [Oakland on April 22, 2017](https://medium.com/the-node-js-collection/code-learn-learn-how-to-contribute-to-node-js-core-8a2dbdf9be45) +* Shanghai at JSConf.CN: July 2017 +* Vancouver, BC at [Node Interactive](http://events.linuxfoundation.org/events/node-interactive): October 6, 2017 +* Kilkenny, Ireland at [NodeConfEU](http://www.nodeconf.eu/): November 5, 2017 +* Austin in December 2016 +* Tokyo in November 2016 +* Amsterdam in September 2016 +* Dublin and London in September 2015 diff --git a/locale/es/get-involved/collab-summit.md b/locale/es/get-involved/collab-summit.md new file mode 100644 index 000000000000..6ae5d97c6e1e --- /dev/null +++ b/locale/es/get-involved/collab-summit.md @@ -0,0 +1,17 @@ +--- +title: Collab Summit +layout: contribute.hbs +--- + +# Collab Summit +Collaboration Summit is an un-conference for bringing current and potential contributors together to discuss Node.js with lively collaboration, education, and knowledge sharing. Committees and working groups come together twice per year to make important decisions while also being able to work on some exciting efforts they want to push forward in-person. + +## Who attends? + +Anyone is welcome to attend Collab Summit. 
During the summit, leaders will help onboard new contributors to groups they'd love to help prior to integrating them into the working sessions. + +This is your opportunity to learn what is happening within the community to jump in and contribute with the skills you have and would like to hone. + +Working groups will put together a schedule so that people can familiarize themselves before folks get onsite, having the general collaborator discussions, and then dive into breakout sessions. + +We'd love to see you at Collab Summit! Check out the [Summit repo](https://github.com/nodejs/summit) for upcoming and past Collab Summits and have a look at the [issues filed](https://github.com/nodejs/summit/issues) that share what individual working groups and committees are looking to discuss in-person. diff --git a/locale/es/get-involved/contribute.md b/locale/es/get-involved/contribute.md new file mode 100644 index 000000000000..6309e099837f --- /dev/null +++ b/locale/es/get-involved/contribute.md @@ -0,0 +1,47 @@ +--- +title: Contributing +layout: contribute.hbs +--- + +# Contributing + +Thank you for your interest in contributing to Node.js! There are multiple ways and places you can contribute, and we're here to help facilitate that. + +## Asking for General Help + +Because the level of activity in the `nodejs/node` repository is so high, questions or requests for general help using Node.js should be directed at the [Node.js help repository](https://github.com/nodejs/help/issues). + +## Reporting an Issue + +If you have found what you believe to be an issue with Node.js please do not hesitate to file an issue on the GitHub project. When filing your issue please make sure you can express the issue with a reproducible test case, and that test case should not include any external dependencies. That is to say, the test case can be executed without anything more than Node.js itself. 
+ +When reporting an issue we also need as much information about your environment that you can include. We never know what information will be pertinent when trying narrow down the issue. Please include at least the following information: + +* Version of Node.js +* Platform you're running on (macOS, SmartOS, Linux, Windows) +* Architecture you're running on (32bit or 64bit and x86 or ARM) + +The Node.js project is currently managed across a number of separate GitHub repositories, each with their own separate issues database. If possible, please direct any issues you are reporting to the appropriate repository but don't worry if things happen to get put in the wrong place, the community of contributors will be more than happy to help get you pointed in the right direction. + +* To report issues specific to Node.js, please use [nodejs/node](https://github.com/nodejs/node) +* To report issues specific to this website, please use [nodejs/nodejs.org](https://github.com/nodejs/nodejs.org/issues) + +## Code contributions + +If you'd like to fix bugs or add a new feature to Node.js, please make sure you consult the [Node.js Contribution Guidelines](https://github.com/nodejs/node/blob/master/CONTRIBUTING.md#pull-requests). The review process by existing collaborators for all contributions to the project is explained there as well. + +If you are wondering how to start, you can check [Node Todo](https://www.nodetodo.org/) which may guide you towards your first contribution. + +## Becoming a collaborator + +By becoming a collaborator, contributors can have even more impact on the project. They can help other contributors by reviewing their contributions, triage issues and take an even bigger part in shaping the project's future. Individuals identified by the TSC as making significant and valuable contributions across any Node.js repository may be made Collaborators and given commit access to the project. 
Activities taken into consideration include (but are not limited to) the quality of: + +* code commits and pull requests +* documentation commits and pull requests +* comments on issues and pull requests +* contributions to the Node.js website +* assistance provided to end users and novice contributors +* participation in Working Groups +* other participation in the wider Node.js community + +If individuals making valuable contributions do not believe they have been considered for commit access, they may [log an issue](https://github.com/nodejs/TSC/issues) or [contact a TSC member](https://github.com/nodejs/TSC#current-members) directly. diff --git a/locale/es/get-involved/index.md b/locale/es/get-involved/index.md index a8116316e504..9aa18e06c3b4 100644 --- a/locale/es/get-involved/index.md +++ b/locale/es/get-involved/index.md @@ -13,12 +13,16 @@ layout: contribute.hbs * El [calendario de la Fundación Node.js](https://nodejs.org/calendar) con todas las reuniones del equipo público. * [Node Weekly](https://nodeweekly.com/) es una lista de correo que recopila los últimos eventos y noticias alrededor de la comunidad de Node.js. * La [Community Committee](https://github.com/nodejs/community-committee) es un comité de alto nivel de la Fundación Node.js centrado en los esfuerzos de la comunidad. +* The [Community Committee](https://github.com/nodejs/community-committee) is a top-level committee in the Node.js Foundation focused on community-facing efforts. +* [Node Slackers](https://www.nodeslackers.com/) is a Node.js-focused Slack community. ## Aprendizaje * La [Documentación oficial de la API](https://nodejs.org/api/) detalla la API de Node. * [NodeSchool.io](https://nodeschool.io/) le enseñará conceptos de Node.js de forma interactiva mediante juegos utilizando la línea de comandos. * La [etiqueta de Node.js en Stack Overflow](https://stackoverflow.com/questions/tagged/node.js) colecciona nueva información cada día. 
+* [The DEV Community Node.js tag](https://dev.to/t/node) is a place to share Node.js projects, articles and tutorials as well as start discussions and ask for feedback on Node.js-related topics. Developers of all skill-levels are welcome to take part. +* [Nodeiflux](https://discordapp.com/invite/vUsrbjd) is a friendly community of Node.js backend developers supporting each other on Discord. ## Sitios de la comunidad internacional y proyectos diff --git a/locale/es/get-involved/node-meetups.md b/locale/es/get-involved/node-meetups.md new file mode 100644 index 000000000000..6333054cda64 --- /dev/null +++ b/locale/es/get-involved/node-meetups.md @@ -0,0 +1,679 @@ +--- +title: Node.js Meetups +layout: contribute.hbs +--- + +# Node.js Meetups + +This is a list of Node.js meetups. Please submit a PR if you'd like to add your local group! + +## Code of Conduct + +If any meetup does not have a CoC and/or is reported as an unsafe place, it will be removed from this list. + +## Notes for adding meetups + +FORMAT + +* [Meetup](https://www.meetup.com/pdxnode/) +* Frequency of meetups +* How to submit a talk? «list here» +* Organizer names (if you wish to provide) +* Organizers contact info (if you wish to provide) + +REQUIREMENTS + +* Please state in your PR if this meetup abides by CoC. +* Link to CoC for verification. +* If you do not currently have a CoC, update the meetup with CoC before submitting. +* Submit your PR in alphabetical order. + +## Meetups + +### Africa + +* [Meetup](https://www.nodejs.africa) +* Frequency of meetups - bi-monthly +* How to submit a talk? [Submit to this form](https://docs.google.com/forms/d/e/1FAIpQLSe3vPkiO8ijtbP7fUhEotKefXU-fWUoDGtUSo1khmtA_7v1WQ/viewform) +* Organizer name - Agiri Abraham +* Organizer contact info - + +### Armenia/Yerevan + +* [Meetup](https://www.facebook.com/nodejsarmenia/) +* Frequency of meetups - quarterly +* How to submit a talk? 
[Write in our Telegram chat](https://t.me/nodejsarmenia) +* Organizer name - Node.js Armenian Community +* Organizer contact info - nodejsarm@gmail.com + +### Argentina + +#### Buenos Aires Province + +##### Buenos Aires + +* [Meetup](https://www.meetup.com/banodejs/) +* Frequency of meetups - monthly +* How to submit a talk? Write a message in the meetup page +* Organizer name - Alejandro Oviedo +* Organizer contact info - + +### Australia + +#### Victoria + +##### Melbourne + +* [Meetup](https://www.meetup.com/NodeMelbourne/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Andrey Sidorov +* Organizer contact info - + +##### Sydney + +* [Meetup](https://www.meetup.com/node-sydney/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - James Richardson. Co-organizer: Jessica Claire +* Organizer contact info - + +### Belgium + +#### Brussels + +##### Brussels + +* [Meetup](https://www.meetup.com/Belgian-node-js-User-Group/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Steven Beeckman +* Organizer contact info - + +### Bolivia + +#### La Paz + +* [Meetup](https://www.meetup.com/LaPazjs) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer names - [Mauricio De La Quintana](https://github.com/maudel), [Guillermo Paredes](https://github.com/GuillermoParedes), [Adrian Zelada](https://github.com/adrianzelada). +* Organizer contact info - [@maudelaquintana](https://twitter.com/maudelaquintana) + +### Brazil + +#### São Paulo + +* [Meetup](https://meetup.com/nodebr) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. 
+* Organizer names - [Erick Wendel](https://github.com/erickwendel), [Alan Hoffmeister](https://github.com/alanhoff), [Igor França](https://github.com/horaddrim), [Icaro Caldeira](https://github.com/icarcal), [Marcus Bergamo](https://github.com/thebergamo), [Igor Halfeld](https://github.com/igorHalfeld), [Lucas Santos](https://github.com/khaosdoctor). +* Organizer contact info - [@erickwendel_](https://twitter.com/erickwendel_), [@_StaticVoid](https://twitter.com/_staticvoid) + +##### Campinas + +* [Meetup](https://www.meetup.com/Nodeschool-Campinas/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Filipe Oliveira +* Organizer contact info - + +#### Minas Gerais + +* [Meetup](https://www.meetup.com/nodebr/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Hugo Iuri +* Organizer contact info - + +#### Rio Grande do Sul + +##### Porto Alegre + +* [Meetup](https://www.meetup.com/Node-js-Porto-Alegre-Meetup/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Henrique Schreiner +* Organizer contact info - + +### Canada + +#### British Columbia + +##### Vancouver + +* [Meetup](https://www.meetup.com/Node-JS-Community-Hangouts) +* Frequency of meetups - quarterly +* How to submit a talk? DM @keywordnew on twitter +* Organizer name - Manil Chowdhury +* Organizer contact info - + +#### Ontario + +##### Toronto + +* [Toronto JS Meetup](http://torontojs.com/) +* Frequency of meetups - weekly +* How to submit a talk? _Contact Organizers through Slack: http://slack.torontojs.com/_ +* Organizers name - Dann T. & Paul D. +* Organizer contact info - _Community Slack_ + +### Chile + +#### Santiago + +* [Meetup](https://www.meetup.com/es-ES/NodersJS/) +* Frequency of meetups - monthly +* How to submit a talk? 
Issue on GitHub [here](https://github.com/Noders/Meetups/issues/new)
+* Organizer name - Rodrigo Adones and Ender Bonnet
+* Organizer contact info - [Rodrigo](https://github.com/L0rdKras), [Ender](https://twitter.com/enbonnet)
+
+### Colombia
+
+#### Antioquia
+
+##### Medellín
+
+* [Meetup](https://www.meetup.com/node_co/)
+* Frequency of meetups - monthly
+* How to submit a talk? Contact organizers in the meetup page.
+* Organizer name - Camilo Montoya
+* Organizer contact info -
+
+### Finland
+
+#### Uusimaa
+
+##### Helsinki
+
+* [Meetup](https://www.meetup.com/Helsinki-Node-js/)
+* Frequency of meetups - monthly
+* How to submit a talk? Contact organizers in the meetup page
+* Organizer name - Juha Lehtomaki
+* Organizer contact info -
+
+### France
+
+#### Île-de-France
+
+##### Paris
+
+* [Meetup](https://www.meetup.com/Nodejs-Paris/)
+* Frequency of meetups - monthly
+* How to submit a talk? Contact organizers in the meetup page or submit your talk on [nodejs.paris website](http://nodejs.paris/meetups)
+* Organizer name - [Etienne Folio](https://twitter.com/Ornthalas), [Stanislas Ormières](https://twitter.com/laruiss), [Nicolas KOKLA](https://twitter.com/nkokla), Quentin Raynaud
+* Organizer contact info -
+
+### Germany
+
+#### Bavaria
+
+##### Passau
+
+* [Meetup](https://www.meetup.com/de-DE/Nodeschool-Passau/)
+* Frequency of meetups - quarterly
+* How to submit a talk? Email [Valentin](mailto:valentin.huber@msg.group)
+* Organizer name - Valentin Huber
+* Organizer contact info - [Email](mailto:valentin.huber@msg.group)
+
+#### Berlin
+
+* [Meetup](https://www.meetup.com/Node-js-Meetup-Berlin/)
+* Frequency of meetups - monthly
+* How to submit a talk? Email [Andreas](mailto:npm@lubbe.org)
+* Organizer name - Andreas Lubbe
+* Organizer contact info - [Email](mailto:npm@lubbe.org)
+
+#### Hamburg
+
+* [Meetup](https://www.meetup.com/node-HH/)
+* Frequency of meetups - monthly and on demand
+* How to submit a talk? 
Contact organizers in the meetup page. +* Organizer name - Gregor Elke, Oliver Lorenz +* Organizer contact info - via Meetup, via [Slack](http://bit.ly/web-hh) + +### Greece + +#### Athens + +* [Meetup](https://www.meetup.com/nodejsathens/) +* Frequency of meetups - every two months +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - [Ioannis Nikolaou](https://www.linkedin.com/in/ioannis-nikolaou/) Co-organizers - Stratoula Kalafateli, [Kostas Siabanis](https://github.com/ksiabani), Megaklis Vasilakis +* Organizer contact info - + +### Hungary + +#### Budapest + +* [Meetup](https://www.meetup.com/nodebp/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Oroszi Róbert +* Organizer contact info - + +### India +#### Maharashtra + +##### Pune + +* [Meetup](https://www.meetup.com/JavaScripters) +* Frequency of meetups - monthly +* How to submit a talk? Send your queries to Pune.javascripters@gmail.com or Contact organizers in the meetup page. +* Organizer name - Imran shaikh & Akash Jarad +* Organizer contact info - javascripters.community@gmail.com + +##### Delhi + +* [Meetup](https://www.meetup.com/nodeJS-Devs/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Gaurav Gandhi. +* Organizer contact info - + +#### Gujarat + +##### Ahmedabad + +* [Meetup](https://www.meetup.com/meetup-group-iAIoTVuS/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page or write to dipesh@rapidops.com +* Organizer name - Dipesh Patel +* Organizer contact info - + +#### Rajasthan + +##### Jaipur + +* [Meetup](https://www.meetup.com/JaipurJS-Developer-Meetup/) +* Frequency of meetups - monthly +* How to submit a talk? 
[Email ayushrawal12@gmail.com](mailto:ayushrawal12@gmail.com) or [reach out to me on LinkedIn](https://www.linkedin.com/in/ayush-rawal) +* Organizer name - [Ayush Rawal](https://github.com/ayush-rawal) +* Organizer contact info - [Email](mailto:ayushrawal12@gmail.com) + +### Indonesia + +#### Jakarta + +* [Meetup](https://www.meetup.com/Node-js-Workshop/) +* Frequency of meetups - monthly - online +* How to submit a talk? [telegram group](https://t.me/nodejsid) +* Organizer name - Lukluk Luhuring Santoso +* Organizer contact info - [Email](mailto:luklukaha@gmail.com) + +### Ireland + +#### Dublin + +* [Meetup](https://www.meetup.com/Dublin-Node-js-Meetup/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Sean Walsh. Co-organizer: Leanne Vaughey +* Organizer contact info - + +* [Meetup](https://www.meetup.com/Nodeschool-Dublin-Meetup/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Anton Whalley +* Organizer contact info - + +### Israel + +#### Tel Aviv + +* [Meetup](https://www.meetup.com/NodeJS-Israel/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page or submit your talk on [Node.js-IL website](http://www.nodejsil.com/). +* Organizer name - [Idan Dagan](https://github.com/idandagan1), [Guy Segev](https://github.com/guyguyon), [Tomer Omri](https://github.com/TomerOmri) +* Organizer contact info - [Email](mailto:nodejsisrael8@gmail.com) + +### Mexico + +#### Mexico City + +* [Meetup](https://www.meetup.com/NodeBotsMX/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Saúl Buentello +* Organizer contact info - + +### New Zealand + +#### Auckland + +* [Meetup](https://www.meetup.com/AucklandNodeJs/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. 
+* Organizer name - George Czabania
+* Organizer contact info -
+
+### Russia
+
+#### Moscow
+
+* [Meetup](https://www.meetup.com/Moscow-NodeJS-Meetup/)
+* Frequency of meetups - every 6-9 months
+* How to submit a talk? Contact organizers in the meetup page or use the contact information below
+* Organizer name - Denis Izmaylov
+* Organizer contact info - [Telegram](https://t.me/DenisIzmaylov) [Twitter](https://twitter.com/DenisIzmaylov) [Facebook](https://facebook.com/denis.izmaylov)
+
+### South Africa
+
+#### Cape Town
+
+* [Meetup](https://www.meetup.com/nodecpt/)
+* Frequency of meetups - monthly
+* How to submit a talk? Contact organizers in the meetup page.
+* Organizer name - Guy Bedford
+* Organizer contact info -
+
+### Spain
+
+#### Madrid
+
+* [Meetup](https://www.meetup.com/Node-js-Madrid/)
+* Frequency of meetups - monthly
+* How to submit a talk? Contact organizers in the meetup page.
+* Organizer name - Alex Fernández
+* Organizer contact info -
+
+### Thailand
+
+#### Bangkok
+
+* [Meetup](https://www.meetup.com/Bangkok-Node-js/)
+* Frequency of meetups - monthly
+* How to submit a talk? Contact organizers in the meetup page.
+* Organizer name - Dylan Jay
+* Organizer contact info -
+
+### Turkey
+
+#### Istanbul
+
+* [Meetup](https://www.meetup.com/nodeschool-istanbul/)
+* Frequency of meetups - monthly
+* How to submit a talk? Contact organizers in the meetup page.
+* Organizer name - Arif Çakıroğlu
+* Organizer contact info -
+
+### United States
+
+#### Arizona
+
+##### Mesa
+
+* [Meetup](https://www.meetup.com/NodeAZ/)
+* Frequency of meetups - monthly
+* How to submit a talk? Contact organizers in the meetup page.
+* Organizer name - Chris Matthieu
+* Organizer contact info -
+
+#### California
+
+##### Los Angeles
+
+* [js.la](https://js.la)
+* Frequency of meetups - monthly
+* How to submit a talk? 
[contribute.js.la](https://contribute.js.la) +* Organizer name - David Guttman +* Organizer contact info - @dguttman on [slack.js.la](https://slack.js.la) + +##### Irvine + +* [Meetup](https://www.meetup.com/Node-JS-OC/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Farsheed Atef +* Organizer contact info - + +##### San Francisco + +* [Meetup](https://www.meetup.com/sfnode/) +* Frequency of meetups - monthly +* How to submit a talk? +* Organizer name - Dan Shaw +* Organizer contact info - + +* [Meetup](https://www.meetup.com/Node-js-Serverside-Javascripters-Club-SF/) +* Frequency of meetups - monthly +* How to submit a talk? +* Organizer name - Matt Pardee +* Organizer contact info - + +#### Colorado + +##### Denver + +* [Meetup](https://www.meetup.com/Node-js-Denver-Boulder/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page +* Organizer name - Brooks Patton +* Organizer contact info - + +#### Florida + +##### Jacksonville + +* [Meetup](https://www.meetup.com/Jax-Node-js-UG/) +* [Website](https://www.jaxnode.com) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - David Fekke +* Organizer contact info - David Fekke at gmail dot com + +#### Georgia + +##### Atlanta + +* [Meetup](https://www.meetup.com/Atlanta-Nodejs-Developers/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Ryan Connelly +* Organizer contact info - + +#### Illinois + +##### Chicago + +* [Meetup](https://www.meetup.com/Chicago-Nodejs/) +* Frequency of meetups - monthly +* How to submit a talk? 
Contact organizers in the meetup page or (https://airtable.com/shrTDwmMH3zsnsWOE) +* Organizer name - Mike Hostetler, Zeke Nierenberg, & Ben Neiswander +* Organizer contact info - + +#### Indiana + +##### Indianapolis + +* [Meetup](https://www.meetup.com/Node-indy/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Mike Seidle +* Organizer contact info - + +#### Massachusetts + +##### Boston + +* [Meetup](https://www.meetup.com/Boston-Node/) +* Frequency of meetups - ~monthly +* How to submit a talk? Contact organizers in the meetup page or post in slack workspace #\_node\_meetup (see below). +* Organizer name - [Brian Sodano](https://github.com/codemouse) +* Organizer contact info - [briansodano@gmail.com](mailto:briansodano@gmail.com) or [Boston JS slack workspace](https://bostonjavascript.slack.com) + +#### Michigan + +##### Detroit + +* [Meetup](https://www.meetup.com/DetNode/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Steve Marsh, Norman Witte and Israel V + +#### Minnesota + +##### Minneapolis + +* [Meetup](https://www.meetup.com/NodeMN/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Mike Frey +* Organizer contact info - + +#### New York + +##### New York + +* [Meetup](https://www.meetup.com/nodejs/) +* Frequency of meetups - monthly +* How to submit a talk? +* Organizer name - Patrick Scott Co-organizer: Matt Walters. +* Organizer contact info - +* How to submit a talk? Contact Pat Scott @ [pat@patscott.io](mailto:pat@patscott.io). Matt Walters @ [meetup@iammattwalters.com](mailto:meetup@iammattwalters.com). 
+* Slack: [join.thenodejsmeetup.com](http://join.thenodejsmeetup.com/) +* Videos: [https://www.youtube.com/c/thenodejsmeetup](https://www.youtube.com/c/thenodejsmeetup) + +#### North Carolina + +##### Raleigh Durham + +* [Meetup](https://www.meetup.com/triangle-nodejs/) +* Frequency of meetups - quarterly +* How to submit a talk? Email ladyleet@nodejs.org +* Organizer name - Tracy Lee +* Organizer contact info - ladyleet@nodejs.org + +#### Oregon + +##### Portland + +* [Meetup](http://pdxnode.org/) +* Frequency of meetups - Biweekly (presentation night 2nd Thursdays, hack night last Thursdays) +* How to submit a talk? [Submit a talk proposal](https://github.com/PDXNode/pdxnode/issues/new), or DM [@obensource](https://twitter.com/obensource) or [@MichelleJLevine](https://twitter.com/MichelleJLevine) on twitter +* Organizer names - Ben Michel, Michelle Levine +* Organizer contact info - Ben: benpmichel@gmail.com, Michelle: michelle@michellejl.com + +#### Pennsylvania + +##### Philadelphia + +* [Meetup](https://www.meetup.com/nodejs-philly/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page: https://www.meetup.com/nodejs-philly/members/14283814/ +* Organizer name - Leomar Durán +* Organizer contact info - + +#### Texas + +##### Austin + +* [Meetup](https://www.meetup.com/austinnodejs/) +* Frequency of meetups - monthly +* How to submit a talk? Contact Matt Walters @ [meetup@iammattwalters.com](mailto:meetup@iammattwalters.com). +* Organizer name - [Matt Walters](https://github.com/mateodelnorte/) +* Organizer contact info - [meetup@iammattwalters.com](mailto:meetup@iammattwalters.com) +* Slack: [join.thenodejsmeetup.com](http://join.thenodejsmeetup.com/) +* Videos: [https://www.youtube.com/c/thenodejsmeetup](https://www.youtube.com/c/thenodejsmeetup) + +* [Meetup](https://www.meetup.com/ATXNodeSchool/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. 
+* Organizer name - Stefan von Ellenrieder +* Organizer contact info - + +##### Dallas + +* [Meetup](https://www.meetup.com/DallasNode/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - [Cameron Steele](https://github.com/ATechAdventurer) +* Organizer contact info - [Cam.steeleis@gmail.com](mailto:Cam.steeleis@gmail.com) + +#### Utah + +##### Salt Lake City + +* [Meetup](https://www.meetup.com/utahnodejs/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Aaron Seth Madsen +* Organizer contact info - + +#### Washington + +##### Seattle + +* [Meetup](https://www.meetup.com/Seattle-Node-js/) +* Frequency of meetups - monthly +* How to submit a talk? +* Organizer name - Ryan Roemer +* Organizer contact info - + +* [Meetup](https://www.meetup.com/Seattle-NodeSchool/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page +* Organizer name - Wil Alvarez +* Organizer contact info - + +#### Washington, DC. + +* [Meetup](https://www.meetup.com/node-dc/) +* Frequency of meetups - monthly +* How to submit a talk? Write to Andrew Dunkman adunkman@gmail.com +* Organizer name - Andrew Dunkman +* Organizer contact info - + +### UK + +#### London +##### LNUG + +* [Meetup](https://www.meetup.com/london-nodejs/) +* [GitHub/lnug](https://github.com/lnug/) +* Frequency of meetups - monthly +* How to submit a talk? Visit our [speakers repos](https://github.com/lnug/speakers), read the guidelines, and [submit a talk proposal as a new issue](https://github.com/lnug/speakers/issues). +* Organizer name - Adam Davis +* Organizer contact info - contact@lnug.org, [@lnugOrg](https://twitter.com/lnugorg) + +##### Node.js Workshops + +* [Meetup](https://www.meetup.com/NodeWorkshops//) +* Frequency of meetups - monthly +* How to submit a talk? 
+* Organizer name - Daryn Holmes +* Organizer contact info - + +#### Cambridge + +* [Meetup](https://www.meetup.com/JavaScript-Cambridge/) +* Frequency of meetups - monthly +* How to submit a talk? +* Organizer name - Joe Parry, co-organizer Rob Moran +* Organizer contact info - + +#### Oxford + +* [JSOxford](https://www.meetup.com/jsoxford/) +* Frequency of meetups - every 2 months +* How to submit a talk? [Submit Form](https://docs.google.com/forms/d/e/1FAIpQLSflx7LU44PuwlyCJj-WwlP_SlrUvxAd8uaXlY7_O65c7RLpGQ/viewform?usp=sf_link) +* Organizer names - Marcus Noble, Seren Davies +* Organizers contact info - organisers@jsoxford.com + +#### Edinburgh + +* [Node.js Edinburgh](https://www.meetup.com/Nodejs-Edinburgh/) +* Frequency of meetups - every 2 months +* How to submit a talk? [Submit Talk](mailto:michael@biggles.io?subject=Node.js%20Talk%20Proposal) +* Organizer names - Michael Antczak +* Organizers contact info - [AntczakMichael](https://twitter.com/AntczakMichael) + +### Ukraine + +#### Kiev + +* [Meetup](https://www.meetup.com/NodeUA/), [Old group](https://www.meetup.com/KievNodeJS/) +* Frequency of meetups - 1-8 times a month +* How to submit a talk? Contact organizer by email. +* Organizer name - Timur Shemsedinov +* Organizer contact info - [Email](mailto:timur.shemsedinov@gmail.com) diff --git a/locale/es/knowledge/HTTP/clients/how-to-access-query-string-parameters.md b/locale/es/knowledge/HTTP/clients/how-to-access-query-string-parameters.md new file mode 100644 index 000000000000..4db8a1043616 --- /dev/null +++ b/locale/es/knowledge/HTTP/clients/how-to-access-query-string-parameters.md @@ -0,0 +1,54 @@ +--- +title: How to access query string parameters +date: '2011-08-26T10:08:50.000Z' +tags: + - http +difficulty: 1 +layout: knowledge-post.hbs +--- + +In Node.js, functionality to aid in the accessing of URL query string parameters is built into the standard library. The built-in `url.parse` method takes care of most of the heavy lifting for us. 
Here is an example script using this handy function and an explanation on how it works:
+
+```js
+const http = require('http');
+const url = require('url');
+
+http.createServer(function (req, res) {
+  const queryObject = url.parse(req.url,true).query;
+  console.log(queryObject);
+
+  res.writeHead(200, {'Content-Type': 'text/html'});
+  res.end('Feel free to add query parameters to the end of the url');
+}).listen(8080);
+```
+
+> To test this code run `node app.js` (app.js is the name of the file) on the terminal and then go to your browser and type `http://localhost:8080/app.js?foo=bad&baz=foo` on the URL bar
+
+The key part of this whole script is this line: `const queryObject = url.parse(req.url,true).query;`. Let's take a look at things from the inside-out. First off, `req.url` will look like `/app.js?foo=bad&baz=foo`. This is the part that is in the URL bar of the browser. Next, it gets passed to `url.parse` which parses out the various elements of the URL (NOTE: the second parameter is a boolean stating whether the method should parse the query string, so we set it to true). Finally, we access the `.query` property, which returns us a nice, friendly JavaScript object with our query string data.
+
+The `url.parse()` method returns an object which has many key-value pairs, one of which is the `query` object. Some other handy information returned by the method includes the `host`, `pathname` and `search` keys.
+
+In the above code:
+
+* `url.parse(req.url,true).query` returns `{ foo: 'bad', baz: 'foo' }`.
+* `url.parse(req.url,true).host` returns `'localhost:8080'`.
+* `url.parse(req.url,true).pathname` returns `'/app.js'`.
+* `url.parse(req.url,true).search` returns `'?foo=bad&baz=foo'`.
+
+### Parsing with querystring
+
+Another way to access query string parameters is parsing them using the built-in `querystring` Node.js module.
+
+This method, however, must be passed just a querystring portion of a url. 
Passing it the whole url, like you did in the `url.parse` example, won't parse the querystrings. + +```js +const querystring = require('querystring'); +const url = "http://example.com/index.html?code=string&key=12&id=false"; +const qs = "code=string&key=12&id=false"; + +console.log(querystring.parse(qs)); +// > { code: 'string', key: '12', id: 'false' } + +console.log(querystring.parse(url)); +// > { 'http://example.com/index.html?code': 'string', key: '12', id: 'false' } +``` diff --git a/locale/fr/about/community.md b/locale/fr/about/community.md new file mode 100644 index 000000000000..64bf747374e3 --- /dev/null +++ b/locale/fr/about/community.md @@ -0,0 +1,57 @@ +--- +title: Community Committee +layout: about.hbs +--- + +# Community Committee + +The Community Committee (CommComm) is a top-level committee in the Node.js Foundation. The CommComm has authority over outward-facing community outreach efforts, including: + +* Community [Evangelism](https://github.com/nodejs/evangelism) +* Education Initiatives +* Cultural Direction of Node.js Foundation +* Community Organization Outreach +* Translation and Internationalization +* Project Moderation/Mediation +* Public Outreach and [Publications](https://medium.com/the-node-js-collection) + +There are four types of involvement with the Community Committee: + +* A **Contributor** is any individual creating or commenting on an issue or pull request. +* A **Collaborator** is a contributor who has been given write access to the repository +* An **Observer** is any individual who has requested or been requested to attend a CommComm meeting. It is also the first step to becoming a Member. +* A **Member** is a collaborator with voting rights who has met the requirements of participation and voted in by the CommComm voting process. + +For the current list of Community Committee members, see the project's [README.md](https://github.com/nodejs/community-committee). 
+ +## Contributors and Collaborators + +It is the mission of CommComm to further build out the Node.js Community. If you're reading this, you're already a part of that community – and as a part of the Node.js Community, we'd love to have your help! + +The [nodejs/community-committee](https://github.com/nodejs/community-committee) GitHub repository is a great place to start. Check out the [issues labeled "Good first issue"](https://github.com/nodejs/community-committee/labels/good%20first%20issue) to see where we're looking for help. If you have your own ideas on how we can engage and build the community, feel free to open your own issues, create pull requests with improvements to our existing work, or help us by sharing your thoughts and ideas in the ongoing discussions we're having in GitHub. + +You can further participate in our ongoing efforts around community building - like localization, evangelism, the Node.js Collection, and others - by digging into their respective repositories and getting involved! + +Before diving in, please be sure to read the [Collaborator Guide](https://github.com/nodejs/community-committee/blob/master/governance/COLLABORATOR_GUIDE.md). + +If you're interested in participating in the Community Committee as a committee member, you should read the section below on **Observers and Membership**, and create an issue asking to be an Observer in our next Community Committee meeting. You can find a great example of such an issue [here](https://github.com/nodejs/community-committee/issues/142). + +## Observers and Membership + +If you're interested in becoming more deeply involved with the Community Committee and its projects, we encourage you to become an active observer, and work toward achieving member status. To become a member you must: + +1. Attend the bi-weekly meetings, investigate issues tagged as good first issue, file issues and pull requests, and provide insight via GitHub as a contributor or collaborator. +2. 
Request to become an Observer by filing an issue. Once added as an Observer to meetings, we will track attendance and participation for 3 months, in accordance with our governance guidelines. You can find a great example of such an issue [here](https://github.com/nodejs/community-committee/issues/142). +3. When you meet the 3 month minimum attendance, and participation expectations, the CommComm will vote to add you as a member. + +Membership is for 6 months. The group will ask on a regular basis if the expiring members would like to stay on. A member just needs to reply to renew. There is no fixed size of the CommComm. However, the expected target is between 9 and 12. You can read more about membership, and other administrative details, in our [Governance Guide](https://github.com/nodejs/community-committee/blob/master/GOVERNANCE.md). + +Regular CommComm meetings are held bi-monthly in a Zoom video conference, and broadcast live to the public on YouTube. Any community member or contributor can ask that something be added to the next meeting's agenda by logging a GitHub Issue. + +Meeting announcements and agendas are posted before the meeting begins in the organization's [GitHub issues](https://github.com/nodejs/community-committee/issues). You can also find the regularly scheduled meetings on the [Node.js Calendar](https://nodejs.org/calendar). To follow Node.js meeting livestreams on YouTube, subscribe to the Node.js Foundation [YouTube channel](https://www.youtube.com/channel/UCQPYJluYC_sn_Qz_XE-YbTQ). Be sure to click the bell to be notified of new videos! + +## Consensus Seeking Process + +The CommComm follows a [Consensus Seeking](https://en.wikipedia.org/wiki/Consensus-seeking_decision-making) decision making model. + +When an agenda item has appeared to reach a consensus, the moderator will ask "Does anyone object?" as a final call for dissent from the consensus. 
If a consensus cannot be reached that has no objections then a majority wins vote is called. It is expected that the majority of decisions made by the CommComm are via a consensus seeking process and that voting is only used as a last-resort. diff --git a/locale/fr/about/governance.md b/locale/fr/about/governance.md index 804ba42613d2..5c056358b86a 100644 --- a/locale/fr/about/governance.md +++ b/locale/fr/about/governance.md @@ -7,141 +7,18 @@ layout: about.hbs ## Comité de Pilotage Technique -Le projet est co-dirigé par un Comité de Pilotage Technique -(Technical Steering Committee - TSC) qui est responsable de -la gouvernance de haut-niveau du projet. - -Le TSC a toute autorité sur ce projet, y compris: - -* La direction technique -* La gouvernance et la gestion du projet (y compris cette politique) -* La politique de contribution -* La gestion des dépôts GitHub -* Les guides de conduite -* La maintenance de la liste des Collaborateurs - -Les invitations originelles à siéger au TSC ont été proposées -à des contributeurs actifs qui avaient une expérience significative -avec la gestion du projet. La participation à ce comité est susceptible -d'évoluer dans le temps en rapport avec les besoins du projet. - -Pour trouver la liste des membres actuels du TSC, voir le [README.md] -(https://github.com/nodejs/node/blob/master/README.md#tsc-technical-steering-committee) du projet. +Le projet est co-dirigé par un Comité de Pilotage Technique (Technical Steering Committee - TSC) qui est responsable de la gouvernance de haut-niveau du projet. ## Collaborateurs -Le dépôt GitHub [nodejs/node](https://github.com/nodejs/node) est -maintenu par le TSC et un groupe de Collaborateurs additionnels -qui sont ajoutés par le TSC de manière continue. - -Les personnes proposant des contributions significatives sont faites -Collaborateurs et se voient accorder les droits d'écriture sur le projet. 
-Ces personnes sont identifiées par le TSC et leur ajout aux Collaborateurs -est discuté lors des réunions hebdomadaires du TSC - -_Note:_ Si vous faites des contributions significatives et n'avez pas encore -obtenu les droits d'écriture, ouvrez un ticket ou contactez un membre du TSC -directement et votre demande sera examinée lors de la prochaine réunion. - -Les modifications de contenu sur le dépôt nodejs/node sont validées de -manière collaborative. N'importe qui possédant un compte GitHub peut -proposer une modification par pull request et elle sera examinée par les -Collaborateurs du projet. Toutes les pull request doivent être relues et acceptées -par un Collaborateur ayant une expertise suffisante et qui soit capable -d'assumer l'entière responsabilité du changement effectué. Dans le cas d'une pull -request proposée par un Collaborateur, un Collaborateur additionnel est requis -pour valider la modification. Un consensus doit être recherché si -d'autres Collaborateurs participent et qu'un désaccord survient sur -une modification. Voyez _Processus de Recherche de Consensus_ plus -bas pour plus de détails sur le modèle de consensus utilisé. +Le TSC a toute autorité sur ce projet, y compris: -Les Collaborateurs peuvent choisir de faire remonter au TSC pour -discussion des modifications significatives ou sujettes à controverse, -ou des modifications n'ayant pas obtenu de consensus, en leur assignant -l'étiquette ***tsc-agenda*** sur une pull request ou un ticket. Le -TSC servira alors d'arbitre final lorsque requis. +Les invitations originelles à siéger au TSC ont été proposées à des contributeurs actifs qui avaient une expérience significative avec la gestion du projet. La participation à ce comité est susceptible d'évoluer dans le temps en rapport avec les besoins du projet. -Pour la liste des Collaborateurs actuels, voir le [README.md] -(https://github.com/nodejs/node/blob/master/README.md#current-project-team-members) du projet. 
+Pour trouver la liste des membres actuels du TSC, voir le \[README.md\] (https://github.com/nodejs/node/blob/master/README.md#tsc-technical-steering-committee) du projet. -Un guide des Collaborateurs est maintenu sur -[COLLABORATOR_GUIDE.md](https://github.com/nodejs/node/blob/master/COLLABORATOR_GUIDE.md). +Le dépôt GitHub [nodejs/node](https://github.com/nodejs/node) est maintenu par le TSC et un groupe de Collaborateurs additionnels qui sont ajoutés par le TSC de manière continue. ## Siéger au TSC -Les sièges au TSC ne sont pas limités dans le temps. Le TSC n'a pas -de taille fixe. Cependant, l'objectif recherché est entre 6 et 12 personnes, -afin d'assurer une couverture adéquate des différents domaines d'expertise, -tout en conservant une capacité de prise de décision efficace. - -Il n'y a aucun prérequis ou qualifications nécessaires au-delà de -ces règles pour siéger au TSC. - -Le TSC peut ajouter des membres additionnels au TSC par une motion -standard du TSC. - -Un membre du TSC peut être retiré du TSC par démission volontaire, ou -par une motion standard du TSC. - -Les changements de modalités de participation au TSC devront être mises -au programme et peuvent être ajoutés comme n'importe quel autre -objet de programme (voir "Réunions du TSC" plus bas). - -Pas plus d'1/3 des membres du TSC ne pourra être affilié au même -employeur. Si le retrait ou la démission d'un membre du TSC, ou le -changement d'employeur d'un membre du TSC, créé une situation dans -laquelle plus d'1/3 des membres du TSC partage le même employeur, -la situation devra immédiatement être corrigée par la démission ou le -retrait d'un ou plusieurs membres du TSC affiliés avec le ou les -employeurs sur-représentés. - -## Réunions du TSC - -Le TSC se réunit de manière hebdomadaire sur Google Hangout On Air. -La réunion est animée par un modérateur désigné et approuvé par le TSC. -Chaque réunion est publiée sur Youtube. 
- -Sont ajoutés au programme les objets qui sont considérés contentieux -ou sont des modifications de gouvernance, de politique de contribution, de -participation au TSC, ou concernant le processus de release. - -L'intention du programme n'est pas d'approuver ou relire tous les -correctifs. Ceci devra être fait de manière continue sur GitHub -et géré par l'ensemble des Collaborateurs. - -N'importe quel membre de la communauté ou contributeur peut demander -à ajouter un objet au programme de la prochaine réunion en ouvrant un -ticket sur GitHub. N'importe quel Collaborateur, membre du TSC ou le -modérateur peut ajouter des objets au programme en ajoutant l'étiquette -***tsc-agenda*** au ticket. - -Avant chaque réunion du TSC, le modérateur partagera le Programme -avec les membres du TSC. Les membres du TSC pourront ajouter n'importe -quel objet de leur choix au programme au début de chaque réunion. Ni le -modérateur ni le TSC ne peuvent retirer ou poser de veto sur les objets. - -Le TSC peut inviter des personnes ou des représentants de certains projets -à participer sans pouvoir de vote. Ces invitations sont actuellement: - -* Un représentant du projet [build](https://github.com/node-forward/build) - choisi par ce projet. - -Le modérateur est chargé de résumer la discussion concernant chaque -objet du programme et de l'envoyer sous forme de pull request après -chaque réunion. - -## Processus de Recherche de Consensus - -Le TSC suit un modèle décisionnaire de -[Recherche de Consensus](https://en.wikipedia.org/wiki/Consensus-seeking_decision-making) -(en anglais). - -Quand un objet du programme a atteint un consensus, le modérateur -demande une dernière fois "Quelqu'un a-t'il une objection?" pour valider -le consensus. - -Si un objet du programme n'atteint pas de consensus, un membre du TSC -peut soit appeler à un vote de clôture, ou à un vote pour reporter -l'objet à la prochaine réunion. 
L'appel au vote doit être approuvé par -la majorité du TSC, sinon la discussion continue. Seule la majorité simple -est requise. +Les personnes proposant des contributions significatives sont faites Collaborateurs et se voient accorder les droits d'écriture sur le projet. Ces personnes sont identifiées par le TSC et leur ajout aux Collaborateurs est discuté lors des réunions hebdomadaires du TSC diff --git a/locale/fr/about/index.md b/locale/fr/about/index.md index 190598df4e57..f8808627e524 100644 --- a/locale/fr/about/index.md +++ b/locale/fr/about/index.md @@ -6,9 +6,7 @@ trademark: Trademark # A propos de Node.js® -En tant qu'environnement d'exécution JavaScript asynchrone et orienté événement, Node.js est conçu -pour générer des applications extensibles. Dans cet exemple ("hello world"), plusieures connexions peuvent être gérées de manière concurrente. -À chaque connexion, la fonction de rappel (_callback function_) est déclenchée, mais si il n'y a rien à faire, Node.js restera inactif. +En tant qu'environnement d'exécution JavaScript asynchrone et orienté événement, Node.js est conçu pour générer des applications extensibles. Dans cet exemple ("hello world"), plusieures connexions peuvent être gérées de manière concurrente. À chaque connexion, la fonction de rappel (_callback function_) est déclenchée, mais si il n'y a rien à faire, Node.js restera inactif. ```javascript const http = require('http'); @@ -27,46 +25,14 @@ server.listen(port, hostname, () => { }); ``` -Ceci contraste avec le modèle de concurrence plus commun dans lequel les processus sytème -sont utilisés. La gestion réseau basée sur les processus est relativement -inefficace et difficile à utiliser. De plus, les utilisateurs de Node.js n'ont pas à se soucier des problèmes d'interblocage des processus -puisqu'il n'y a pas de verrouillage. Aucune fonction de Node.js ou presque -n'effectue d'entrée/sortie, donc le processus ne se bloque pas. 
Et comme rien -n'est bloquant, développer un système extensible est relativement aisé avec Node.js. +Ceci contraste avec le modèle de concurrence plus commun dans lequel les processus sytème sont utilisés. La gestion réseau basée sur les processus est relativement inefficace et difficile à utiliser. De plus, les utilisateurs de Node.js n'ont pas à se soucier des problèmes d'interblocage des processus puisqu'il n'y a pas de verrouillage. Aucune fonction de Node.js ou presque n'effectue d'entrée/sortie, donc le processus ne se bloque pas. Et comme rien n'est bloquant, développer un système extensible est relativement aisé avec Node.js. -Si une partie des termes utilisés ne vous sont pas familliers, voici -un article complet (en anglais) [Bloquant vs Non-Bloquant][]. +Si une partie des termes utilisés ne vous sont pas familliers, voici un article complet (en anglais) [Bloquant vs Non-Bloquant](/en/docs/guides/blocking-vs-non-blocking/). --- -Node.js est conçu de manière similaire et influencé par des -librairies comme [Event Machine][] (en) pour Ruby et [Twisted][] (en) pour Python. -Node.js pousse le modèle événementiel encore plus loin. Il instaure la -[boucle événementielle][] (en) en tant que composant élémentaire de l'environnement d'exécution -et non comme une librairie. Dans les autres systèmes, il y a toujours -un appel bloquant pour démarrer la boucle événementielle. -Le comportement est défini habituellement par des fonctions de rappel au -début du script, et à la fin un serveur est démarré avec un appel bloquant -comme `EventMachine::run()`. Dans Node.js, il n'y a pas d'appel pour démarrer la boucle. -Node.js entre simplement dans la boucle après avoir exécuté le script d'entrée. -Node.js sort de la boucle événementielle lorsqu'il n'y a plus de fonction -de rappel à exécuter. Ce comportement est similaire à celui de JavaScript -dans un navigateur - la boucle événementielle est cachée à l'utilisateur. 
+Node.js est conçu de manière similaire et influencé par des librairies comme [Event Machine](https://github.com/eventmachine/eventmachine) (en) pour Ruby et [Twisted](https://twistedmatrix.com/trac/) (en) pour Python. Node.js pousse le modèle événementiel encore plus loin. Il instaure la [boucle événementielle](/en/docs/guides/event-loop-timers-and-nexttick/) (en) en tant que composant élémentaire de l'environnement d'exécution et non comme une librairie. Dans les autres systèmes, il y a toujours un appel bloquant pour démarrer la boucle événementielle. Le comportement est défini habituellement par des fonctions de rappel au début du script, et à la fin un serveur est démarré avec un appel bloquant comme `EventMachine::run()`. Dans Node.js, il n'y a pas d'appel pour démarrer la boucle. Node.js entre simplement dans la boucle après avoir exécuté le script d'entrée. Node.js sort de la boucle événementielle lorsqu'il n'y a plus de fonction de rappel à exécuter. Ce comportement est similaire à celui de JavaScript dans un navigateur - la boucle événementielle est cachée à l'utilisateur. -HTTP a une place prépondérante dans Node.js, qui a été conçu pour le streaming -et une faible latence. Ceci fait de Node.js une base toute désignée pour une librairie web ou un framework. +HTTP a une place prépondérante dans Node.js, qui a été conçu pour le streaming et une faible latence. Ceci fait de Node.js une base toute désignée pour une librairie web ou un framework. -Et si Node.js a été conçu sans processus multiples, vous pouvez tout de même -profiter d'un environnement multi-coeur. Vous pouvez générer des processus -enfant par le biais de l'API [`child_process.fork()`][] (en), avec lesquels -vous pourrez communiquer facilement. Basé sur la même interface, le - module - [`cluster`][] (en) vous permettra de partager les sockets entre vos processus - pour faire de la répartition de charge entre vos coeurs. 
- -[Bloquant vs Non-Bloquant]: /en/docs/guides/blocking-vs-non-blocking/ -[`child_process.fork()`]: https://nodejs.org/api/child_process.html#child_process_child_process_fork_modulepath_args_options -[`cluster`]: https://nodejs.org/api/cluster.html -[boucle événementielle]: /en/docs/guides/event-loop-timers-and-nexttick/ -[Event Machine]: https://github.com/eventmachine/eventmachine -[Twisted]: https://twistedmatrix.com/trac/ +Et si Node.js a été conçu sans processus multiples, vous pouvez tout de même profiter d'un environnement multi-coeur. Vous pouvez générer des processus enfant par le biais de l'API [`child_process.fork()`][] (en), avec lesquels vous pourrez communiquer facilement. Basé sur la même interface, le module [`cluster`][] (en) vous permettra de partager les sockets entre vos processus pour faire de la répartition de charge entre vos coeurs. diff --git a/locale/fr/about/privacy.md b/locale/fr/about/privacy.md new file mode 100644 index 000000000000..678a0bf4127d --- /dev/null +++ b/locale/fr/about/privacy.md @@ -0,0 +1,94 @@ +--- +title: Privacy Policy +layout: about.hbs +--- + +# Privacy Policy + +NODE.JS FOUNDATION (the "Foundation”) is committed to protecting the privacy of its users. This Privacy Policy (or the “Policy”) applies to its websites (whether currently or in the future supported, hosted or maintained, including without limitation nodejs.org, the “Sites”) and describes the information the Foundation collects about users of the Sites (“users”) and how that information may be used. + +Read the Privacy Policy carefully. By using any Site, you will be deemed to have accepted the terms of the Policy. If you do not agree to accept the terms of the Privacy Policy, you are directed to discontinue accessing or otherwise using the Sites or any materials obtained from the Sites. + +## Changes to the Privacy Policy +The Foundation reserves the right to update and change this Privacy Policy from time to time. 
Each time a user uses the Sites, the current version of the Privacy Policy applies. Accordingly, a user should check the date of this Privacy Policy (which appears at the top) and review for any changes since the last version. If a user does not agree to the Privacy Policy, the user should not use any of the Sites. Continued use of any of the Sites following any revision of this Privacy Policy constitutes an acceptance of any change. + +## What Does this Privacy Policy Cover? +This Privacy Policy covers the Foundation’s treatment of aggregate information collected by the Sites and personal information that you provide in connection with your use of the Sites. This Policy does not apply to the practices of third parties that the Foundation does not own or control, including but not limited to third party services you access through the Foundation, or to individuals that the Foundation does not employ or manage. + +## Children Under 13 Years of Age +Unless specifically indicated within a Site, the Sites are not intended for minor children not of age (including without limitation those under 13), and they should not use the Sites. If you are under 18, you may use the Site only with involvement of a parent or guardian or if you are an emancipated minor. Except as specifically indicated within a Site, we do not knowingly collect or solicit information from, market to or accept services from children. If we become aware that a child under 13 has provided us with personal information without parental consent, we will take reasonable steps to remove such information and terminate the child’s account. If you become aware that a child has provided us with personally identifiable information without parental consent, please contact us at privacy@nodejs.org so we may remove the information. + +## Information About Users that the Foundation Collects +On the Sites, users may order products or services, and register to receive materials. 
Information collected on the Sites includes community forum content, diaries, profiles, photographs, names, unique identifiers (e.g., social media handles or usernames), contact and billing information (e.g., email address, postal address, telephone, fax), and transaction information. In order to access certain personalized services on the Sites, you may be asked to also create and store a username and password for an account from the Foundation. + +In order to tailor the Foundation’s subsequent communications to users and continuously improve the Sites’ products and services, the Foundation may also ask users to provide information regarding their interests, demographics, experience and detailed contact preferences. The Foundation and third party advertising companies may track information concerning a user’s use of the Sites, such as a user’s IP address. + +## How the Foundation Uses the Information Collected +The Foundation may use collected information for any lawful purpose related to the Foundation’s business, including, but not limited to: + +* To understand a user’s needs and create content that is relevant to the user; +* To generate statistical studies; +* To conduct market research and planning by sending user surveys; +* To notify user referrals of services, information, or products when a user requests that the Foundation send such information to referrals; +* To improve services, information, and products; +* To help a user complete a transaction, or provide services or customer support; +* To communicate back to the user; +* To update the user on services, information, and products; +* To personalize a Site for the user; +* To notify the user of any changes with a Site that may affect the user; +* To enforce terms of use on a Site; and +* To allow the user to purchase products, access services, or otherwise engage in activities the user selects. 
+ +User names, identifications ("IDs"), and email addresses (as well as any additional information that a user may choose to post) may be publicly available on a Site when users voluntarily and publicly disclose personal information, such as when a user posts information in conjunction with content subject to an Open Source license, or as part of a message posted to a public forum or a publicly released software application. The personal information you may provide to the Foundation may reveal or allow others to discern aspects of your life that are not expressly stated in your profile (for example, your picture or your name may reveal your hair color, race or approximate age). By providing personal information to us when you create or update your account and profile or post a photograph, you are expressly and voluntarily accepting our Terms of Use and freely accepting and agreeing to our processing of your personal information in ways set out by this Privacy Policy. Supplying information to us, including any information deemed “sensitive” by applicable law, is entirely voluntary on your part. You may withdraw your consent to the Foundation’s collection and processing of your information by closing your account. You should be aware that your information may continue to be viewable to others after you close your account, such as on cached pages on Internet search engines. Users may not be able to change or remove public postings once posted. Such information may be used by visitors of these pages to send unsolicited messages. The Foundation is not responsible for any consequences which may occur from the third-party use of information that a user chooses to submit to public pages. + +## Opt Out +A user will always be able to make the decision whether to proceed with any activity that requests personal information including personally identifiable information. If a user does not provide requested information, the user may not be able to complete certain transactions. 
+ +Users are not licensed to add other users to a Site (even users who entered into transactions with them) or to their mailing lists without written consent. The Foundation encourages users to evaluate privacy and security policies of any of the Sites’ transaction partners before entering into transactions or choosing to disclose information. + +## Email +The Foundation may use (or provide to The Linux Foundation or other third party contractors to use) contact information received by the Foundation to email any user with respect to any Foundation or project of The Linux Foundation (a “Project”) opportunity, event or other matter. + +If a user no longer wishes to receive emails from the Foundation or any Project or any Site, the Foundation will (or, if applicable, have The Linux Foundation) provide instructions in each of its emails on how to be removed from any lists. The Foundation will make commercially reasonable efforts to honor such requests. + +## Photographs +Users may have the opportunity to submit photographs to the Sites for product promotions, contests, and other purposes to be disclosed at the time of request. In these circumstances, the Sites are designed to allow the public to view, download, save, and otherwise access the photographs posted. By submitting a photograph, users waive any privacy expectations users have with respect to the security of such photographs, and the Foundation’s use or exploitation of users’ likeness. You may submit a photograph only if you are the copyright holder or if you are authorized to do so under license by the copyright holder, and by submitting a photograph you agree to indemnify and hold the Foundation, its directors, officers, employees and agents harmless from any claims arising out of your submission. 
By submitting a photograph, you grant the Foundation a perpetual, worldwide, royalty-free license to use the photograph in any media now known or hereinafter invented for any business purpose that the Foundation, at its sole discretion, may decide. + +## Links to Third-Party Sites and Services +The Sites may permit you to access or link to third party websites and information on the Internet, and other websites may contain links to the Sites. When a user uses these links, the user leaves the Sites. The Foundation has not reviewed these third party sites, does not control, and is not responsible for, any of the third party sites, their content or privacy practices. The privacy and security practices of websites accessed from the Sites are not covered by this Privacy Policy, and the Foundation is not responsible for the privacy or security practices or the content of such websites, including but not limited to the third party services you access through the Foundation. If a user decides to access any of the linked sites, the Foundation encourages the user to read the privacy statements of those sites. The user accesses such sites at user’s own risk. + +We may receive information when you use your account to log into a third-party site or application in order to recommend tailored content or advertising to you and to improve your user experience on our site. We may provide reports containing aggregated impression information to third parties to measure Internet traffic and usage patterns. + +## Service Orders +To purchase services, users may be asked to be directed to a third party site, such as PayPal, to pay for their purchases. If applicable, the third party site may collect payment information directly to facilitate a transaction. The Foundation will only record the result of the transaction and any references to the transaction record provided by the third party site. 
The Foundation is not responsible for the services provided or information collected on such third party sites. + +## Sharing of Information +The Foundation may disclose personal or aggregate information that is associated with your profile as described in this Privacy Policy, as permitted by law or as reasonably necessary to: (1) comply with a legal requirement or process, including, but not limited to, civil and criminal subpoenas, court orders or other compulsory disclosures; (2) investigate and enforce this Privacy Policy or our then-current Terms of Use, if any; (3) respond to claims of a violation of the rights of third parties; (4) respond to customer service inquiries; (5) protect the rights, property, or safety of the Foundation, our users, or the public; or (6) as part of the sale of all or a portion of the assets of the Foundation or as a change in control of the organization or one of its affiliates or in preparation for any of these events. The Foundation reserves the right to supply any such information to any organization into which the Foundation may merge in the future or to which it may make any transfer. Any third party to which the Foundation transfers or sells all or any of its assets will have the right to use the personal and other information that you provide in the manner set out in this Privacy Policy. + +## Is Information About Me Secure? +To keep your information safe, prevent unauthorized access or disclosure, maintain data accuracy, and ensure the appropriate use of information, the Foundation implements industry-standard physical, electronic, and managerial procedures to safeguard and secure the information the Foundation collects. However, the Foundation does not guarantee that unauthorized third parties will never defeat measures taken to prevent improper use of personally identifiable information. 
+ +Access to users’ nonpublic personally identifiable information is restricted to the Foundation and Linux Foundation personnel, including contractors for each such organization on a need-to-know basis. + +User passwords are keys to accounts. Use unique numbers, letters, and special characters for passwords and do not disclose passwords to other people in order to prevent loss of account control. Users are responsible for all actions taken in their accounts. Notify the Foundation of any password compromises, and change passwords periodically to maintain account protection. + +In the event the Foundation becomes aware that the security of a Site has been compromised or user’s personally identifiable information has been disclosed to unrelated third parties as a result of external activity, including but not limited to security attacks or fraud, the Foundation reserves the right to take reasonable appropriate measures, including but not limited to, investigation and reporting, and notification to and cooperation with law enforcement authorities. + +While our aim is to keep data from unauthorized or unsafe access, modification or destruction, no method of transmission on the Internet, or method of electronic storage, is 100% secure and we cannot guarantee its absolute security. + +## Data Protection +Given the international scope of the Foundation, personal information may be visible to persons outside your country of residence, including to persons in countries that your own country’s privacy laws and regulations deem deficient in ensuring an adequate level of protection for such information. If you are unsure whether this privacy statement is in conflict with applicable local rules, you should not submit your information. If you are located within the European Union, you should note that your information will be transferred to the United States, which is deemed by the European Union to have inadequate data protection. 
Nevertheless, in accordance with local laws implementing the European Union Privacy Directive on the protection of individuals with regard to the processing of personal data and on the free movement of such data, individuals located in countries outside of the United States of America who submit personal information do thereby consent to the general use of such information as provided in this Privacy Policy and to its transfer to and/or storage in the United States of America. By utilizing any Site and/or directly providing personal information to us, you hereby agree to and acknowledge your understanding of the terms of this Privacy Policy, and consent to have your personal data transferred to and processed in the United States and/or in other jurisdictions as determined by the Foundation, notwithstanding your country of origin, or country, state and/or province of residence. If you do not want your personal information collected and used by the Foundation, please do not visit or use the Sites. + +## Governing Law +This Privacy Policy is governed by the laws of the State of California, United States of America without giving any effect to the principles of conflicts of law. + +## California Privacy Rights +The California Online Privacy Protection Act (“CalOPPA”) permits customers who are California residents and who have provided the Foundation with “personal information” as defined in CalOPPA to request certain information about the disclosure of information to third parties for their direct marketing purposes. If you are a California resident with a question regarding this provision, please contact privacy@nodejs.org. + +Please note that the Foundation does not respond to “do not track” signals or other similar mechanisms intended to allow California residents to opt-out of Internet tracking under CalOPPA. The Foundation may track and/or disclose your online activities over time and across different websites to third parties when you use our services. 
+ +## What to Do in the Event of Lost or Stolen Information +You must promptly notify us if you become aware that any information provided by or submitted to our Site or through our Product is lost, stolen, or used without permission at privacy@nodejs.org. + +## Questions or Concerns +If you have any questions or concerns regarding privacy at the Foundation, please send us a detailed message to [privacy@nodejs.org](mailto:privacy@nodejs.org). diff --git a/locale/fr/about/releases.md b/locale/fr/about/releases.md new file mode 100644 index 000000000000..ceada1ef7a7e --- /dev/null +++ b/locale/fr/about/releases.md @@ -0,0 +1,22 @@ +--- +layout: about-release-schedule.hbs +title: Releases +statuses: + maintenance: 'Maintenance LTS' + active: 'Active LTS' + current: 'Current' + pending: 'Pending' +columns: + - 'Release' + - 'Status' + - 'Codename' + - 'Initial Release' + - 'Active LTS Start' + - 'Maintenance LTS Start' + - 'End-of-life' +schedule-footer: Dates are subject to change. +--- + +# Releases + +Major Node.js versions enter _Current_ release status for six months, which gives library authors time to add support for them. After six months, odd-numbered releases (9, 11, etc.) become unsupported, and even-numbered releases (10, 12, etc.) move to _Active LTS_ status and are ready for general use. _LTS_ release status is "long-term support", which typically guarantees that critical bugs will be fixed for a total of 30 months. Production applications should only use _Active LTS_ or _Maintenance LTS_ releases. diff --git a/locale/fr/about/resources.md b/locale/fr/about/resources.md new file mode 100644 index 000000000000..beb4fd4aaa78 --- /dev/null +++ b/locale/fr/about/resources.md @@ -0,0 +1,31 @@ +--- +layout: about.hbs +title: Logos and Graphics +--- + +# Resources + +## Logo Downloads + +Please review the [trademark policy](/en/about/trademark/) for information about permissible use of Node.js® logos and marks. 
+ +Guidelines for the visual display of the Node.js mark are described in the [Visual Guidelines](/static/documents/foundation-visual-guidelines.pdf). + + + + + + + + + + + + + + + + + + +
Node.js on light backgroundNode.js on dark background
Node.js standard AINode.js reversed AI
Node.js on light backgroundNode.js on dark background
Node.js standard with less color AINode.js reversed with less color AI
diff --git a/locale/fr/about/trademark.md b/locale/fr/about/trademark.md new file mode 100644 index 000000000000..b9d1308145d1 --- /dev/null +++ b/locale/fr/about/trademark.md @@ -0,0 +1,14 @@ +--- +layout: about.hbs +title: Trademark Policy +--- + +# Trademark Policy + +The Node.js trademarks, service marks, and graphics marks are symbols of the quality, performance, and ease of use that people have come to associate with the Node.js software and project. To ensure that the Node.js marks continue to symbolize these qualities, we must ensure that the marks are only used in ways that do not mislead people or cause them to confuse Node.js with other software of lower quality. If we don’t ensure the marks are used in this way, it cannot only confuse users, it can make it impossible to use the mark to protect against people who maliciously exploit the mark in the future. The primary goal of this policy is to make sure that this doesn’t happen to the Node.js mark, so that the community and users of Node.js are always protected in the future. + +At the same time, we’d like community members to feel comfortable spreading the word about Node.js and participating in the Node.js community. Keeping that goal in mind, we’ve tried to make the policy as flexible and easy to understand as legally possible. + +Please read the [full policy](/static/documents/trademark-policy.pdf). If you have any questions don't hesitate to [email us](mailto:trademark@nodejs.org). + +Guidelines for the visual display of the Node.js mark are described in the [Visual Guidelines](/static/documents/foundation-visual-guidelines.pdf). 
diff --git a/locale/fr/about/working-groups.md b/locale/fr/about/working-groups.md new file mode 100644 index 000000000000..06c1f11a5fea --- /dev/null +++ b/locale/fr/about/working-groups.md @@ -0,0 +1,199 @@ +--- +layout: about.hbs +title: Working Groups +--- + +# Core Working Groups + + +Core Working Groups are created by the [Technical Steering Committee (TSC)](https://github.com/nodejs/TSC/blob/master/TSC-Charter.md). + +## Current Working Groups + +* [Addon API](#addon-api) +* [Benchmarking](#benchmarking) +* [Build](#build) +* [Diagnostics](#diagnostics) +* [Docker](#docker) +* [Evangelism](#evangelism) +* [i18n](#i18n) +* [Release](#release) +* [Security](#security) +* [Streams](#streams) + +### [Addon API](https://github.com/nodejs/nan) + +The Addon API Working Group is responsible for maintaining the NAN project and corresponding _nan_ package in npm. The NAN project makes available an abstraction layer for native add-on authors for Node.js, assisting in the writing of code that is compatible with many actively used versions of Node.js, V8 and libuv. + +Responsibilities include: + +* Maintaining the [NAN](https://github.com/nodejs/nan) GitHub repository, including code, issues and documentation. +* Maintaining the [addon-examples](https://github.com/nodejs/node-addon-examples) GitHub repository, including code, issues and documentation. +* Maintaining the C++ Addon API within the Node.js project, in subordination to the Node.js TSC. +* Maintaining the Addon documentation within the Node.js project, in subordination to the Node.js TSC. +* Maintaining the _nan_ package in npm, releasing new versions as appropriate. +* Messaging about the future of the Node.js and NAN interface to give the community advance notice of changes. + +The current members can be found in their [README](https://github.com/nodejs/nan#collaborators). 
+ +### [Benchmarking](https://github.com/nodejs/benchmarking) + +The purpose of the Benchmark Working Group is to gain consensus on an agreed set of benchmarks that can be used to: + +* track and evangelize performance gains made between Node.js releases +* avoid performance regressions between releases + +Responsibilities include: + +* Identifying 1 or more benchmarks that reflect customer usage. Likely will need more than one to cover typical Node.js use cases including low-latency and high concurrency +* Working to get community consensus on the list chosen +* Adding regular execution of chosen benchmarks to Node.js builds +* Tracking/publicizing performance between builds/releases + +### [Build](https://github.com/nodejs/build) + +The Build Working Group's purpose is to create and maintain a distributed automation infrastructure. + +Responsibilities include: + +* Producing packages for all target platforms. +* Running tests. +* Running performance testing and comparisons. +* Creating and managing build-containers. + +### [Diagnostics](https://github.com/nodejs/diagnostics) + +The Diagnostics Working Group's purpose is to surface a set of comprehensive, documented, and extensible diagnostic interfaces for use by Node.js tools and JavaScript VMs. + +Responsibilities include: + +* Collaborating with V8 to integrate `v8_inspector` into Node.js. +* Collaborating with V8 to integrate `trace_event` into Node.js. +* Collaborating with Core to refine `async_wrap` and `async_hooks`. +* Maintaining and improving OS trace system integration (e.g. ETW, LTTNG, dtrace). +* Documenting diagnostic capabilities and APIs in Node.js and its components. +* Exploring opportunities and gaps, discussing feature requests, and addressing conflicts in Node.js diagnostics. +* Fostering an ecosystem of diagnostics tools for Node.js. +* Defining and adding interfaces/APIs in order to allow dumps to be generated when needed. 
+* Defining and adding common structures to the dumps generated in order to support tools that want to introspect those dumps. + +### [Docker](https://github.com/nodejs/docker-node) + +The Docker Working Group's purpose is to build, maintain, and improve official Docker images for the Node.js project. + +Responsibilities include: + +* Keeping the official Docker images updated in line with new Node.js releases. +* Deciding on and implementing image improvements and/or fixes. +* Maintaining and improving the images' documentation. + +### [Evangelism](https://github.com/nodejs/evangelism) + +The Evangelism Working Group promotes the accomplishments of Node.js and lets the community know how they can get involved. + +Responsibilities include: + +* Facilitating project messaging. +* Managing official project social media. +* Handling the promotion of speakers for meetups and conferences. +* Handling the promotion of community events. +* Publishing regular update summaries and other promotional content. + +### [i18n](https://github.com/nodejs/i18n) + +The i18n Working Groups handle more than just translations. They are endpoints for community members to collaborate with each other in their language of choice. + +Each team is organized around a common spoken language. Each language community might then produce multiple localizations for various project resources. + +Responsibilities include: + +* Translating any Node.js materials they believe are relevant to their community. +* Reviewing processes for keeping translations up to date and of high quality. +* Managing and monitoring social media channels in their language. +* Promoting Node.js speakers for meetups and conferences in their language. + +Each language community maintains its own membership. 
+ +* [nodejs-ar - Arabic (العَرَبِيَّة)](https://github.com/nodejs/nodejs-ar) +* [nodejs-bg - Bulgarian (български)](https://github.com/nodejs/nodejs-bg) +* [nodejs-bn - Bengali (বাংলা)](https://github.com/nodejs/nodejs-bn) +* [nodejs-zh-CN - Chinese (简体中文)](https://github.com/nodejs/nodejs-zh-CN) +* [nodejs-cs - Czech (Čeština)](https://github.com/nodejs/nodejs-cs) +* [nodejs-da - Danish (Dansk)](https://github.com/nodejs/nodejs-da) +* [nodejs-de - German (Deutsch)](https://github.com/nodejs/nodejs-de) +* [nodejs-el - Greek (Ελληνικά)](https://github.com/nodejs/nodejs-el) +* [nodejs-es - Spanish (Español)](https://github.com/nodejs/nodejs-es) +* [nodejs-fa - Persian (فارسی)](https://github.com/nodejs/nodejs-fa) +* [nodejs-fi - Finnish (Suomi)](https://github.com/nodejs/nodejs-fi) +* [nodejs-fr - French (Français)](https://github.com/nodejs/nodejs-fr) +* [nodejs-he - Hebrew (עברית)](https://github.com/nodejs/nodejs-he) +* [nodejs-hi - Hindi (हिन्दी)](https://github.com/nodejs/nodejs-hi) +* [nodejs-hu - Hungarian (Magyar)](https://github.com/nodejs/nodejs-hu) +* [nodejs-id - Indonesian (Bahasa Indonesia)](https://github.com/nodejs/nodejs-id) +* [nodejs-it - Italian (Italiano)](https://github.com/nodejs/nodejs-it) +* [nodejs-ja - Japanese (日本語)](https://github.com/nodejs/nodejs-ja) +* [nodejs-ka - Georgian (ქართული)](https://github.com/nodejs/nodejs-ka) +* [nodejs-ko - Korean (한국어)](https://github.com/nodejs/nodejs-ko) +* [nodejs-mk - Macedonian (Македонски)](https://github.com/nodejs/nodejs-mk) +* [nodejs-ms - Malay (بهاس ملايو‎)](https://github.com/nodejs/nodejs-ms) +* [nodejs-nl - Dutch (Nederlands)](https://github.com/nodejs/nodejs-nl) +* [nodejs-no - Norwegian (Norsk)](https://github.com/nodejs/nodejs-no) +* [nodejs-pl - Polish (Język Polski)](https://github.com/nodejs/nodejs-pl) +* [nodejs-pt - Portuguese (Português)](https://github.com/nodejs/nodejs-pt) +* 
[nodejs-ro - Romanian (Română)](https://github.com/nodejs/nodejs-ro) +* [nodejs-ru - Russian (Русский)](https://github.com/nodejs/nodejs-ru) +* [nodejs-sv - Swedish (Svenska)](https://github.com/nodejs/nodejs-sv) +* [nodejs-ta - Tamil (தமிழ்)](https://github.com/nodejs/nodejs-ta) +* [nodejs-tr - Turkish (Türkçe)](https://github.com/nodejs/nodejs-tr) +* [nodejs-zh-TW - Taiwanese (繁體中文(台灣))](https://github.com/nodejs/nodejs-zh-TW) +* [nodejs-uk - Ukrainian (Українська)](https://github.com/nodejs/nodejs-uk) +* [nodejs-vi - Vietnamese (Tiếng Việt)](https://github.com/nodejs/nodejs-vi) + +### [Release](https://github.com/nodejs/Release) + +The Release Working Group manages the release process for Node.js. + +Responsibilities include: + +* Define the release process. +* Define the content of releases. +* Generate and create releases. +* Test Releases. +* Manage the Long Term Support and Current branches including backporting changes to these branches. +* Define the policy for what gets backported to release streams + +### [Security](https://github.com/nodejs/security-wg) + +The Security Working Group manages all aspects and processes linked to Node.js security. + +Responsibilities include: + +* Define and maintain security policies and procedures for: + * the core Node.js project + * other projects maintained by the Node.js Technical Steering Committee (TSC). +* Work with the Node Security Platform to bring community vulnerability data into the foundation as a shared asset. +* Ensure the vulnerability data is updated in an efficient and timely manner. For example, ensuring there are well-documented processes for reporting vulnerabilities in community modules. +* Review and recommend processes for handling of security reports (but not the actual administration of security reports, which are reviewed by a group of people directly delegated to by the TSC). 
+* Define and maintain policies and procedures for the coordination of security concerns within the external Node.js open source ecosystem. +* Offer help to npm package maintainers to fix high-impact security bugs. +* Maintain and make available data on disclosed security vulnerabilities in: + * the core Node.js project + * other projects maintained by the Node.js Foundation technical group + * the external Node.js open source ecosystem +* Promote the improvement of security practices within the Node.js ecosystem. +* Recommend security improvements for the core Node.js project. +* Facilitate and promote the expansion of a healthy security service and product provider ecosystem. + +### [Streams](https://github.com/nodejs/readable-stream) + +The Streams Working Group is dedicated to the support and improvement of the Streams API as used in Node.js and the npm ecosystem. We seek to create a composable API that solves the problem of representing multiple occurrences of an event over time in a humane, low-overhead fashion. Improvements to the API will be driven by the needs of the ecosystem; interoperability and backwards compatibility with other solutions and prior versions are paramount in importance. + +Responsibilities include: + +* Addressing stream issues on the Node.js issue tracker. +* Authoring and editing stream documentation within the Node.js project. +* Reviewing changes to stream subclasses within the Node.js project. +* Redirecting changes to streams from the Node.js project to this project. +* Assisting in the implementation of stream providers within Node.js. +* Recommending versions of `readable-stream` to be included in Node.js. +* Messaging about the future of streams to give the community advance notice of changes. 
diff --git a/locale/fr/docs/es6.md b/locale/fr/docs/es6.md new file mode 100644 index 000000000000..e73cd7bb6604 --- /dev/null +++ b/locale/fr/docs/es6.md @@ -0,0 +1,46 @@ +--- +title: ECMAScript 2015 (ES6) and beyond +layout: docs.hbs +--- + +# ECMAScript 2015 (ES6) and beyond + +Node.js is built against modern versions of [V8](https://v8.dev/). By keeping up-to-date with the latest releases of this engine, we ensure new features from the [JavaScript ECMA-262 specification](http://www.ecma-international.org/publications/standards/Ecma-262.htm) are brought to Node.js developers in a timely manner, as well as continued performance and stability improvements. + +All ECMAScript 2015 (ES6) features are split into three groups for **shipping**, **staged**, and **in progress** features: + +* All **shipping** features, which V8 considers stable, are turned **on by default on Node.js** and do **NOT** require any kind of runtime flag. + +* **Staged** features, which are almost-completed features that are not considered stable by the V8 team, require a runtime flag: `--harmony`. +* **In progress** features can be activated individually by their respective harmony flag, although this is highly discouraged unless for testing purposes. Note: these flags are exposed by V8 and will potentially change without any deprecation notice. + +## Which features ship with which Node.js version by default? + +The website [node.green](https://node.green/) provides an excellent overview of supported ECMAScript features in various versions of Node.js, based on kangax's compat-table. + +## Which features are in progress? + +New features are constantly being added to the V8 engine. Generally speaking, expect them to land on a future Node.js release, although timing is unknown. + +You may list all the *in progress* features available on each Node.js release by grepping through the `--v8-options` argument. 
Please note that these are incomplete and possibly broken features of V8, so use them at your own risk: + +```bash +node --v8-options | grep "in progress" +``` + +## What about the performance of a particular feature? + +The V8 team is constantly working to improve the performance of new language features to eventually reach parity with their transpiled or native counterparts in ECMAScript 5 and earlier. The current progress there is tracked on the website [six-speed](https://fhinkel.github.io/six-speed), which shows the performance of ES2015 and ESNext features compared to their native ES5 counterparts. + +The work on optimizing features introduced with ES2015 and beyond is coordinated via a [performance plan](https://docs.google.com/document/d/1EA9EbfnydAmmU_lM8R_uEMQ-U_v4l9zulePSBkeYWmY), where the V8 team gathers and coordinates areas that need improvement, and design documents to tackle those problems. + +## I have my infrastructure set up to leverage the --harmony flag. Should I remove it? + +The current behaviour of the `--harmony` flag on Node.js is to enable **staged** features only. After all, it is now a synonym of `--es_staging`. As mentioned above, these are completed features that have not been considered stable yet. If you want to play safe, especially on production environments, consider removing this runtime flag until it ships by default on V8 and, consequently, on Node.js. If you keep this enabled, you should be prepared for further Node.js upgrades to break your code if V8 changes their semantics to more closely follow the standard. + +## How do I find which version of V8 ships with a particular version of Node.js? + +Node.js provides a simple way to list all dependencies and respective versions that ship with a specific binary through the `process` global object. 
In case of the V8 engine, type the following in your terminal to retrieve its version: + +```bash +node -p process.versions.v8 +``` diff --git a/locale/fr/docs/guides/anatomy-of-an-http-transaction.md b/locale/fr/docs/guides/anatomy-of-an-http-transaction.md new file mode 100644 index 000000000000..da0a84d34679 --- /dev/null +++ b/locale/fr/docs/guides/anatomy-of-an-http-transaction.md @@ -0,0 +1,316 @@ +--- +title: Anatomy of an HTTP Transaction +layout: docs.hbs +--- + +# Anatomy of an HTTP Transaction + +The purpose of this guide is to impart a solid understanding of the process of Node.js HTTP handling. We'll assume that you know, in a general sense, how HTTP requests work, regardless of language or programming environment. We'll also assume a bit of familiarity with Node.js [`EventEmitters`][] and [`Streams`][]. If you're not quite familiar with them, it's worth taking a quick read through the API docs for each of those. + +## Create the Server + +Any node web server application will at some point have to create a web server object. This is done by using [`createServer`][]. + +```javascript +const http = require('http'); + +const server = http.createServer((request, response) => { + // magic happens here! +}); +``` + +The function that's passed in to [`createServer`][] is called once for every HTTP request that's made against that server, so it's called the request handler. In fact, the [`Server`][] object returned by [`createServer`][] is an [`EventEmitter`][], and what we have here is just shorthand for creating a `server` object and then adding the listener later. + +```javascript +const server = http.createServer(); +server.on('request', (request, response) => { + // the same kind of magic happens here! +}); +``` + +When an HTTP request hits the server, node calls the request handler function with a few handy objects for dealing with the transaction, `request` and `response`. We'll get to those shortly. 
+ +In order to actually serve requests, the [`listen`][] method needs to be called on the `server` object. In most cases, all you'll need to pass to `listen` is the port number you want the server to listen on. There are some other options too, so consult the [API reference](https://nodejs.org/api/http.html). + +## Method, URL and Headers + +When handling a request, the first thing you'll probably want to do is look at the method and URL, so that appropriate actions can be taken. Node.js makes this relatively painless by putting handy properties onto the `request` object. + +```javascript +const { method, url } = request; +``` + +> **Note:** The `request` object is an instance of [`IncomingMessage`][]. + +The `method` here will always be a normal HTTP method/verb. The `url` is the full URL without the server, protocol or port. For a typical URL, this means everything after and including the third forward slash. + +Headers are also not far away. They're in their own object on `request` called `headers`. + +```javascript +const { headers } = request; +const userAgent = headers['user-agent']; +``` + +It's important to note here that all headers are represented in lower-case only, regardless of how the client actually sent them. This simplifies the task of parsing headers for whatever purpose. + +If some headers are repeated, then their values are overwritten or joined together as comma-separated strings, depending on the header. In some cases, this can be problematic, so [`rawHeaders`][] is also available. + +## Request Body + +When receiving a `POST` or `PUT` request, the request body might be important to your application. Getting at the body data is a little more involved than accessing request headers. The `request` object that's passed in to a handler implements the [`ReadableStream`][] interface. This stream can be listened to or piped elsewhere just like any other stream. 
We can grab the data right out of the stream by listening to the stream's `'data'` and `'end'` events. + +The chunk emitted in each `'data'` event is a [`Buffer`][]. If you know it's going to be string data, the best thing to do is collect the data in an array, then at the `'end'`, concatenate and stringify it. + +```javascript +let body = []; +request.on('data', (chunk) => { + body.push(chunk); +}).on('end', () => { + body = Buffer.concat(body).toString(); + // at this point, `body` has the entire request body stored in it as a string +}); +``` + +> **Note:** This may seem a tad tedious, and in many cases, it is. Luckily, there are modules like [`concat-stream`][] and [`body`][] on [`npm`][] which can help hide away some of this logic. It's important to have a good understanding of what's going on before going down that road, and that's why you're here! + +## A Quick Thing About Errors + +Since the `request` object is a [`ReadableStream`][], it's also an [`EventEmitter`][] and behaves like one when an error happens. + +An error in the `request` stream presents itself by emitting an `'error'` event on the stream. **If you don't have a listener for that event, the error will be *thrown*, which could crash your Node.js program.** You should therefore add an `'error'` listener on your request streams, even if you just log it and continue on your way. (Though it's probably best to send some kind of HTTP error response. More on that later.) + +```javascript +request.on('error', (err) => { + // This prints the error message and stack trace to `stderr`. + console.error(err.stack); +}); +``` + +There are other ways of [handling these errors](https://nodejs.org/api/errors.html) such as other abstractions and tools, but always be aware that errors can and do happen, and you're going to have to deal with them. + +## What We've Got so Far + +At this point, we've covered creating a server, and grabbing the method, URL, headers and body out of requests. 
When we put that all together, it might look something like this: + +```javascript +const http = require('http'); + +http.createServer((request, response) => { + const { headers, method, url } = request; + let body = []; + request.on('error', (err) => { + console.error(err); + }).on('data', (chunk) => { + body.push(chunk); + }).on('end', () => { + body = Buffer.concat(body).toString(); + // At this point, we have the headers, method, url and body, and can now + // do whatever we need to in order to respond to this request. + }); +}).listen(8080); // Activates this server, listening on port 8080. +``` + +If we run this example, we'll be able to *receive* requests, but not *respond* to them. In fact, if you hit this example in a web browser, your request would time out, as nothing is being sent back to the client. + +So far we haven't touched on the `response` object at all, which is an instance of [`ServerResponse`][], which is a [`WritableStream`][]. It contains many useful methods for sending data back to the client. We'll cover that next. + +## HTTP Status Code + +If you don't bother setting it, the HTTP status code on a response will always be 200. Of course, not every HTTP response warrants this, and at some point you'll definitely want to send a different status code. To do that, you can set the `statusCode` property. + +```javascript +response.statusCode = 404; // Tell the client that the resource wasn't found. +``` + +There are some other shortcuts to this, as we'll see soon. + +## Setting Response Headers + +Headers are set through a convenient method called [`setHeader`][]. + +```javascript +response.setHeader('Content-Type', 'application/json'); +response.setHeader('X-Powered-By', 'bacon'); +``` + +When setting the headers on a response, the case is insensitive on their names. If you set a header repeatedly, the last value you set is the value that gets sent. 
+ +## Explicitly Sending Header Data + +The methods of setting the headers and status code that we've already discussed assume that you're using "implicit headers". This means you're counting on node to send the headers for you at the correct time before you start sending body data. + +If you want, you can *explicitly* write the headers to the response stream. To do this, there's a method called [`writeHead`][], which writes the status code and the headers to the stream. + +```javascript +response.writeHead(200, { + 'Content-Type': 'application/json', + 'X-Powered-By': 'bacon' +}); +``` + +Once you've set the headers (either implicitly or explicitly), you're ready to start sending response data. + +## Sending Response Body + +Since the `response` object is a [`WritableStream`][], writing a response body out to the client is just a matter of using the usual stream methods. + +```javascript +response.write(''); +response.write(''); +response.write('

Hello, World!

'); +response.write(''); +response.write(''); +response.end(); +``` + +The `end` function on streams can also take in some optional data to send as the last bit of data on the stream, so we can simplify the example above as follows. + +```javascript +response.end('

Hello, World!

'); +``` + +> **Note:** It's important to set the status and headers *before* you start writing chunks of data to the body. This makes sense, since headers come before the body in HTTP responses. + +## Another Quick Thing About Errors + +The `response` stream can also emit `'error'` events, and at some point you're going to have to deal with that as well. All of the advice for `request` stream errors still applies here. + +## Put It All Together + +Now that we've learned about making HTTP responses, let's put it all together. Building on the earlier example, we're going to make a server that sends back all of the data that was sent to us by the user. We'll format that data as JSON using `JSON.stringify`. + +```javascript +const http = require('http'); + +http.createServer((request, response) => { + const { headers, method, url } = request; + let body = []; + request.on('error', (err) => { + console.error(err); + }).on('data', (chunk) => { + body.push(chunk); + }).on('end', () => { + body = Buffer.concat(body).toString(); + // BEGINNING OF NEW STUFF + + response.on('error', (err) => { + console.error(err); + }); + + response.statusCode = 200; + response.setHeader('Content-Type', 'application/json'); + // Note: the 2 lines above could be replaced with this next one: + // response.writeHead(200, {'Content-Type': 'application/json'}) + + const responseBody = { headers, method, url, body }; + + response.write(JSON.stringify(responseBody)); + response.end(); + // Note: the 2 lines above could be replaced with this next one: + // response.end(JSON.stringify(responseBody)) + + // END OF NEW STUFF + }); +}).listen(8080); +``` + +## Echo Server Example + +Let's simplify the previous example to make a simple echo server, which just sends whatever data is received in the request right back in the response. All we need to do is grab the data from the request stream and write that data to the response stream, similar to what we did previously. 
+ +```javascript +const http = require('http'); + +http.createServer((request, response) => { + let body = []; + request.on('data', (chunk) => { + body.push(chunk); + }).on('end', () => { + body = Buffer.concat(body).toString(); + response.end(body); + }); +}).listen(8080); +``` + +Now let's tweak this. We want to only send an echo under the following conditions: + +* The request method is POST. +* The URL is `/echo`. + +In any other case, we want to simply respond with a 404. + +```javascript +const http = require('http'); + +http.createServer((request, response) => { + if (request.method === 'POST' && request.url === '/echo') { + let body = []; + request.on('data', (chunk) => { + body.push(chunk); + }).on('end', () => { + body = Buffer.concat(body).toString(); + response.end(body); + }); + } else { + response.statusCode = 404; + response.end(); + } +}).listen(8080); +``` + +> **Note:** By checking the URL in this way, we're doing a form of "routing". Other forms of routing can be as simple as `switch` statements or as complex as whole frameworks like [`express`][]. If you're looking for something that does routing and nothing else, try [`router`][]. + +Great! Now let's take a stab at simplifying this. Remember, the `request` object is a [`ReadableStream`][] and the `response` object is a [`WritableStream`][]. That means we can use [`pipe`][] to direct data from one to the other. That's exactly what we want for an echo server! + +```javascript +const http = require('http'); + +http.createServer((request, response) => { + if (request.method === 'POST' && request.url === '/echo') { + request.pipe(response); + } else { + response.statusCode = 404; + response.end(); + } +}).listen(8080); +``` + +Yay streams! + +We're not quite done yet though. As mentioned multiple times in this guide, errors can and do happen, and we need to deal with them. 
+ +To handle errors on the request stream, we'll log the error to `stderr` and send a 400 status code to indicate a `Bad Request`. In a real-world application, though, we'd want to inspect the error to figure out what the correct status code and message would be. As usual with errors, you should consult the [`Error` documentation][]. + +On the response, we'll just log the error to `stderr`. + +```javascript +const http = require('http'); + +http.createServer((request, response) => { + request.on('error', (err) => { + console.error(err); + response.statusCode = 400; + response.end(); + }); + response.on('error', (err) => { + console.error(err); + }); + if (request.method === 'POST' && request.url === '/echo') { + request.pipe(response); + } else { + response.statusCode = 404; + response.end(); + } +}).listen(8080); +``` + +We've now covered most of the basics of handling HTTP requests. At this point, you should be able to: + +* Instantiate an HTTP server with a request handler function, and have it listen on a port. +* Get headers, URL, method and body data from `request` objects. +* Make routing decisions based on URL and/or other data in `request` objects. +* Send headers, HTTP status codes and body data via `response` objects. +* Pipe data from `request` objects and to `response` objects. +* Handle stream errors in both the `request` and `response` streams. + +From these basics, Node.js HTTP servers for many typical use cases can be constructed. There are plenty of other things these APIs provide, so be sure to read through the API docs for [`EventEmitters`][], [`Streams`][], and [`HTTP`][]. 
diff --git a/locale/fr/docs/guides/backpressuring-in-streams.md b/locale/fr/docs/guides/backpressuring-in-streams.md
new file mode 100644
index 000000000000..b3fc5c6d3375
--- /dev/null
+++ b/locale/fr/docs/guides/backpressuring-in-streams.md
@@ -0,0 +1,449 @@
+---
+title: Backpressuring in Streams
+layout: docs.hbs
+---
+
+# Backpressuring in Streams
+
+There is a general problem that occurs during data handling called [`backpressure`][], which describes a buildup of data behind a buffer during data transfer. When the receiving end of the transfer has complex operations, or is slower for whatever reason, there is a tendency for data from the incoming source to accumulate, like a clog.
+
+To solve this problem, there must be a delegation system in place to ensure a smooth flow of data from one source to another. Different communities have resolved this issue uniquely to their programs; Unix pipes and TCP sockets are good examples of this, and the mechanism is often referred to as _flow control_. In Node.js, streams have been the adopted solution.
+
+The purpose of this guide is to further detail what backpressure is, and how exactly streams address this in Node.js' source code. The second part of the guide will introduce suggested best practices to ensure your application's code is safe and optimized when implementing streams.
+
+We assume a little familiarity with the general definition of [`backpressure`][], [`Buffer`][], and [`EventEmitters`][] in Node.js, as well as some experience with [`Stream`][]. If you haven't read through those docs, it's not a bad idea to take a look at the API documentation first, as it will help expand your understanding while reading this guide.
+
+## The Problem with Data Handling
+
+In a computer system, data is transferred from one process to another through pipes, sockets, and signals. In Node.js, we find a similar mechanism called [`Stream`][]. Streams are great! 
They do so much for Node.js and almost every part of the internal codebase utilizes that module. As a developer, you are more than encouraged to use them too! + +```javascript +const readline = require('readline'); + +// process.stdin and process.stdout are both instances of Streams. +const rl = readline.createInterface({ + input: process.stdin, + output: process.stdout +}); + +rl.question('Why should you use streams? ', (answer) => { + console.log(`Maybe it's ${answer}, maybe it's because they are awesome! :)`); + + rl.close(); +}); +``` + +A good example of why the backpressure mechanism implemented through streams is a great optimization can be demonstrated by comparing the internal system tools from Node.js' [`Stream`][] implementation. + +In one scenario, we will take a large file (approximately ~9gb) and compress it using the familiar [`zip(1)`][] tool. + +``` +zip The.Matrix.1080p.mkv +``` + +While that will take a few minutes to complete, in another shell we may run a script that takes Node.js' module [`zlib`][], that wraps around another compression tool, [`gzip(1)`][]. + +```javascript +const gzip = require('zlib').createGzip(); +const fs = require('fs'); + +const inp = fs.createReadStream('The.Matrix.1080p.mkv'); +const out = fs.createWriteStream('The.Matrix.1080p.mkv.gz'); + +inp.pipe(gzip).pipe(out); +``` + +To test the results, try opening each compressed file. The file compressed by the [`zip(1)`][] tool will notify you the file is corrupt, whereas the compression finished by [`Stream`][] will decompress without error. + +Note: In this example, we use `.pipe()` to get the data source from one end to the other. However, notice there are no proper error handlers attached. If a chunk of data were to fail to be properly received, the `Readable` source or `gzip` stream will not be destroyed. [`pump`][] is a utility tool that would properly destroy all the streams in a pipeline if one of them fails or closes, and is a must have in this case! 
+ +[`pump`][] is only necessary for Node.js 8.x or earlier, as for Node.js 10.x or later version, [`pipeline`][] is introduced to replace for [`pump`][]. This is a module method to pipe between streams forwarding errors and properly cleaning up and provide a callback when the pipeline is complete. + +Here is an example of using pipeline: + +```javascript +const { pipeline } = require('stream'); +const fs = require('fs'); +const zlib = require('zlib'); + +// Use the pipeline API to easily pipe a series of streams +// together and get notified when the pipeline is fully done. +// A pipeline to gzip a potentially huge video file efficiently: + +pipeline( + fs.createReadStream('The.Matrix.1080p.mkv'), + zlib.createGzip(), + fs.createWriteStream('The.Matrix.1080p.mkv.gz'), + (err) => { + if (err) { + console.error('Pipeline failed', err); + } else { + console.log('Pipeline succeeded'); + } + } +); +``` + +You can also call [`promisify`][] on pipeline to use it with `async` / `await`: + +```javascript +const stream = require('stream'); +const fs = require('fs'); +const zlib = require('zlib'); +const util = require('util'); + +const pipeline = util.promisify(stream.pipeline); + +async function run() { + try { + await pipeline( + fs.createReadStream('The.Matrix.1080p.mkv'), + zlib.createGzip(), + fs.createWriteStream('The.Matrix.1080p.mkv.gz'), + ); + console.log('Pipeline succeeded'); + } catch (err) { + console.error('Pipeline failed', err); + } +} +``` + +## Too Much Data, Too Quickly + +There are instances where a [`Readable`][] stream might give data to the [`Writable`][] much too quickly — much more than the consumer can handle! + +When that occurs, the consumer will begin to queue all the chunks of data for later consumption. The write queue will get longer and longer, and because of this more data must be kept in memory until the entire process has completed. 
+ +Writing to a disk is a lot slower than reading from a disk, thus, when we are trying to compress a file and write it to our hard disk, backpressure will occur because the write disk will not be able to keep up with the speed from the read. + +```javascript +// Secretly the stream is saying: "whoa, whoa! hang on, this is way too much!" +// Data will begin to build up on the read-side of the data buffer as +// `write` tries to keep up with the incoming data flow. +inp.pipe(gzip).pipe(outputFile); +``` + +This is why a backpressure mechanism is important. If a backpressure system was not present, the process would use up your system's memory, effectively slowing down other processes, and monopolizing a large part of your system until completion. + +This results in a few things: + +* Slowing down all other current processes +* A very overworked garbage collector +* Memory exhaustion + +In the following examples we will take out the [return value](https://github.com/nodejs/node/blob/55c42bc6e5602e5a47fb774009cfe9289cb88e71/lib/_stream_writable.js#L239) of the `.write()` function and change it to `true`, which effectively disables backpressure support in Node.js core. In any reference to 'modified' binary, we are talking about running the `node` binary without the `return ret;` line, and instead with the replaced `return true;`. + +## Excess Drag on Garbage Collection + +Let's take a look at a quick benchmark. Using the same example from above, we ran a few time trials to get a median time for both binaries. 
+ +``` + trial (#) | `node` binary (ms) | modified `node` binary (ms) +================================================================= + 1 | 56924 | 55011 + 2 | 52686 | 55869 + 3 | 59479 | 54043 + 4 | 54473 | 55229 + 5 | 52933 | 59723 +================================================================= +average time: | 55299 | 55975 +``` + +Both take around a minute to run, so there's not much of a difference at all, but let's take a closer look to confirm whether our suspicions are correct. We use the Linux tool [`dtrace`][] to evaluate what's happening with the V8 garbage collector. + +The GC (garbage collector) measured time indicates the intervals of a full cycle of a single sweep done by the garbage collector: + +``` +approx. time (ms) | GC (ms) | modified GC (ms) +================================================= + 0 | 0 | 0 + 1 | 0 | 0 + 40 | 0 | 2 + 170 | 3 | 1 + 300 | 3 | 1 + + * * * + * * * + * * * + + 39000 | 6 | 26 + 42000 | 6 | 21 + 47000 | 5 | 32 + 50000 | 8 | 28 + 54000 | 6 | 35 +``` + +While the two processes start off the same and seem to work the GC at the same rate, it becomes evident that after a few seconds with a properly working backpressure system in place, it spreads the GC load across consistent intervals of 4-8 milliseconds until the end of the data transfer. + +However, when a backpressure system is not in place, the V8 garbage collection starts to drag out. The normal binary called the GC approximately **75** times in a minute, whereas, the modified binary fires only **36** times. + +This is the slow and gradual debt accumulating from growing memory usage. As data gets transferred, without a backpressure system in place, more memory is being used for each chunk transfer. + +The more memory that is being allocated, the more the GC has to take care of in one sweep. The bigger the sweep, the more the GC needs to decide what can be freed up, and scanning for detached pointers in a larger memory space will consume more computing power. 
+ +## Memory Exhaustion + +To determine the memory consumption of each binary, we've clocked each process with `/usr/bin/time -lp sudo ./node ./backpressure-example/zlib.js` individually. + +This is the output on the normal binary: + +``` +Respecting the return value of .write() +============================================= +real 58.88 +user 56.79 +sys 8.79 + 87810048 maximum resident set size + 0 average shared memory size + 0 average unshared data size + 0 average unshared stack size + 19427 page reclaims + 3134 page faults + 0 swaps + 5 block input operations + 194 block output operations + 0 messages sent + 0 messages received + 1 signals received + 12 voluntary context switches + 666037 involuntary context switches +``` + +The maximum byte size occupied by virtual memory turns out to be approximately 87.81 mb. + +And now changing the [return value](https://github.com/nodejs/node/blob/55c42bc6e5602e5a47fb774009cfe9289cb88e71/lib/_stream_writable.js#L239) of the [`.write()`][] function, we get: + +``` +Without respecting the return value of .write(): +================================================== +real 54.48 +user 53.15 +sys 7.43 +1524965376 maximum resident set size + 0 average shared memory size + 0 average unshared data size + 0 average unshared stack size + 373617 page reclaims + 3139 page faults + 0 swaps + 18 block input operations + 199 block output operations + 0 messages sent + 0 messages received + 1 signals received + 25 voluntary context switches + 629566 involuntary context switches +``` + +The maximum byte size occupied by virtual memory turns out to be approximately 1.52 gb. + +Without streams in place to delegate the backpressure, there is an order of magnitude greater of memory space being allocated - a huge margin of difference between the same process! + +This experiment shows how optimized and cost-effective Node.js' backpressure mechanism is for your computing system. Now, let's do a break down on how it works! 
+ +## How Does Backpressure Resolve These Issues? + +There are different functions to transfer data from one process to another. In Node.js, there is an internal built-in function called [`.pipe()`][]. There are [other packages](https://github.com/sindresorhus/awesome-nodejs#streams) out there you can use too! Ultimately though, at the basic level of this process, we have two separate components: the _source_ of the data and the _consumer_. + +When [`.pipe()`][] is called from the source, it signals to the consumer that there is data to be transferred. The pipe function helps to set up the appropriate backpressure closures for the event triggers. + +In Node.js the source is a [`Readable`][] stream and the consumer is the [`Writable`][] stream (both of these may be interchanged with a [`Duplex`][] or a [`Transform`][] stream, but that is out-of-scope for this guide). + +The moment that backpressure is triggered can be narrowed exactly to the return value of a [`Writable`][]'s [`.write()`][] function. This return value is determined by a few conditions, of course. + +In any scenario where the data buffer has exceeded the [`highWaterMark`][] or the write queue is currently busy, [`.write()`][] will return `false`. + +When a `false` value is returned, the backpressure system kicks in. It will pause the incoming [`Readable`][] stream from sending any data and wait until the consumer is ready again. Once the data buffer is emptied, a [`'drain'`][] event will be emitted and resume the incoming data flow. + +Once the queue is finished, backpressure will allow data to be sent again. The space in memory that was being used will free itself up and prepare for the next batch of data. + +This effectively allows a fixed amount of memory to be used at any given time for a [`.pipe()`][] function. There will be no memory leakage, no infinite buffering, and the garbage collector will only have to deal with one area in memory! 
+ +So, if backpressure is so important, why have you (probably) not heard of it? Well the answer is simple: Node.js does all of this automatically for you. + +That's so great! But also not so great when we are trying to understand how to implement our own custom streams. + +Note: In most machines, there is a byte size that determines when a buffer is full (which will vary across different machines). Node.js allows you to set your own custom [`highWaterMark`][], but commonly, the default is set to 16kb (16384, or 16 for objectMode streams). In instances where you might want to raise that value, go for it, but do so with caution! + +## Lifecycle of `.pipe()` + +To achieve a better understanding of backpressure, here is a flow-chart on the lifecycle of a [`Readable`][] stream being [piped](https://nodejs.org/docs/latest/api/stream.html#stream_readable_pipe_destination_options) into a [`Writable`][] stream: + +``` + +===================+ + x--> Piping functions +--> src.pipe(dest) | + x are set up during |===================| + x the .pipe method. | Event callbacks | + +===============+ x |-------------------| + | Your Data | x They exist outside | .on('close', cb) | + +=======+=======+ x the data flow, but | .on('data', cb) | + | x importantly attach | .on('drain', cb) | + | x events, and their | .on('unpipe', cb) | ++---------v---------+ x respective callbacks. | .on('error', cb) | +| Readable Stream +----+ | .on('finish', cb) | ++-^-------^-------^-+ | | .on('end', cb) | + ^ | ^ | +-------------------+ + | | | | + | ^ | | + ^ ^ ^ | +-------------------+ +=================+ + ^ | ^ +----> Writable Stream +---------> .write(chunk) | + | | | +-------------------+ +=======+=========+ + | | | | + | ^ | +------------------v---------+ + ^ | +-> if (!chunk) | Is this chunk too big? | + ^ | | emit .end(); | Is the queue busy? 
| + | | +-> else +-------+----------------+---+ + | ^ | emit .write(); | | + | ^ ^ +--v---+ +---v---+ + | | ^-----------------------------------< No | | Yes | + ^ | +------+ +---v---+ + ^ | | + | ^ emit .pause(); +=================+ | + | ^---------------^-----------------------+ return false; <-----+---+ + | +=================+ | + | | + ^ when queue is empty +============+ | + ^------------^-----------------------< Buffering | | + | |============| | + +> emit .drain(); | ^Buffer^ | | + +> emit .resume(); +------------+ | + | ^Buffer^ | | + +------------+ add chunk to queue | + | <---^---------------------< + +============+ +``` + +Note: If you are setting up a pipeline to chain together a few streams to manipulate your data, you will most likely be implementing [`Transform`][] stream. + +In this case, your output from your [`Readable`][] stream will enter in the [`Transform`][] and will pipe into the [`Writable`][]. + +```javascript +Readable.pipe(Transformable).pipe(Writable); +``` + +Backpressure will be automatically applied, but note that both the incoming and outgoing `highWaterMark` of the [`Transform`][] stream may be manipulated and will effect the backpressure system. + +## Backpressure Guidelines + +Since [Node.js v0.10](https://nodejs.org/docs/v0.10.0/), the [`Stream`][] class has offered the ability to modify the behaviour of the [`.read()`][] or [`.write()`][] by using the underscore version of these respective functions ([`._read()`][] and [`._write()`][]). + +There are guidelines documented for [implementing Readable streams](https://nodejs.org/docs/latest/api/stream.html#stream_implementing_a_readable_stream) and [implementing Writable streams](https://nodejs.org/docs/latest/api/stream.html#stream_implementing_a_writable_stream). We will assume you've read these over, and the next section will go a little bit more in-depth. + +## Rules to Abide By When Implementing Custom Streams + +The golden rule of streams is **to always respect backpressure**. 
What constitutes best practice is non-contradictory practice. So long as you are careful to avoid behaviours that conflict with internal backpressure support, you can be sure you're following good practice.
+
+In general,
+
+1. Never `.push()` if you are not asked.
+2. Never call `.write()` after it returns false but wait for 'drain' instead.
+3. Streams change between different Node.js versions, and the library you use. Be careful and test things.
+
+Note: In regards to point 3, an incredibly useful package for building browser streams is [`readable-stream`][]. Rod Vagg has written a [great blog post](https://r.va.gg/2014/06/why-i-dont-use-nodes-core-stream-module.html) describing the utility of this library. In short, it provides a type of automated graceful degradation for [`Readable`][] streams, and supports older versions of browsers and Node.js.
+
+## Rules specific to Readable Streams
+
+So far, we have taken a look at how [`.write()`][] affects backpressure and have focused much on the [`Writable`][] stream. Because of Node.js' functionality, data is technically flowing downstream from [`Readable`][] to [`Writable`][]. However, as we can observe in any transmission of data, matter, or energy, the source is just as important as the destination and the [`Readable`][] stream is vital to how backpressure is handled.
+
+Both these processes rely on one another to communicate effectively; if the [`Readable`][] ignores when the [`Writable`][] stream asks for it to stop sending in data, it can be just as problematic as when the [`.write()`][]'s return value is incorrect.
+
+So, as well as respecting the [`.write()`][] return, we must also respect the return value of [`.push()`][] used in the [`._read()`][] method. If [`.push()`][] returns a `false` value, the stream will stop reading from the source. Otherwise, it will continue without pause. 
+ +Here is an example of bad practice using [`.push()`][]: + +```javascript +// This is problematic as it completely ignores return value from push +// which may be a signal for backpressure from the destination stream! +class MyReadable extends Readable { + _read(size) { + let chunk; + while (null !== (chunk = getNextChunk())) { + this.push(chunk); + } + } +} +``` + +Additionally, from outside the custom stream, there are pratfalls for ignoring backpressure. In this counter-example of good practice, the application's code forces data through whenever it is available (signaled by the [`'data'` event][]): + +```javascript +// This ignores the backpressure mechanisms Node.js has set in place, +// and unconditionally pushes through data, regardless if the +// destination stream is ready for it or not. +readable.on('data', (data) => + writable.write(data) +); +``` + +## Rules specific to Writable Streams + +Recall that a [`.write()`][] may return true or false dependent on some conditions. Luckily for us, when building our own [`Writable`][] stream, the [`stream state machine`][] will handle our callbacks and determine when to handle backpressure and optimize the flow of data for us. + +However, when we want to use a [`Writable`][] directly, we must respect the [`.write()`][] return value and pay close attention to these conditions: + +* If the write queue is busy, [`.write()`][] will return false. +* If the data chunk is too large, [`.write()`][] will return false (the limit is indicated by the variable, [`highWaterMark`][]). +```javascript +// This writable is invalid because of the async nature of JavaScript callbacks. +// Without a return statement for each callback prior to the last, +// there is a great chance multiple callbacks will be called. 
+class MyWritable extends Writable { + _write(chunk, encoding, callback) { + if (chunk.toString().indexOf('a') >= 0) + callback(); + else if (chunk.toString().indexOf('b') >= 0) + callback(); + callback(); + } +} + +// The proper way to write this would be: + if (chunk.contains('a')) + return callback(); + if (chunk.contains('b')) + return callback(); + callback(); +``` + +There are also some things to look out for when implementing [`._writev()`][]. The function is coupled with [`.cork()`][], but there is a common mistake when writing: + +```javascript +// Using .uncork() twice here makes two calls on the C++ layer, rendering the +// cork/uncork technique useless. +ws.cork(); +ws.write('hello '); +ws.write('world '); +ws.uncork(); + +ws.cork(); +ws.write('from '); +ws.write('Matteo'); +ws.uncork(); + +// The correct way to write this is to utilize process.nextTick(), which fires +// on the next event loop. +ws.cork(); +ws.write('hello '); +ws.write('world '); +process.nextTick(doUncork, ws); + +ws.cork(); +ws.write('from '); +ws.write('Matteo'); +process.nextTick(doUncork, ws); + +// As a global function. +function doUncork(stream) { + stream.uncork(); +} +``` + +[`.cork()`][] can be called as many times we want, we just need to be careful to call [`.uncork()`][] the same amount of times to make it flow again. + +## Conclusion + +Streams are an often used module in Node.js. They are important to the internal structure, and for developers, to expand and connect across the Node.js modules ecosystem. + +Hopefully, you will now be able to troubleshoot, safely code your own [`Writable`][] and [`Readable`][] streams with backpressure in mind, and share your knowledge with colleagues and friends. + +Be sure to read up more on [`Stream`][] for other API functions to help improve and unleash your streaming capabilities when building an application with Node.js. 
diff --git a/locale/fr/docs/guides/blocking-vs-non-blocking.md b/locale/fr/docs/guides/blocking-vs-non-blocking.md new file mode 100644 index 000000000000..579d2c912e5c --- /dev/null +++ b/locale/fr/docs/guides/blocking-vs-non-blocking.md @@ -0,0 +1,103 @@ +--- +title: Overview of Blocking vs Non-Blocking +layout: docs.hbs +--- + +# Overview of Blocking vs Non-Blocking + +This overview covers the difference between **blocking** and **non-blocking** calls in Node.js. This overview will refer to the event loop and libuv but no prior knowledge of those topics is required. Readers are assumed to have a basic understanding of the JavaScript language and Node.js [callback pattern](/en/knowledge/getting-started/control-flow/what-are-callbacks/). + +> "I/O" refers primarily to interaction with the system's disk and network supported by [libuv](https://libuv.org/). + +## Blocking + +**Blocking** is when the execution of additional JavaScript in the Node.js process must wait until a non-JavaScript operation completes. This happens because the event loop is unable to continue running JavaScript while a **blocking** operation is occurring. + +In Node.js, JavaScript that exhibits poor performance due to being CPU intensive rather than waiting on a non-JavaScript operation, such as I/O, isn't typically referred to as **blocking**. Synchronous methods in the Node.js standard library that use libuv are the most commonly used **blocking** operations. Native modules may also have **blocking** methods. + +All of the I/O methods in the Node.js standard library provide asynchronous versions, which are **non-blocking**, and accept callback functions. Some methods also have **blocking** counterparts, which have names that end with `Sync`. + +## Comparing Code + +**Blocking** methods execute **synchronously** and **non-blocking** methods execute **asynchronously**. 
+ +Using the File System module as an example, this is a **synchronous** file read: + +```js +const fs = require('fs'); +const data = fs.readFileSync('/file.md'); // blocks here until file is read +``` + +And here is an equivalent **asynchronous** example: + +```js +const fs = require('fs'); +fs.readFile('/file.md', (err, data) => { + if (err) throw err; +}); +``` + +The first example appears simpler than the second but has the disadvantage of the second line **blocking** the execution of any additional JavaScript until the entire file is read. Note that in the synchronous version if an error is thrown it will need to be caught or the process will crash. In the asynchronous version, it is up to the author to decide whether an error should throw as shown. + +Let's expand our example a little bit: + +```js +const fs = require('fs'); +const data = fs.readFileSync('/file.md'); // blocks here until file is read +console.log(data); +moreWork(); // will run after console.log +``` + +And here is a similar, but not equivalent asynchronous example: + +```js +const fs = require('fs'); +fs.readFile('/file.md', (err, data) => { + if (err) throw err; + console.log(data); +}); +moreWork(); // will run before console.log +``` + +In the first example above, `console.log` will be called before `moreWork()`. In the second example `fs.readFile()` is **non-blocking** so JavaScript execution can continue and `moreWork()` will be called first. The ability to run `moreWork()` without waiting for the file read to complete is a key design choice that allows for higher throughput. + +## Concurrency and Throughput + +JavaScript execution in Node.js is single threaded, so concurrency refers to the event loop's capacity to execute JavaScript callback functions after completing other work. Any code that is expected to run in a concurrent manner must allow the event loop to continue running as non-JavaScript operations, like I/O, are occurring. 
+ +As an example, let's consider a case where each request to a web server takes 50ms to complete and 45ms of that 50ms is database I/O that can be done asynchronously. Choosing **non-blocking** asynchronous operations frees up that 45ms per request to handle other requests. This is a significant difference in capacity just by choosing to use **non-blocking** methods instead of **blocking** methods. + +The event loop is different than models in many other languages where additional threads may be created to handle concurrent work. + +## Dangers of Mixing Blocking and Non-Blocking Code + +There are some patterns that should be avoided when dealing with I/O. Let's look at an example: + +```js +const fs = require('fs'); +fs.readFile('/file.md', (err, data) => { + if (err) throw err; + console.log(data); +}); +fs.unlinkSync('/file.md'); +``` + +In the above example, `fs.unlinkSync()` is likely to be run before `fs.readFile()`, which would delete `file.md` before it is actually read. A better way to write this, which is completely **non-blocking** and guaranteed to execute in the correct order is: + +```js +const fs = require('fs'); +fs.readFile('/file.md', (readFileErr, data) => { + if (readFileErr) throw readFileErr; + console.log(data); + fs.unlink('/file.md', (unlinkErr) => { + if (unlinkErr) throw unlinkErr; + }); +}); +``` + +The above places a **non-blocking** call to `fs.unlink()` within the callback of `fs.readFile()` which guarantees the correct order of operations. 
+ +## Additional Resources + +* [libuv](https://libuv.org/) +* [About Node.js](/en/about/) diff --git a/locale/fr/docs/guides/buffer-constructor-deprecation.md b/locale/fr/docs/guides/buffer-constructor-deprecation.md new file mode 100644 index 000000000000..8f5611e2e430 --- /dev/null +++ b/locale/fr/docs/guides/buffer-constructor-deprecation.md @@ -0,0 +1,220 @@ +--- +title: Porting to the Buffer.from()/Buffer.alloc() API +layout: docs.hbs +--- + +# Porting to the `Buffer.from()`/`Buffer.alloc()` API + +## Overview + +This guide explains how to migrate to safe `Buffer` constructor methods. The migration fixes the following deprecation warning: + +
+The Buffer() and new Buffer() constructors are not recommended for use due to security and usability concerns. Please use the new Buffer.alloc(), Buffer.allocUnsafe(), or Buffer.from() construction methods instead. +
+ +* [Variant 1: Drop support for Node.js ≤ 4.4.x and 5.0.0 — 5.9.x](#variant-1) (*recommended*) +* [Variant 2: Use a polyfill](#variant-2) +* [Variant 3: Manual detection, with safeguards](#variant-3) + +### Finding problematic bits of code using `grep` + +Just run `grep -nrE '[^a-zA-Z](Slow)?Buffer\s*\(' --exclude-dir node_modules`. + +It will find all the potentially unsafe places in your own code (with some considerably unlikely exceptions). + +### Finding problematic bits of code using Node.js 8 + +If you’re using Node.js ≥ 8.0.0 (which is recommended), Node.js exposes multiple options that help with finding the relevant pieces of code: + +* `--trace-warnings` will make Node.js show a stack trace for this warning and other warnings that are printed by Node.js. +* `--trace-deprecation` does the same thing, but only for deprecation warnings. +* `--pending-deprecation` will show more types of deprecation warnings. In particular, it will show the `Buffer()` deprecation warning, even on Node.js 8. + +You can set these flags using environment variables: + +```bash +$ export NODE_OPTIONS='--trace-warnings --pending-deprecation' +$ cat example.js +'use strict'; +const foo = new Buffer('foo'); +$ node example.js +(node:7147) [DEP0005] DeprecationWarning: The Buffer() and new Buffer() constructors are not recommended for use due to security and usability concerns. Please use the new Buffer.alloc(), Buffer.allocUnsafe(), or Buffer.from() construction methods instead. + at showFlaggedDeprecation (buffer.js:127:13) + at new Buffer (buffer.js:148:3) + at Object. (/path/to/example.js:2:13) + [... more stack trace lines ...] +``` + +### Finding problematic bits of code using linters + +ESLint rules [no-buffer-constructor](https://eslint.org/docs/rules/no-buffer-constructor) or [node/no-deprecated-api](https://github.com/mysticatea/eslint-plugin-node/blob/master/docs/rules/no-deprecated-api.md) also find calls to deprecated `Buffer()` API. 
Those rules are included in some presets. + +There is a drawback, though, that it doesn't always [work correctly](https://github.com/chalker/safer-buffer#why-not-safe-buffer) when `Buffer` is overridden e.g. with a polyfill, so recommended is a combination of this and some other method described above. + +## Variant 1: Drop support for Node.js ≤ 4.4.x and 5.0.0 — 5.9.x + +This is the recommended solution nowadays that would imply only minimal overhead. + +The Node.js 5.x release line has been unsupported since July 2016, and the Node.js 4.x release line reaches its End of Life in April 2018 (→ [Schedule](https://github.com/nodejs/Release#release-schedule)). This means that these versions of Node.js will *not* receive any updates, even in case of security issues, so using these release lines should be avoided, if at all possible. + +What you would do in this case is to convert all `new Buffer()` or `Buffer()` calls to use `Buffer.alloc()` or `Buffer.from()`, in the following way: + +* For `new Buffer(number)`, replace it with `Buffer.alloc(number)`. +* For `new Buffer(string)` (or `new Buffer(string, encoding)`), replace it with `Buffer.from(string)` (or `Buffer.from(string, encoding)`). +* For all other combinations of arguments (these are much rarer), also replace `new Buffer(...arguments)` with `Buffer.from(...arguments)`. + +Note that `Buffer.alloc()` is also _faster_ on the current Node.js versions than `new Buffer(size).fill(0)`, which is what you would otherwise need to ensure zero-filling. + +Enabling ESLint rule [no-buffer-constructor](https://eslint.org/docs/rules/no-buffer-constructor) or [node/no-deprecated-api](https://github.com/mysticatea/eslint-plugin-node/blob/master/docs/rules/no-deprecated-api.md) is recommended to avoid accidental unsafe `Buffer` API usage. 
+ +There is also a [JSCodeshift codemod](https://github.com/joyeecheung/node-dep-codemod#dep005) for automatically migrating `Buffer` constructors to `Buffer.alloc()` or `Buffer.from()`. Note that it currently only works with cases where the arguments are literals or where the constructor is invoked with two arguments. + +_If you currently support those older Node.js versions and dropping support for them is not possible, or if you support older branches of your packages, consider using [Variant 2](#variant-2) or [Variant 3](#variant-3) on older branches, so people using those older branches will also receive the fix. That way, you will eradicate potential issues caused by unguarded `Buffer` API usage and your users will not observe a runtime deprecation warning when running your code on Node.js 10._ + +## Variant 2: Use a polyfill + +There are three different polyfills available: + +* **[safer-buffer](https://www.npmjs.com/package/safer-buffer)** is a drop-in replacement for the entire `Buffer` API, that will _throw_ when using `new Buffer()`. + + You would take exactly the same steps as in [Variant 1](#variant-1), but with a polyfill `const Buffer = require('safer-buffer').Buffer` in all files where you use the new `Buffer` API. + + Do not use the old `new Buffer()` API. In any files where the line above is added, using old `new Buffer()` API will _throw_. + +* **[buffer-from](https://www.npmjs.com/package/buffer-from) and/or [buffer-alloc](https://www.npmjs.com/package/buffer-alloc)** are [ponyfills](https://ponyfill.com/) for their respective part of the `Buffer` API. You only need to add the package(s) corresponding to the API you are using. + + You would import the module needed with an appropriate name, e.g. `const bufferFrom = require('buffer-from')` and then use that instead of the call to `new Buffer()`, e.g. `new Buffer('test')` becomes `bufferFrom('test')`. 
+ + A downside with this approach is slightly more code changes to migrate off them (as you would be using e.g. `Buffer.from()` under a different name). + +* **[safe-buffer](https://www.npmjs.com/package/safe-buffer)** is also a drop-in replacement for the entire `Buffer` API, but using `new Buffer()` will still work as before. + + A downside to this approach is that it will allow you to also use the older `new Buffer()` API in your code, which is problematic since it can cause issues in your code, and will start emitting runtime deprecation warnings starting with Node.js 10 ([read more here](https://github.com/chalker/safer-buffer#why-not-safe-buffer)). + +Note that in either case, it is important that you also remove all calls to the old `Buffer` API manually — just throwing in `safe-buffer` doesn't fix the problem by itself, it just provides a polyfill for the new API. I have seen people doing that mistake. + +Enabling ESLint rule [no-buffer-constructor](https://eslint.org/docs/rules/no-buffer-constructor) or [node/no-deprecated-api](https://github.com/mysticatea/eslint-plugin-node/blob/master/docs/rules/no-deprecated-api.md) is recommended. + +_Don't forget to drop the polyfill usage once you drop support for Node.js < 4.5.0._ + +## Variant 3 — Manual detection, with safeguards + +This is useful if you create `Buffer` instances in only a few places (e.g. one), or you have your own wrapper around them. + +### `Buffer(0)` + +This special case for creating empty buffers can be safely replaced with `Buffer.concat([])`, which returns the same result all the way down to Node.js 0.8.x. 
+ +### `Buffer(notNumber)` + +Before: + +```js +const buf = new Buffer(notNumber, encoding); +``` + +After: + +```js +let buf; +if (Buffer.from && Buffer.from !== Uint8Array.from) { + buf = Buffer.from(notNumber, encoding); +} else { + if (typeof notNumber === 'number') { + throw new Error('The "size" argument must be not of type number.'); + } + buf = new Buffer(notNumber, encoding); +} +``` + +`encoding` is optional. + +Note that the `typeof notNumber` before `new Buffer()` is required (for cases when `notNumber` argument is not hard-coded) and _is not caused by the deprecation of `Buffer` constructor_ — it's exactly _why_ the `Buffer` constructor is deprecated. Ecosystem packages lacking this type-check caused numerous security issues — situations when unsanitized user input could end up in the `Buffer(arg)` create problems ranging from DoS to leaking sensitive information to the attacker from the process memory. + +When `notNumber` argument is hardcoded (e.g. literal `"abc"` or `[0,1,2]`), the `typeof` check can be omitted. + +Also, note that using TypeScript does not fix this problem for you — when libs written in `TypeScript` are used from JS, or when user input ends up there — it behaves exactly as pure JS, as all type checks are translation-time only and are not present in the actual JS code which TS compiles to. + +### `Buffer(number)` + +For Node.js 0.10.x (and below) support: + +```js +var buf; +if (Buffer.alloc) { + buf = Buffer.alloc(number); +} else { + buf = new Buffer(number); + buf.fill(0); +} +``` + +Otherwise (Node.js ≥ 0.12.x): + +```js +const buf = Buffer.alloc ? Buffer.alloc(number) : new Buffer(number).fill(0); +``` + +## Regarding `Buffer.allocUnsafe()` + +Be extra cautious when using `Buffer.allocUnsafe()`: + +* Don't use it if you don't have a good reason to + * e.g. 
you probably won't ever see a performance difference for small buffers, in fact, those might be even faster with `Buffer.alloc()`,
+  * if your code is not in the hot code path — you also probably won't notice a difference,
+  * keep in mind that zero-filling minimizes the potential risks.
+* If you use it, make sure that you never return the buffer in a partially-filled state,
+  * if you are writing to it sequentially — always truncate it to the actual written length
+
+Errors in handling buffers allocated with `Buffer.allocUnsafe()` could result in various issues, ranging from undefined behavior of your code to sensitive data (user input, passwords, certs) leaking to the remote attacker.
+
+_Note that the same applies to `new Buffer()` usage without zero-filling, depending on the Node.js version (and lacking type checks also adds DoS to the list of potential problems)._
+
+## FAQ
+
+### What is wrong with the
+
+`Buffer` constructor?
+
+The `Buffer` constructor could be used to create a buffer in many different ways:
+
+* `new Buffer(42)` creates a `Buffer` of 42 bytes. Before Node.js 8, this buffer contained *arbitrary memory* for performance reasons, which could include anything ranging from program source code to passwords and encryption keys.
+* `new Buffer('abc')` creates a `Buffer` that contains the UTF-8-encoded version of the string `'abc'`. A second argument could specify another encoding: for example, `new Buffer(string, 'base64')` could be used to convert a Base64 string into the original sequence of bytes that it represents.
+* There are several other combinations of arguments.
+
+This meant that in code like `var buffer = new Buffer(foo);`, *it is not possible to tell what exactly the contents of the generated buffer are* without knowing the type of `foo`.
+
+Sometimes, the value of `foo` comes from an external source. 
For example, this function could be exposed as a service on a web server, converting a UTF-8 string into its Base64 form: + +```js +function stringToBase64(req, res) { + // The request body should have the format of `{ string: 'foobar' }`. + const rawBytes = new Buffer(req.body.string); + const encoded = rawBytes.toString('base64'); + res.end({ encoded }); +} +``` + +Note that this code does *not* validate the type of `req.body.string`: + +* `req.body.string` is expected to be a string. If this is the case, all goes well. +* `req.body.string` is controlled by the client that sends the request. +* If `req.body.string` is the *number* `50`, the `rawBytes` would be `50` bytes: + * Before Node.js 8, the content would be uninitialized + * After Node.js 8, the content would be `50` bytes with the value `0` + +Because of the missing type check, an attacker could intentionally send a number as part of the request. Using this, they can either: + +* Read uninitialized memory. This **will** leak passwords, encryption keys and other kinds of sensitive information. (Information leak) +* Force the program to allocate a large amount of memory. For example, when specifying `500000000` as the input value, each request will allocate 500MB of memory. This can be used to either exhaust the memory available of a program completely and make it crash, or slow it down significantly. (Denial of Service) + +Both of these scenarios are considered serious security issues in a real-world web server context. + +When using `Buffer.from(req.body.string)` instead, passing a number will always throw an exception instead, giving a controlled behavior that can always be handled by the program. + +### The + +`Buffer()` constructor has been deprecated for a while. Is this really an issue? + +Surveys of code in the `npm` ecosystem have shown that the `Buffer()` constructor is still widely used. This includes new code, and overall usage of such code has actually been *increasing*. 
diff --git a/locale/fr/docs/guides/debugging-getting-started.md b/locale/fr/docs/guides/debugging-getting-started.md new file mode 100644 index 000000000000..5abb1fa1c270 --- /dev/null +++ b/locale/fr/docs/guides/debugging-getting-started.md @@ -0,0 +1,189 @@ +--- +title: Debugging - Getting Started +layout: docs.hbs +--- + +# Debugging Guide + +This guide will help you get started debugging your Node.js apps and scripts. + +## Enable Inspector + +When started with the `--inspect` switch, a Node.js process listens for a debugging client. By default, it will listen at host and port 127.0.0.1:9229. Each process is also assigned a unique [UUID](https://tools.ietf.org/html/rfc4122). + +Inspector clients must know and specify host address, port, and UUID to connect. A full URL will look something like `ws://127.0.0.1:9229/0f2c936f-b1cd-4ac9-aab3-f63b0f33d55e`. + +Node.js will also start listening for debugging messages if it receives a `SIGUSR1` signal. (`SIGUSR1` is not available on Windows.) In Node.js 7 and earlier, this activates the legacy Debugger API. In Node.js 8 and later, it will activate the Inspector API. + +--- +## Security Implications + +Since the debugger has full access to the Node.js execution environment, a malicious actor able to connect to this port may be able to execute arbitrary code on behalf of the Node.js process. It is important to understand the security implications of exposing the debugger port on public and private networks. + +### Exposing the debug port publicly is unsafe + +If the debugger is bound to a public IP address, or to 0.0.0.0, any clients that can reach your IP address will be able to connect to the debugger without any restriction and will be able to run arbitrary code. + +By default `node --inspect` binds to 127.0.0.1. You explicitly need to provide a public IP address or 0.0.0.0, etc., if you intend to allow external connections to the debugger. Doing so may expose you to a potentially significant security threat. 
We suggest you ensure appropriate firewalls and access controls in place to prevent a security exposure. + +See the section on '[Enabling remote debugging scenarios](#enabling-remote-debugging-scenarios)' on some advice on how to safely allow remote debugger clients to connect. + +### Local applications have full access to the inspector + +Even if you bind the inspector port to 127.0.0.1 (the default), any applications running locally on your machine will have unrestricted access. This is by design to allow local debuggers to be able to attach conveniently. + +### Browsers, WebSockets and same-origin policy + +Websites open in a web-browser can make WebSocket and HTTP requests under the browser security model. An initial HTTP connection is necessary to obtain a unique debugger session id. The same-origin-policy prevents websites from being able to make this HTTP connection. For additional security against [DNS rebinding attacks](https://en.wikipedia.org/wiki/DNS_rebinding), Node.js verifies that the 'Host' headers for the connection either specify an IP address or `localhost` or `localhost6` precisely. + +These security policies disallow connecting to a remote debug server by specifying the hostname. You can work-around this restriction by specifying either the IP address or by using ssh tunnels as described below. + +## Inspector Clients + +Several commercial and open source tools can connect to the Node.js Inspector. Basic info on these follows: + +### [node-inspect](https://github.com/nodejs/node-inspect) + +* CLI Debugger supported by the Node.js Foundation which uses the [Inspector Protocol](https://chromedevtools.github.io/debugger-protocol-viewer/v8/). +* A version is bundled with Node.js and can be used with `node inspect myscript.js`. +* The latest version can also be installed independently (e.g. `npm install -g node-inspect`) and used with `node-inspect myscript.js`. 
+ +### [Chrome DevTools](https://github.com/ChromeDevTools/devtools-frontend) 55+, [Microsoft Edge](https://www.microsoftedgeinsider.com) + +* **Option 1**: Open `chrome://inspect` in a Chromium-based browser or `edge://inspect` in Edge. Click the Configure button and ensure your target host and port are listed. +* **Option 2**: Copy the `devtoolsFrontendUrl` from the output of `/json/list` (see above) or the --inspect hint text and paste into Chrome. + +### [Visual Studio Code](https://github.com/microsoft/vscode) 1.10+ + +* In the Debug panel, click the settings icon to open `.vscode/launch.json`. Select "Node.js" for initial setup. + +### [Visual Studio](https://github.com/Microsoft/nodejstools) 2017 + +* Choose "Debug > Start Debugging" from the menu or hit F5. +* [Detailed instructions](https://github.com/Microsoft/nodejstools/wiki/Debugging). + +### [JetBrains WebStorm](https://www.jetbrains.com/webstorm/) 2017.1+ and other JetBrains IDEs + +* Create a new Node.js debug configuration and hit Debug. `--inspect` will be used by default for Node.js 7+. To disable uncheck `js.debugger.node.use.inspect` in the IDE Registry. + +### [chrome-remote-interface](https://github.com/cyrus-and/chrome-remote-interface) + +* Library to ease connections to Inspector Protocol endpoints. + +### [Gitpod](https://www.gitpod.io) + +* Start a Node.js debug configuration from the `Debug` view or hit `F5`. [Detailed instructions](https://medium.com/gitpod/debugging-node-js-applications-in-theia-76c94c76f0a1) + +### [Eclipse IDE](https://eclipse.org/eclipseide) with Eclipse Wild Web Developer extension + +* From a .js file, choose "Debug As... > Node program", or +* Create a Debug Configuration to attach debugger to running Node.js application (already started with `--inspect`). + +--- + +## Command-line options + +The following table lists the impact of various runtime flags on debugging: + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagMeaning
--inspect +
    +
  • Enable inspector agent
  • +
  • Listen on default address and port (127.0.0.1:9229)
  • +
+
--inspect=[host:port] +
    +
  • Enable inspector agent
  • +
  • Bind to address or hostname host (default: 127.0.0.1)
  • +
  • Listen on port port (default: 9229)
  • +
+
--inspect-brk +
    +
  • Enable inspector agent
  • +
  • Listen on default address and port (127.0.0.1:9229)
  • +
  • Break before user code starts
  • +
+
--inspect-brk=[host:port] +
    +
  • Enable inspector agent
  • +
  • Bind to address or hostname host (default: 127.0.0.1)
  • +
  • Listen on port port (default: 9229)
  • +
  • Break before user code starts
  • +
+
node inspect script.js +
    +
  • Spawn child process to run user's script under --inspect flag; + and use main process to run CLI debugger.
  • +
+
node inspect --port=xxxx script.js +
    +
  • Spawn child process to run user's script under --inspect flag; + and use main process to run CLI debugger.
  • +
  • Listen on port port (default: 9229)
  • +
+
+ +--- + +## Enabling remote debugging scenarios + +We recommend that you never have the debugger listen on a public IP address. If you need to allow remote debugging connections we recommend the use of ssh tunnels instead. We provide the following example for illustrative purposes only. Please understand the security risk of allowing remote access to a privileged service before proceeding. + +Let's say you are running Node.js on a remote machine, remote.example.com, that you want to be able to debug. On that machine, you should start the node process with the inspector listening only to localhost (the default). + +```bash +node --inspect server.js +``` + +Now, on your local machine from where you want to initiate a debug client connection, you can setup an ssh tunnel: + +```bash +ssh -L 9221:localhost:9229 user@remote.example.com +``` + +This starts a ssh tunnel session where a connection to port 9221 on your local machine will be forwarded to port 9229 on remote.example.com. You can now attach a debugger such as Chrome DevTools or Visual Studio Code to localhost:9221, which should be able to debug as if the Node.js application was running locally. + +--- + +## Legacy Debugger + +**The legacy debugger has been deprecated as of Node.js 7.7.0. Please use `--inspect` and Inspector instead.** + +When started with the **--debug** or **--debug-brk** switches in version 7 and earlier, Node.js listens for debugging commands defined by the discontinued V8 Debugging Protocol on a TCP port, by default `5858`. Any debugger client which speaks this protocol can connect to and debug the running process; a couple popular ones are listed below. + +The V8 Debugging Protocol is no longer maintained or documented. + +### [Built-in Debugger](https://nodejs.org/dist/latest-v6.x/docs/api/debugger.html) + +Start `node debug script_name.js` to start your script under the builtin command-line debugger. 
Your script starts in another Node.js process started with the `--debug-brk` option, and the initial Node.js process runs the `_debugger.js` script and connects to your target. + +### [node-inspector](https://github.com/node-inspector/node-inspector) + +Debug your Node.js app with Chrome DevTools by using an intermediary process which translates the Inspector Protocol used in Chromium to the V8 Debugger protocol used in Node.js. + + diff --git a/locale/fr/docs/guides/diagnostics-flamegraph.md b/locale/fr/docs/guides/diagnostics-flamegraph.md new file mode 100644 index 000000000000..678d5040802a --- /dev/null +++ b/locale/fr/docs/guides/diagnostics-flamegraph.md @@ -0,0 +1,121 @@ +--- +title: Diagnostics - Flame Graphs +layout: docs.hbs +--- + +# Flame Graphs + +## What's a flame graph useful for? + +Flame graphs are a way of visualizing CPU time spent in functions. They can help you pin down where you spend too much time doing synchronous operations. + +## How to create a flame graph + +You might have heard creating a flame graph for Node.js is difficult, but that's not true (anymore). Solaris vms are no longer needed for flame graphs! + +Flame graphs are generated from `perf` output, which is not a node-specific tool. While it's the most powerful way to visualize CPU time spent, it may have issues with how JavaScript code is optimized in Node.js 8 and above. See [perf output issues](#perf-output-issues) section below. + +### Use a pre-packaged tool + +If you want a single step that produces a flame graph locally, try [0x](https://www.npmjs.com/package/0x) + +For diagnosing production deployments, read these notes: [0x production servers](https://github.com/davidmarkclements/0x/blob/master/docs/production-servers.md) + +### Create a flame graph with system perf tools + +The purpose of this guide is to show steps involved in creating a flame graph and keep you in control of each step. 
+ +If you want to understand each step better, take a look at the sections that follow where we go into more detail. + +Now let's get to work. + +1. Install `perf` (usually available through the linux-tools-common package if not already installed) +2. try running `perf` - it might complain about missing kernel modules, install them too +3. run node with perf enabled (see [perf output issues](#perf-output-issues) for tips specific to Node.js versions) + + ```bash + perf record -e cycles:u -g -- node --perf-basic-prof app.js + ``` + +4. disregard warnings unless they're saying you can't run perf due to missing packages; you may get some warnings about not being able to access kernel module samples which you're not after anyway. +5. Run `perf script > perfs.out` to generate the data file you'll visualize in a moment. It's useful to [apply some cleanup](#filtering-out-node-js-internal-functions) for a more readable graph +6. install stackvis if not yet installed `npm i -g stackvis` +7. run `stackvis perf < perfs.out > flamegraph.htm` + +Now open the flame graph file in your favorite browser and watch it burn. It's color-coded so you can focus on the most saturated orange bars first. They're likely to represent CPU heavy functions. + +Worth mentioning - if you click an element of a flame graph a zoom-in of its surroundings will get displayed above the graph. + +### Using `perf` to sample a running process + +This is great for recording flame graph data from an already running process that you don't want to interrupt. Imagine a production process with a hard to reproduce issue. + +```bash +perf record -F99 -p `pgrep -n node` -g -- sleep 3 +``` + +Wait, what is that `sleep 3` for? It's there to keep the perf running - despite `-p` option pointing to a different pid, the command needs to be executed on a process and end with it. perf runs for the life of the command you pass to it, whether or not you're actually profiling that command. 
`sleep 3` ensures that perf runs for 3 seconds. + +Why is `-F` (profiling frequency) set to 99? It's a reasonable default. You can adjust if you want. `-F99` tells perf to take 99 samples per second, for more precision increase the value. Lower values should produce less output with less precise results. Precision you need depends on how long your CPU intensive functions really run. If you're looking for the reason of a noticeable slowdown, 99 frames per second should be more than enough. + +After you get that 3 second perf record, proceed with generating the flame graph with the last two steps from above. + +### Filtering out Node.js internal functions + +Usually you just want to look at the performance of your own calls, so filtering out Node.js and V8 internal functions can make the graph much easier to read. You can clean up your perf file with: + +```bash +sed -i \ + -e "/( __libc_start| LazyCompile | v8::internal::| Builtin:| Stub:| LoadIC:|\[unknown\]| LoadPolymorphicIC:)/d" \ + -e 's/ LazyCompile:[*~]\?/ /' \ + perfs.out +``` + +If you read your flame graph and it seems odd, as if something is missing in the key function taking up most time, try generating your flame graph without the filters - maybe you got a rare case of an issue with Node.js itself. + +### Node.js's profiling options + +`--perf-basic-prof-only-functions` and `--perf-basic-prof` are the two that are useful for debugging your JavaScript code. Other options are used for profiling Node.js itself, which is outside the scope of this guide. + +`--perf-basic-prof-only-functions` produces less output, so it's the option with least overhead. + +### Why do I need them at all? + +Well, without these options you'll still get a flame graph, but with most bars labeled `v8::Function::Call`. 
+ +## `perf` output issues + +### Node.js 8.x V8 pipeline changes + +Node.js 8.x and above ships with new optimizations to JavaScript compilation pipeline in V8 engine which makes function names/references unreachable for perf sometimes. (It's called Turbofan) + +The result is you might not get your function names right in the flame graph. + +You'll notice `ByteCodeHandler:` where you'd expect function names. + +[0x](https://www.npmjs.com/package/0x) has some mitigations for that built in. + +For details see: + +* https://github.com/nodejs/benchmarking/issues/168 +* https://github.com/nodejs/diagnostics/issues/148#issuecomment-369348961 + +### Node.js 10+ + +Node.js 10.x addresses the issue with Turbofan using the `--interpreted-frames-native-stack` flag. + +Run `node --interpreted-frames-native-stack --perf-basic-prof-only-functions` to get function names in the flame graph regardless of which pipeline V8 used to compile your JavaScript. + +### Broken labels in the flame graph + +If you're seeing labels looking like this + +``` +node`_ZN2v88internal11interpreter17BytecodeGenerator15VisitStatementsEPNS0_8ZoneListIPNS0_9StatementEEE +``` + +it means the Linux perf you're using was not compiled with demangle support, see https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1396654 for example + +## Examples + +Practice capturing flame graphs yourself with [a flame graph exercise](https://github.com/naugtur/node-example-flamegraph)! diff --git a/locale/fr/docs/guides/domain-postmortem.md b/locale/fr/docs/guides/domain-postmortem.md new file mode 100644 index 000000000000..243b24a9e760 --- /dev/null +++ b/locale/fr/docs/guides/domain-postmortem.md @@ -0,0 +1,338 @@ +--- +title: Domain Module Postmortem +layout: docs.hbs +--- + +# Domain Module Postmortem + +## Usability Issues + +### Implicit Behavior + +It's possible for a developer to create a new domain and then simply run `domain.enter()`. 
Which then acts as a catch-all for any exception in the future that couldn't be observed by the thrower. Allowing a module author to intercept the exceptions of unrelated code in a different module. Preventing the originator of the code from knowing about its own exceptions.
+
+Here's an example of how one indirectly linked module can affect another:
+
+```js
+// module a.js
+const b = require('./b');
+const c = require('./c');
+
+// module b.js
+const d = require('domain').create();
+d.on('error', () => { /* silence everything */ });
+d.enter();
+
+// module c.js
+const dep = require('some-dep');
+dep.method(); // Uh-oh! This method doesn't actually exist.
+```
+
+Since module `b` enters the domain but never exits, any uncaught exception will be swallowed. Leaving module `c` in the dark as to why it didn't run the entire script. Leaving a potentially partially populated `module.exports`. Doing this is not the same as listening for `'uncaughtException'`. As the latter is explicitly meant to globally catch errors. The other issue is that domains are processed prior to any `'uncaughtException'` handlers, and prevent them from running.
+
+Another issue is that domains route errors automatically if no `'error'` handler was set on the event emitter. There is no opt-in mechanism for this, and it automatically propagates across the entire asynchronous chain. This may seem useful at first, but once asynchronous calls are two or more modules deep and one of them doesn't include an error handler the creator of the domain will suddenly be catching unexpected exceptions, and the thrower's exception will go unnoticed by the author. 
+ +The following is a simple example of how a missing `'error'` handler allows the active domain to hijack the error: + +```js +const domain = require('domain'); +const net = require('net'); +const d = domain.create(); +d.on('error', (err) => console.error(err.message)); + +d.run(() => net.createServer((c) => { + c.end(); + c.write('bye'); +}).listen(8000)); +``` + +Even manually removing the connection via `d.remove(c)` does not prevent the connection's error from being automatically intercepted. + +Failures that plagues both error routing and exception handling are the inconsistencies in how errors are bubbled. The following is an example of how nested domains will and won't bubble the exception based on when they happen: + +```js +const domain = require('domain'); +const net = require('net'); +const d = domain.create(); +d.on('error', () => console.error('d intercepted an error')); + +d.run(() => { + const server = net.createServer((c) => { + const e = domain.create(); // No 'error' handler being set. + e.run(() => { + // This will not be caught by d's error handler. + setImmediate(() => { + throw new Error('thrown from setImmediate'); + }); + // Though this one will bubble to d's error handler. + throw new Error('immediately thrown'); + }); + }).listen(8080); +}); +``` + +It may be expected that nested domains always remain nested, and will always propagate the exception up the domain stack. Or that exceptions will never automatically bubble. Unfortunately both these situations occur, leading to potentially confusing behavior that may even be prone to difficult to debug timing conflicts. + +### API Gaps + +While APIs based on using `EventEmitter` can use `bind()` and errback style callbacks can use `intercept()`, alternative APIs that implicitly bind to the active domain must be executed inside of `run()`. Meaning if module authors wanted to support domains using a mechanism alternative to those mentioned they must manually implement domain support themselves. 
Instead of being able to leverage the implicit mechanisms already in place. + +### Error Propagation + +Propagating errors across nested domains is not straight forward, if even possible. Existing documentation shows a simple example of how to `close()` an `http` server if there is an error in the request handler. What it does not explain is how to close the server if the request handler creates another domain instance for another async request. Using the following as a simple example of the failing of error propagation: + +```js +const d1 = domain.create(); +d1.foo = true; // custom member to make more visible in console +d1.on('error', (er) => { /* handle error */ }); + +d1.run(() => setTimeout(() => { + const d2 = domain.create(); + d2.bar = 43; + d2.on('error', (er) => console.error(er.message, domain._stack)); + d2.run(() => { + setTimeout(() => { + setTimeout(() => { + throw new Error('outer'); + }); + throw new Error('inner'); + }); + }); +})); +``` + +Even in the case that the domain instances are being used for local storage so access to resources are made available there is still no way to allow the error to continue propagating from `d2` back to `d1`. Quick inspection may tell us that simply throwing from `d2`'s domain `'error'` handler would allow `d1` to then catch the exception and execute its own error handler. Though that is not the case. Upon inspection of `domain._stack` you'll see that the stack only contains `d2`. + +This may be considered a failing of the API, but even if it did operate in this way there is still the issue of transmitting the fact that a branch in the asynchronous execution has failed, and that all further operations in that branch must cease. In the example of the http request handler, if we fire off several asynchronous requests and each one then `write()`'s data back to the client many more errors will arise from attempting to `write()` to a closed handle. More on this in _Resource Cleanup on Exception_. 
+ +### Resource Cleanup on Exception + +The following script contains a more complex example of properly cleaning up in a small resource dependency tree in the case that an exception occurs in a given connection or any of its dependencies. Breaking down the script into its basic operations: + +```js +'use strict'; + +const domain = require('domain'); +const EE = require('events'); +const fs = require('fs'); +const net = require('net'); +const util = require('util'); +const print = process._rawDebug; + +const pipeList = []; +const FILENAME = '/tmp/tmp.tmp'; +const PIPENAME = '/tmp/node-domain-example-'; +const FILESIZE = 1024; +let uid = 0; + +// Setting up temporary resources +const buf = Buffer.alloc(FILESIZE); +for (let i = 0; i < buf.length; i++) + buf[i] = ((Math.random() * 1e3) % 78) + 48; // Basic ASCII +fs.writeFileSync(FILENAME, buf); + +function ConnectionResource(c) { + EE.call(this); + this._connection = c; + this._alive = true; + this._domain = domain.create(); + this._id = Math.random().toString(32).substr(2).substr(0, 8) + (++uid); + + this._domain.add(c); + this._domain.on('error', () => { + this._alive = false; + }); +} +util.inherits(ConnectionResource, EE); + +ConnectionResource.prototype.end = function end(chunk) { + this._alive = false; + this._connection.end(chunk); + this.emit('end'); +}; + +ConnectionResource.prototype.isAlive = function isAlive() { + return this._alive; +}; + +ConnectionResource.prototype.id = function id() { + return this._id; +}; + +ConnectionResource.prototype.write = function write(chunk) { + this.emit('data', chunk); + return this._connection.write(chunk); +}; + +// Example begin +net.createServer((c) => { + const cr = new ConnectionResource(c); + + const d1 = domain.create(); + fs.open(FILENAME, 'r', d1.intercept((fd) => { + streamInParts(fd, cr, 0); + })); + + pipeData(cr); + + c.on('close', () => cr.end()); +}).listen(8080); + +function streamInParts(fd, cr, pos) { + const d2 = domain.create(); + const alive = true; 
+ d2.on('error', (er) => { + print('d2 error:', er.message); + cr.end(); + }); + fs.read(fd, Buffer.alloc(10), 0, 10, pos, d2.intercept((bRead, buf) => { + if (!cr.isAlive()) { + return fs.close(fd); + } + if (cr._connection.bytesWritten < FILESIZE) { + // Documentation says callback is optional, but doesn't mention that if + // the write fails an exception will be thrown. + const goodtogo = cr.write(buf); + if (goodtogo) { + setTimeout(() => streamInParts(fd, cr, pos + bRead), 1000); + } else { + cr._connection.once('drain', () => streamInParts(fd, cr, pos + bRead)); + } + return; + } + cr.end(buf); + fs.close(fd); + })); +} + +function pipeData(cr) { + const pname = PIPENAME + cr.id(); + const ps = net.createServer(); + const d3 = domain.create(); + const connectionList = []; + d3.on('error', (er) => { + print('d3 error:', er.message); + cr.end(); + }); + d3.add(ps); + ps.on('connection', (conn) => { + connectionList.push(conn); + conn.on('data', () => {}); // don't care about incoming data. 
+ conn.on('close', () => { + connectionList.splice(connectionList.indexOf(conn), 1); + }); + }); + cr.on('data', (chunk) => { + for (let i = 0; i < connectionList.length; i++) { + connectionList[i].write(chunk); + } + }); + cr.on('end', () => { + for (let i = 0; i < connectionList.length; i++) { + connectionList[i].end(); + } + ps.close(); + }); + pipeList.push(pname); + ps.listen(pname); +} + +process.on('SIGINT', () => process.exit()); +process.on('exit', () => { + try { + for (let i = 0; i < pipeList.length; i++) { + fs.unlinkSync(pipeList[i]); + } + fs.unlinkSync(FILENAME); + } catch (e) { } +}); + +``` + +* When a new connection happens, concurrently: + * Open a file on the file system + * Open Pipe to unique socket +* Read a chunk of the file asynchronously +* Write chunk to both the TCP connection and any listening sockets +* If any of these resources error, notify all other attached resources that they need to clean up and shutdown + +As we can see from this example a lot more must be done to properly clean up resources when something fails than what can be done strictly through the domain API. All that domains offer is an exception aggregation mechanism. Even the potentially useful ability to propagate data with the domain is easily countered, in this example, by passing the needed resources as a function argument. + +One problem domains perpetuated was the supposed simplicity of being able to continue execution, contrary to what the documentation stated, of the application despite an unexpected exception. This example demonstrates the fallacy behind that idea. + +Attempting proper resource cleanup on unexpected exception becomes more complex as the application itself grows in complexity. This example only has 3 basic resources in play, and all of them with a clear dependency path. If an application uses something like shared resources or resource reuse the ability to cleanup, and properly test that cleanup has been done, grows greatly. 
+ +In the end, in terms of handling errors, domains aren't much more than a glorified `'uncaughtException'` handler. Except with more implicit and unobservable behavior by third-parties. + +### Resource Propagation + +Another use case for domains was to use it to propagate data along asynchronous data paths. One problematic point is the ambiguity of when to expect the correct domain when there are multiple in the stack (which must be assumed if the async stack works with other modules). Also the conflict between being able to depend on a domain for error handling while also having it available to retrieve the necessary data. + +The following is a involved example demonstrating the failing using domains to propagate data along asynchronous stacks: + +```js +const domain = require('domain'); +const net = require('net'); + +const server = net.createServer((c) => { + // Use a domain to propagate data across events within the + // connection so that we don't have to pass arguments + // everywhere. + const d = domain.create(); + d.data = { connection: c }; + d.add(c); + // Mock class that does some useless async data transformation + // for demonstration purposes. + const ds = new DataStream(dataTransformed); + c.on('data', (chunk) => ds.data(chunk)); +}).listen(8080, () => console.log('listening on 8080')); + +function dataTransformed(chunk) { + // FAIL! Because the DataStream instance also created a + // domain we have now lost the active domain we had + // hoped to use. + domain.active.data.connection.write(chunk); +} + +function DataStream(cb) { + this.cb = cb; + // DataStream wants to use domains for data propagation too! + // Unfortunately this will conflict with any domain that + // already exists. + this.domain = domain.create(); + this.domain.data = { inst: this }; +} + +DataStream.prototype.data = function data(chunk) { + // This code is self contained, but pretend it's a complex + // operation that crosses at least one other module. 
So + // passing along "this", etc., is not easy. + this.domain.run(() => { + // Simulate an async operation that does the data transform. + setImmediate(() => { + for (let i = 0; i < chunk.length; i++) + chunk[i] = ((chunk[i] + Math.random() * 100) % 96) + 33; + // Grab the instance from the active domain and use that + // to call the user's callback. + const self = domain.active.data.inst; + self.cb(chunk); + }); + }); +}; +``` + +The above shows that it is difficult to have more than one asynchronous API attempt to use domains to propagate data. This example could possibly be fixed by assigning `parent: domain.active` in the `DataStream` constructor. Then restoring it via `domain.active = domain.active.data.parent` just before the user's callback is called. Also the instantiation of `DataStream` in the `'connection'` callback must be run inside `d.run()`, instead of simply using `d.add(c)`, otherwise there will be no active domain. + +In short, for this to have a prayer of a chance usage would need to strictly adhere to a set of guidelines that would be difficult to enforce or test. + +## Performance Issues + +A significant deterrent from using domains is the overhead. Using node's built-in http benchmark, `http_simple.js`, without domains it can handle over 22,000 requests/second. Whereas if it's run with `NODE_USE_DOMAINS=1` that number drops down to under 17,000 requests/second. In this case there is only a single global domain. If we edit the benchmark so the http request callback creates a new domain instance performance drops further to 15,000 requests/second. + +While this probably wouldn't affect a server only serving a few hundred or even a thousand requests per second, the amount of overhead is directly proportional to the number of asynchronous requests made. So if a single connection needs to connect to several other services all of those will contribute to the overall latency of delivering the final product to the client. 
+ +Using `AsyncWrap` and tracking the number of times `init`/`pre`/`post`/`destroy` are called in the mentioned benchmark we find that the sum of all events called is over 170,000 times per second. This means even adding 1 microsecond overhead per call for any type of setup or tear down will result in a 17% performance loss. Granted, this is for the optimized scenario of the benchmark, but I believe this demonstrates the necessity for a mechanism such as domain to be as cheap to run as possible. + +## Looking Ahead + +The domain module has been soft deprecated since Dec 2014, but has not yet been removed because node offers no alternative functionality at the moment. As of this writing there is ongoing work building out the `AsyncWrap` API and a proposal for Zones being prepared for the TC39. At such time there is suitable functionality to replace domains it will undergo the full deprecation cycle and eventually be removed from core. diff --git a/locale/fr/docs/guides/getting-started-guide.md b/locale/fr/docs/guides/getting-started-guide.md new file mode 100644 index 000000000000..c176110b8d34 --- /dev/null +++ b/locale/fr/docs/guides/getting-started-guide.md @@ -0,0 +1,29 @@ +--- +title: Getting Started Guide +layout: docs.hbs +--- + +# How do I start with Node.js after I installed it? + +Once we have installed Node.js, let's build our first web server. Create a file named `app.js` containing the following contents: + +```javascript +const http = require('http'); + +const hostname = '127.0.0.1'; +const port = 3000; + +const server = http.createServer((req, res) => { + res.statusCode = 200; + res.setHeader('Content-Type', 'text/plain'); + res.end('Hello World'); +}); + +server.listen(port, hostname, () => { + console.log(`Server running at http://${hostname}:${port}/`); +}); +``` + +Now, run your web server using `node app.js`. Visit `http://localhost:3000` and you will see a message saying "Hello World". 
+ +Refer to the [Introduction to Node.js](https://nodejs.dev/) for a more comprehensive guide to getting started with Node.js. diff --git a/locale/fr/docs/guides/index.md b/locale/fr/docs/guides/index.md new file mode 100644 index 000000000000..ad352ee74bac --- /dev/null +++ b/locale/fr/docs/guides/index.md @@ -0,0 +1,32 @@ +--- +title: Guides +layout: docs.hbs +--- + +# Guides + +## General + +* [Getting Started Guide](/en/docs/guides/getting-started-guide/) +* [Debugging - Getting Started](/en/docs/guides/debugging-getting-started/) +* [Easy profiling for Node.js Applications](/en/docs/guides/simple-profiling/) +* [Diagnostics - Flame Graphs](/en/docs/guides/diagnostics-flamegraph/) +* [Dockerizing a Node.js web app](/en/docs/guides/nodejs-docker-webapp/) +* [Migrating to safe Buffer constructors](/en/docs/guides/buffer-constructor-deprecation/) + +## Node.js core concepts + +* [Introduction to Node.js](https://nodejs.dev/) +* [Overview of Blocking vs Non-Blocking](/en/docs/guides/blocking-vs-non-blocking/) +* [The Node.js Event Loop, Timers, and `process.nextTick()`](/en/docs/guides/event-loop-timers-and-nexttick/) +* [Don't Block the Event Loop (or the Worker Pool)](/en/docs/guides/dont-block-the-event-loop/) +* [Timers in Node.js](/en/docs/guides/timers-in-node/) + +## Module-related guides + +* [Anatomy of an HTTP Transaction](/en/docs/guides/anatomy-of-an-http-transaction/) +* [Working with Different Filesystems](/en/docs/guides/working-with-different-filesystems/) +* [Backpressuring in Streams](/en/docs/guides/backpressuring-in-streams/) +* [Domain Module Postmortem](/en/docs/guides/domain-postmortem/) +* [How to publish N-API package](/en/docs/guides/publishing-napi-modules/) +* [ABI Stability](/en/docs/guides/abi-stability/) diff --git a/locale/fr/docs/guides/nodejs-docker-webapp.md b/locale/fr/docs/guides/nodejs-docker-webapp.md new file mode 100644 index 000000000000..27219bd037f4 --- /dev/null +++ b/locale/fr/docs/guides/nodejs-docker-webapp.md @@ -0,0 
+1,237 @@ +--- +title: Dockerizing a Node.js web app +layout: docs.hbs +--- + +# Dockerizing a Node.js web app + +The goal of this example is to show you how to get a Node.js application into a Docker container. The guide is intended for development, and *not* for a production deployment. The guide also assumes you have a working [Docker installation](https://docs.docker.com/engine/installation/) and a basic understanding of how a Node.js application is structured. + +In the first part of this guide we will create a simple web application in Node.js, then we will build a Docker image for that application, and lastly we will instantiate a container from that image. + +Docker allows you to package an application with its environment and all of its dependencies into a "box", called a container. Usually, a container consists of an application running in a stripped-to-basics version of a Linux operating system. An image is the blueprint for a container, a container is a running instance of an image. + +## Create the Node.js app + +First, create a new directory where all the files would live. In this directory create a `package.json` file that describes your app and its dependencies: + +```json +{ + "name": "docker_web_app", + "version": "1.0.0", + "description": "Node.js on Docker", + "author": "First Last ", + "main": "server.js", + "scripts": { + "start": "node server.js" + }, + "dependencies": { + "express": "^4.16.1" + } +} +``` + +With your new `package.json` file, run `npm install`. If you are using `npm` version 5 or later, this will generate a `package-lock.json` file which will be copied to your Docker image. 
+ +Then, create a `server.js` file that defines a web app using the [Express.js](https://expressjs.com/) framework: + +```javascript +'use strict'; + +const express = require('express'); + +// Constants +const PORT = 8080; +const HOST = '0.0.0.0'; + +// App +const app = express(); +app.get('/', (req, res) => { + res.send('Hello World'); +}); + +app.listen(PORT, HOST); +console.log(`Running on http://${HOST}:${PORT}`); +``` + +In the next steps, we'll look at how you can run this app inside a Docker container using the official Docker image. First, you'll need to build a Docker image of your app. + +## Creating a Dockerfile + +Create an empty file called `Dockerfile`: + +```markup +touch Dockerfile +``` + +Open the `Dockerfile` in your favorite text editor + +The first thing we need to do is define from what image we want to build from. Here we will use the latest LTS (long term support) version `10` of `node` available from the [Docker Hub](https://hub.docker.com/): + +```docker +FROM node:10 +``` + +Next we create a directory to hold the application code inside the image, this will be the working directory for your application: + +```docker +# Create app directory +WORKDIR /usr/src/app +``` + +This image comes with Node.js and NPM already installed so the next thing we need to do is to install your app dependencies using the `npm` binary. Please note that if you are using `npm` version 4 or earlier a `package-lock.json` file will *not* be generated. + +```docker +# Install app dependencies +# A wildcard is used to ensure both package.json AND package-lock.json are copied +# where available (npm@5+) +COPY package*.json ./ + +RUN npm install +# If you are building your code for production +# RUN npm ci --only=production +``` + +Note that, rather than copying the entire working directory, we are only copying the `package.json` file. This allows us to take advantage of cached Docker layers. 
bitJudo has a good explanation of this [here](http://bitjudo.com/blog/2014/03/13/building-efficient-dockerfiles-node-dot-js/). Furthermore, the `npm ci` command, specified in the comments, helps provide faster, reliable, reproducible builds for production environments. You can read more about this [here](https://blog.npmjs.org/post/171556855892/introducing-npm-ci-for-faster-more-reliable). + +To bundle your app's source code inside the Docker image, use the `COPY` instruction: + +```docker +# Bundle app source +COPY . . +``` + +Your app binds to port `8080` so you'll use the `EXPOSE` instruction to have it mapped by the `docker` daemon: + +```docker +EXPOSE 8080 +``` + +Last but not least, define the command to run your app using `CMD` which defines your runtime. Here we will use `node server.js` to start your server: + +```docker +CMD [ "node", "server.js" ] +``` + +Your `Dockerfile` should now look like this: + +```docker +FROM node:10 + +# Create app directory +WORKDIR /usr/src/app + +# Install app dependencies +# A wildcard is used to ensure both package.json AND package-lock.json are copied +# where available (npm@5+) +COPY package*.json ./ + +RUN npm install +# If you are building your code for production +# RUN npm ci --only=production + +# Bundle app source +COPY . . + +EXPOSE 8080 +CMD [ "node", "server.js" ] +``` + +## .dockerignore file + +Create a `.dockerignore` file in the same directory as your `Dockerfile` with following content: + +``` +node_modules +npm-debug.log +``` + +This will prevent your local modules and debug logs from being copied onto your Docker image and possibly overwriting modules installed within your image. + +## Building your image + +Go to the directory that has your `Dockerfile` and run the following command to build the Docker image. The `-t` flag lets you tag your image so it's easier to find later using the `docker images` command: + +```bash +docker build -t /node-web-app . 
+``` + +Your image will now be listed by Docker: + +```bash +$ docker images + +# Example +REPOSITORY TAG ID CREATED +node 10 1934b0b038d1 5 days ago +/node-web-app latest d64d3505b0d2 1 minute ago +``` + +## Run the image + +Running your image with `-d` runs the container in detached mode, leaving the container running in the background. The `-p` flag redirects a public port to a private port inside the container. Run the image you previously built: + +```bash +docker run -p 49160:8080 -d /node-web-app +``` + +Print the output of your app: + +```bash +# Get container ID +$ docker ps + +# Print app output +$ docker logs + +# Example +Running on http://localhost:8080 +``` + +If you need to go inside the container you can use the `exec` command: + +```bash +# Enter the container +$ docker exec -it /bin/bash +``` + +## Test + +To test your app, get the port of your app that Docker mapped: + +```bash +$ docker ps + +# Example +ID IMAGE COMMAND ... PORTS +ecce33b30ebf /node-web-app:latest npm start ... 49160->8080 +``` + +In the example above, Docker mapped the `8080` port inside of the container to the port `49160` on your machine. + +Now you can call your app using `curl` (install if needed via: `sudo apt-get +install curl`): + +```bash +$ curl -i localhost:49160 + +HTTP/1.1 200 OK +X-Powered-By: Express +Content-Type: text/html; charset=utf-8 +Content-Length: 12 +ETag: W/"c-M6tWOb/Y57lesdjQuHeB1P/qTV0" +Date: Mon, 13 Nov 2017 20:53:59 GMT +Connection: keep-alive + +Hello world +``` + +We hope this tutorial helped you get up and running a simple Node.js application on Docker. 
+ +You can find more information about Docker and Node.js on Docker in the following places: + +* [Official Node.js Docker Image](https://hub.docker.com/_/node/) +* [Node.js Docker Best Practices Guide](https://github.com/nodejs/docker-node/blob/master/docs/BestPractices.md) +* [Official Docker documentation](https://docs.docker.com/) +* [Docker Tag on Stack Overflow](https://stackoverflow.com/questions/tagged/docker) +* [Docker Subreddit](https://reddit.com/r/docker) diff --git a/locale/fr/docs/guides/publishing-napi-modules.md b/locale/fr/docs/guides/publishing-napi-modules.md new file mode 100644 index 000000000000..d78432a4305d --- /dev/null +++ b/locale/fr/docs/guides/publishing-napi-modules.md @@ -0,0 +1,37 @@ +--- +title: How to publish N-API package +layout: docs.hbs +--- + +# To publish N-API version of a package alongside a non-N-API version + +The following steps are illustrated using the package `iotivity-node`: + +* First, publish the non-N-API version: + * Update the version in `package.json`. For `iotivity-node`, the version becomes `1.2.0-2`. + * Go through the release checklist (ensure tests/demos/docs are OK) + * `npm publish` +* Then, publish the N-API version: + * Update the version in `package.json`. In the case of `iotivity-node`, the version becomes `1.2.0-3`. For versioning, we recommend following the pre-release version scheme as described by [semver.org](https://semver.org/#spec-item-9) e.g. `1.2.0-napi`. + * Go through the release checklist (ensure tests/demos/docs are OK) + * `npm publish --tag n-api` + +In this example, tagging the release with `n-api` has ensured that, although version 1.2.0-3 is later than the non-N-API published version (1.2.0-2), it will not be installed if someone chooses to install `iotivity-node` by simply running `npm install iotivity-node`. This will install the non-N-API version by default. The user will have to run `npm install iotivity-node@n-api` to receive the N-API version. 
For more information on using tags with npm check out ["Using dist-tags"](https://docs.npmjs.com/getting-started/using-tags). + +# To introduce a dependency on an N-API version of a package + +To add the N-API version of `iotivity-node` as a dependency, the `package.json` will look like this: + +```json +"dependencies": { + "iotivity-node": "n-api" +} +``` + +**Note:** As explained in ["Using dist-tags"](https://docs.npmjs.com/getting-started/using-tags), unlike regular versions, tagged versions cannot be addressed by version ranges such as `"^2.0.0"` inside `package.json`. The reason for this is that the tag refers to exactly one version. So, if the package maintainer chooses to tag a later version of the package using the same tag, `npm update` will receive the later version. This should be acceptable given the currently experimental nature of N-API. To depend on an N-API-enabled version other than the latest published, the `package.json` dependency will have to refer to the exact version like the following: + +```json +"dependencies": { + "iotivity-node": "1.2.0-3" +} +``` diff --git a/locale/fr/docs/guides/simple-profiling.md b/locale/fr/docs/guides/simple-profiling.md new file mode 100644 index 000000000000..aa0392569fb7 --- /dev/null +++ b/locale/fr/docs/guides/simple-profiling.md @@ -0,0 +1,217 @@ +--- +title: Easy profiling for Node.js Applications +layout: docs.hbs +--- + +# Easy profiling for Node.js Applications + +There are many third party tools available for profiling Node.js applications but, in many cases, the easiest option is to use the Node.js built in profiler. The built in profiler uses the [profiler inside V8](https://v8.dev/docs/profile) which samples the stack at regular intervals during program execution. 
It records the results of these samples, along with important optimization events such as jit compiles, as a series of ticks: + +``` +code-creation,LazyCompile,0,0x2d5000a337a0,396,"bp native array.js:1153:16",0x289f644df68,~ +code-creation,LazyCompile,0,0x2d5000a33940,716,"hasOwnProperty native v8natives.js:198:30",0x289f64438d0,~ +code-creation,LazyCompile,0,0x2d5000a33c20,284,"ToName native runtime.js:549:16",0x289f643bb28,~ +code-creation,Stub,2,0x2d5000a33d40,182,"DoubleToIStub" +code-creation,Stub,2,0x2d5000a33e00,507,"NumberToStringStub" +``` + +In the past, you needed the V8 source code to be able to interpret the ticks. Luckily, tools have been introduced since Node.js 4.4.0 that facilitate the consumption of this information without separately building V8 from source. Let's see how the built-in profiler can help provide insight into application performance. + +To illustrate the use of the tick profiler, we will work with a simple Express application. Our application will have two handlers, one for adding new users to our system: + +```javascript +app.get('/newUser', (req, res) => { + let username = req.query.username || ''; + const password = req.query.password || ''; + + username = username.replace(/[!@#$%^&*]/g, ''); + + if (!username || !password || users[username]) { + return res.sendStatus(400); + } + + const salt = crypto.randomBytes(128).toString('base64'); + const hash = crypto.pbkdf2Sync(password, salt, 10000, 512, 'sha512'); + + users[username] = { salt, hash }; + + res.sendStatus(200); +}); +``` + +and another for validating user authentication attempts: + +```javascript +app.get('/auth', (req, res) => { + let username = req.query.username || ''; + const password = req.query.password || ''; + + username = username.replace(/[!@#$%^&*]/g, ''); + + if (!username || !password || !users[username]) { + return res.sendStatus(400); + } + + const { salt, hash } = users[username]; + const encryptHash = crypto.pbkdf2Sync(password, salt, 10000, 512, 
'sha512'); + + if (crypto.timingSafeEqual(hash, encryptHash)) { + res.sendStatus(200); + } else { + res.sendStatus(401); + } +}); +``` + +*Please note that these are NOT recommended handlers for authenticating users in your Node.js applications and are used purely for illustration purposes. You should not be trying to design your own cryptographic authentication mechanisms in general. It is much better to use existing, proven authentication solutions.* + +Now assume that we've deployed our application and users are complaining about high latency on requests. We can easily run the app with the built in profiler: + +``` +NODE_ENV=production node --prof app.js +``` + +and put some load on the server using `ab` (ApacheBench): + +``` +curl -X GET "http://localhost:8080/newUser?username=matt&password=password" +ab -k -c 20 -n 250 "http://localhost:8080/auth?username=matt&password=password" +``` + +and get an ab output of: + +``` +Concurrency Level: 20 +Time taken for tests: 46.932 seconds +Complete requests: 250 +Failed requests: 0 +Keep-Alive requests: 250 +Total transferred: 50250 bytes +HTML transferred: 500 bytes +Requests per second: 5.33 [#/sec] (mean) +Time per request: 3754.556 [ms] (mean) +Time per request: 187.728 [ms] (mean, across all concurrent requests) +Transfer rate: 1.05 [Kbytes/sec] received + +... + +Percentage of the requests served within a certain time (ms) + 50% 3755 + 66% 3804 + 75% 3818 + 80% 3825 + 90% 3845 + 95% 3858 + 98% 3874 + 99% 3875 + 100% 4225 (longest request) +``` + +From this output, we see that we're only managing to serve about 5 requests per second and that the average request takes just under 4 seconds round trip. In a real world example, we could be doing lots of work in many functions on behalf of a user request but even in our simple example, time could be lost compiling regular expressions, generating random salts, generating unique hashes from user passwords, or inside the Express framework itself. 
+ +Since we ran our application using the `--prof` option, a tick file was generated in the same directory as your local run of the application. It should have the form `isolate-0xnnnnnnnnnnnn-v8.log` (where `n` is a digit). + +In order to make sense of this file, we need to use the tick processor bundled with the Node.js binary. To run the processor, use the `--prof-process` flag: + +``` +node --prof-process isolate-0xnnnnnnnnnnnn-v8.log > processed.txt +``` + +Opening processed.txt in your favorite text editor will give you a few different types of information. The file is broken up into sections which are again broken up by language. First, we look at the summary section and see: + +``` + [Summary]: + ticks total nonlib name + 79 0.2% 0.2% JavaScript + 36703 97.2% 99.2% C++ + 7 0.0% 0.0% GC + 767 2.0% Shared libraries + 215 0.6% Unaccounted +``` + +This tells us that 97% of all samples gathered occurred in C++ code and that when viewing other sections of the processed output we should pay most attention to work being done in C++ (as opposed to JavaScript). With this in mind, we next find the [C++] section which contains information about which C++ functions are taking the most CPU time and see: + +``` + [C++]: + ticks total nonlib name + 19557 51.8% 52.9% node::crypto::PBKDF2(v8::FunctionCallbackInfo const&) + 4510 11.9% 12.2% _sha1_block_data_order + 3165 8.4% 8.6% _malloc_zone_malloc +``` + +We see that the top 3 entries account for 72.1% of CPU time taken by the program. From this output, we immediately see that at least 51.8% of CPU time is taken up by a function called PBKDF2 which corresponds to our hash generation from a user's password. However, it may not be immediately obvious how the lower two entries factor into our application (or if it is we will pretend otherwise for the sake of example). 
To better understand the relationship between these functions, we will next look at the [Bottom up (heavy) profile] section which provides information about the primary callers of each function. Examining this section, we find: + +``` + ticks parent name + 19557 51.8% node::crypto::PBKDF2(v8::FunctionCallbackInfo const&) + 19557 100.0% v8::internal::Builtins::~Builtins() + 19557 100.0% LazyCompile: ~pbkdf2 crypto.js:557:16 + + 4510 11.9% _sha1_block_data_order + 4510 100.0% LazyCompile: *pbkdf2 crypto.js:557:16 + 4510 100.0% LazyCompile: *exports.pbkdf2Sync crypto.js:552:30 + + 3165 8.4% _malloc_zone_malloc + 3161 99.9% LazyCompile: *pbkdf2 crypto.js:557:16 + 3161 100.0% LazyCompile: *exports.pbkdf2Sync crypto.js:552:30 +``` + +Parsing this section takes a little more work than the raw tick counts above. Within each of the "call stacks" above, the percentage in the parent column tells you the percentage of samples for which the function in the row above was called by the function in the current row. For example, in the middle "call stack" above for _sha1_block_data_order, we see that `_sha1_block_data_order` occurred in 11.9% of samples, which we knew from the raw counts above. However, here, we can also tell that it was always called by the pbkdf2 function inside the Node.js crypto module. We see that similarly, `_malloc_zone_malloc` was called almost exclusively by the same pbkdf2 function. Thus, using the information in this view, we can tell that our hash computation from the user's password accounts not only for the 51.8% from above but also for all CPU time in the top 3 most sampled functions since the calls to `_sha1_block_data_order` and `_malloc_zone_malloc` were made on behalf of the pbkdf2 function. + +At this point, it is very clear that the password based hash generation should be the target of our optimization. 
Thankfully, you've fully internalized the [benefits of asynchronous programming](https://nodesource.com/blog/why-asynchronous) and you realize that the work to generate a hash from the user's password is being done in a synchronous way and thus tying down the event loop. This prevents us from working on other incoming requests while computing a hash. + +To remedy this issue, you make a small modification to the above handlers to use the asynchronous version of the pbkdf2 function: + +```javascript +app.get('/auth', (req, res) => { + let username = req.query.username || ''; + const password = req.query.password || ''; + + username = username.replace(/[!@#$%^&*]/g, ''); + + if (!username || !password || !users[username]) { + return res.sendStatus(400); + } + + crypto.pbkdf2(password, users[username].salt, 10000, 512, 'sha512', (err, hash) => { + if (users[username].hash.toString() === hash.toString()) { + res.sendStatus(200); + } else { + res.sendStatus(401); + } + }); +}); +``` + +A new run of the ab benchmark above with the asynchronous version of your app yields: + +``` +Concurrency Level: 20 +Time taken for tests: 12.846 seconds +Complete requests: 250 +Failed requests: 0 +Keep-Alive requests: 250 +Total transferred: 50250 bytes +HTML transferred: 500 bytes +Requests per second: 19.46 [#/sec] (mean) +Time per request: 1027.689 [ms] (mean) +Time per request: 51.384 [ms] (mean, across all concurrent requests) +Transfer rate: 3.82 [Kbytes/sec] received + +... + +Percentage of the requests served within a certain time (ms) + 50% 1018 + 66% 1035 + 75% 1041 + 80% 1043 + 90% 1049 + 95% 1063 + 98% 1070 + 99% 1071 + 100% 1079 (longest request) +``` + +Yay! Your app is now serving about 20 requests per second, roughly 4 times more than it was with the synchronous hash generation. Additionally, the average latency is down from the 4 seconds before to just over 1 second. 
+ +Hopefully, through the performance investigation of this (admittedly contrived) example, you've seen how the V8 tick processor can help you gain a better understanding of the performance of your Node.js applications. diff --git a/locale/fr/docs/guides/timers-in-node.md b/locale/fr/docs/guides/timers-in-node.md new file mode 100644 index 000000000000..4cf765124e51 --- /dev/null +++ b/locale/fr/docs/guides/timers-in-node.md @@ -0,0 +1,125 @@ +--- +title: Timers in Node.js +layout: docs.hbs +--- + +# Timers in Node.js and beyond + +The Timers module in Node.js contains functions that execute code after a set period of time. Timers do not need to be imported via `require()`, since all the methods are available globally to emulate the browser JavaScript API. To fully understand when timer functions will be executed, it's a good idea to read up on the Node.js [Event Loop](/en/docs/guides/event-loop-timers-and-nexttick/). + +## Controlling the Time Continuum with Node.js + +The Node.js API provides several ways of scheduling code to execute at some point after the present moment. The functions below may seem familiar, since they are available in most browsers, but Node.js actually provides its own implementation of these methods. Timers integrate very closely with the system, and despite the fact that the API mirrors the browser API, there are some differences in implementation. + +### "When I say so" Execution ~ *`setTimeout()`* + +`setTimeout()` can be used to schedule code execution after a designated amount of milliseconds. This function is similar to [`window.setTimeout()`](https://developer.mozilla.org/en-US/docs/Web/API/WindowTimers/setTimeout) from the browser JavaScript API, however a string of code cannot be passed to be executed. + +`setTimeout()` accepts a function to execute as its first argument and the millisecond delay defined as a number as the second argument. Additional arguments may also be included and these will be passed on to the function. 
Here is an example of that: + +```js +function myFunc(arg) { + console.log(`arg was => ${arg}`); +} + +setTimeout(myFunc, 1500, 'funky'); +``` + +The above function `myFunc()` will execute as close to 1500 milliseconds (or 1.5 seconds) as possible due to the call of `setTimeout()`. + +The timeout interval that is set cannot be relied upon to execute after that *exact* number of milliseconds. This is because other executing code that blocks or holds onto the event loop will push the execution of the timeout back. The *only* guarantee is that the timeout will not execute *sooner* than the declared timeout interval. + +`setTimeout()` returns a `Timeout` object that can be used to reference the timeout that was set. This returned object can be used to cancel the timeout ( see `clearTimeout()` below) as well as change the execution behavior (see `unref()` below). + +### "Right after this" Execution ~ *`setImmediate()`* + +`setImmediate()` will execute code at the end of the current event loop cycle. This code will execute *after* any I/O operations in the current event loop and *before* any timers scheduled for the next event loop. This code execution could be thought of as happening "right after this", meaning any code following the `setImmediate()` function call will execute before the `setImmediate()` function argument. + +The first argument to `setImmediate()` will be the function to execute. Any subsequent arguments will be passed to the function when it is executed. 
Here's an example: + +```js +console.log('before immediate'); + +setImmediate((arg) => { + console.log(`executing immediate: ${arg}`); +}, 'so immediate'); + +console.log('after immediate'); +``` + +The above function passed to `setImmediate()` will execute after all runnable code has executed, and the console output will be: + +``` +before immediate +after immediate +executing immediate: so immediate +``` + +`setImmediate()` returns an `Immediate` object, which can be used to cancel the scheduled immediate (see `clearImmediate()` below). + +Note: Don't get `setImmediate()` confused with `process.nextTick()`. There are some major ways they differ. The first is that `process.nextTick()` will run *before* any `Immediate`s that are set as well as before any scheduled I/O. The second is that `process.nextTick()` is non-clearable, meaning once code has been scheduled to execute with `process.nextTick()`, the execution cannot be stopped, just like with a normal function. Refer to [this guide](/en/docs/guides/event-loop-timers-and-nexttick/#process-nexttick) to better understand the operation of `process.nextTick()`. + +### "Infinite Loop" Execution ~ *`setInterval()`* + +If there is a block of code that should execute multiple times, `setInterval()` can be used to execute that code. `setInterval()` takes a function argument that will run an infinite number of times with a given millisecond delay as the second argument. Just like `setTimeout()`, additional arguments can be added beyond the delay, and these will be passed on to the function call. Also like `setTimeout()`, the delay cannot be guaranteed because of operations that may hold on to the event loop, and therefore should be treated as an approximate delay. 
See the below example: + +```js +function intervalFunc() { + console.log('Cant stop me now!'); +} + +setInterval(intervalFunc, 1500); +``` + +In the above example, `intervalFunc()` will execute about every 1500 milliseconds, or 1.5 seconds, until it is stopped (see below). + +Just like `setTimeout()`, `setInterval()` also returns a `Timeout` object which can be used to reference and modify the interval that was set. + +## Clearing the Future + +What can be done if a `Timeout` or `Immediate` object needs to be cancelled? `setTimeout()`, `setImmediate()`, and `setInterval()` return a timer object that can be used to reference the set `Timeout` or `Immediate` object. By passing said object into the respective `clear` function, execution of that object will be halted completely. The respective functions are `clearTimeout()`, `clearImmediate()`, and `clearInterval()`. See the example below for an example of each: + +```js +const timeoutObj = setTimeout(() => { + console.log('timeout beyond time'); +}, 1500); + +const immediateObj = setImmediate(() => { + console.log('immediately executing immediate'); +}); + +const intervalObj = setInterval(() => { + console.log('interviewing the interval'); +}, 500); + +clearTimeout(timeoutObj); +clearImmediate(immediateObj); +clearInterval(intervalObj); +``` + +## Leaving Timeouts Behind + +Remember that `Timeout` objects are returned by `setTimeout` and `setInterval`. The `Timeout` object provides two functions intended to augment `Timeout` behavior with `unref()` and `ref()`. If there is a `Timeout` object scheduled using a `set` function, `unref()` can be called on that object. This will change the behavior slightly, and not call the `Timeout` object *if it is the last code to execute*. The `Timeout` object will not keep the process alive, waiting to execute. 
+ +In similar fashion, a `Timeout` object that has had `unref()` called on it can remove that behavior by calling `ref()` on that same `Timeout` object, which will then ensure its execution. Be aware, however, that this does not *exactly* restore the initial behavior for performance reasons. See below for examples of both: + +```js +const timerObj = setTimeout(() => { + console.log('will i run?'); +}); + +// if left alone, this statement will keep the above +// timeout from running, since the timeout will be the only +// thing keeping the program from exiting +timerObj.unref(); + +// we can bring it back to life by calling ref() inside +// an immediate +setImmediate(() => { + timerObj.ref(); +}); +``` + +## Further Down the Event Loop + +There's much more to the Event Loop and Timers than this guide has covered. To learn more about the internals of the Node.js Event Loop and how Timers operate during execution, check out this Node.js guide: [The Node.js Event Loop, Timers, and process.nextTick()](/en/docs/guides/event-loop-timers-and-nexttick/). diff --git a/locale/fr/docs/guides/working-with-different-filesystems.md b/locale/fr/docs/guides/working-with-different-filesystems.md new file mode 100644 index 000000000000..f4b875c0da31 --- /dev/null +++ b/locale/fr/docs/guides/working-with-different-filesystems.md @@ -0,0 +1,90 @@ +--- +title: Working with Different Filesystems +layout: docs.hbs +--- + +# Working with Different Filesystems + +Node.js exposes many features of the filesystem. But not all filesystems are alike. The following are suggested best practices to keep your code simple and safe when working with different filesystems. + +## Filesystem Behavior + +Before you can work with a filesystem, you need to know how it behaves. 
Different filesystems behave differently and have more or less features than others: case sensitivity, case insensitivity, case preservation, Unicode form preservation, timestamp resolution, extended attributes, inodes, Unix permissions, alternate data streams etc. + +Be wary of inferring filesystem behavior from `process.platform`. For example, do not assume that because your program is running on Darwin that you are therefore working on a case-insensitive filesystem (HFS+), as the user may be using a case-sensitive filesystem (HFSX). Similarly, do not assume that because your program is running on Linux that you are therefore working on a filesystem which supports Unix permissions and inodes, as you may be on a particular external drive, USB or network drive which does not. + +The operating system may not make it easy to infer filesystem behavior, but all is not lost. Instead of keeping a list of every known filesystem and behavior (which is always going to be incomplete), you can probe the filesystem to see how it actually behaves. The presence or absence of certain features which are easy to probe, are often enough to infer the behavior of other features which are more difficult to probe. + +Remember that some users may have different filesystems mounted at various paths in the working tree. + +## Avoid a Lowest Common Denominator Approach + +You might be tempted to make your program act like a lowest common denominator filesystem, by normalizing all filenames to uppercase, normalizing all filenames to NFC Unicode form, and normalizing all file timestamps to say 1-second resolution. This would be the lowest common denominator approach. + +Do not do this. You would only be able to interact safely with a filesystem which has the exact same lowest common denominator characteristics in every respect. You would be unable to work with more advanced filesystems in the way that users expect, and you would run into filename or timestamp collisions. 
You would most certainly lose and corrupt user data through a series of complicated dependent events, and you would create bugs that would be difficult if not impossible to solve. + +What happens when you later need to support a filesystem that only has 2-second or 24-hour timestamp resolution? What happens when the Unicode standard advances to include a slightly different normalization algorithm (as has happened in the past)? + +A lowest common denominator approach would tend to try to create a portable program by using only "portable" system calls. This leads to programs that are leaky and not in fact portable. + +## Adopt a Superset Approach + +Make the best use of each platform you support by adopting a superset approach. For example, a portable backup program should sync btimes (the created time of a file or folder) correctly between Windows systems, and should not destroy or alter btimes, even though btimes are not supported on Linux systems. The same portable backup program should sync Unix permissions correctly between Linux systems, and should not destroy or alter Unix permissions, even though Unix permissions are not supported on Windows systems. + +Handle different filesystems by making your program act like a more advanced filesystem. Support a superset of all possible features: case-sensitivity, case-preservation, Unicode form sensitivity, Unicode form preservation, Unix permissions, high-resolution nanosecond timestamps, extended attributes etc. + +Once you have case-preservation in your program, you can always implement case-insensitivity if you need to interact with a case-insensitive filesystem. But if you forego case-preservation in your program, you cannot interact safely with a case-preserving filesystem. The same is true for Unicode form preservation and timestamp resolution preservation. + +If a filesystem provides you with a filename in a mix of lowercase and uppercase, then keep the filename in the exact case given. 
If a filesystem provides you with a filename in mixed Unicode form or NFC or NFD (or NFKC or NFKD), then keep the filename in the exact byte sequence given. If a filesystem provides you with a millisecond timestamp, then keep the timestamp in millisecond resolution. + +When you work with a lesser filesystem, you can always downsample appropriately, with comparison functions as required by the behavior of the filesystem on which your program is running. If you know that the filesystem does not support Unix permissions, then you should not expect to read the same Unix permissions you write. If you know that the filesystem does not preserve case, then you should be prepared to see `ABC` in a directory listing when your program creates `abc`. But if you know that the filesystem does preserve case, then you should consider `ABC` to be a different filename to `abc`, when detecting file renames or if the filesystem is case-sensitive. + +## Case Preservation + +You may create a directory called `test/abc` and be surprised to see sometimes that `fs.readdir('test')` returns `['ABC']`. This is not a bug in Node. Node returns the filename as the filesystem stores it, and not all filesystems support case-preservation. Some filesystems convert all filenames to uppercase (or lowercase). + +## Unicode Form Preservation + +*Case preservation and Unicode form preservation are similar concepts. To understand why Unicode form should be preserved , make sure that you first understand why case should be preserved. Unicode form preservation is just as simple when understood correctly.* + +Unicode can encode the same characters using several different byte sequences. Several strings may look the same, but have different byte sequences. When working with UTF-8 strings, be careful that your expectations are in line with how Unicode works. 
Just as you would not expect all UTF-8 characters to encode to a single byte, you should not expect several UTF-8 strings that look the same to the human eye to have the same byte representation. This may be an expectation that you can have of ASCII, but not of UTF-8. + +You may create a directory called `test/café` (NFC Unicode form with byte sequence `<63 61 66 c3 a9>` and `string.length === 5`) and be surprised to see sometimes that `fs.readdir('test')` returns `['café']` (NFD Unicode form with byte sequence `<63 61 66 65 cc 81>` and `string.length === 6`). This is not a bug in Node. Node.js returns the filename as the filesystem stores it, and not all filesystems support Unicode form preservation. + +HFS+, for example, will normalize all filenames to a form almost always the same as NFD form. Do not expect HFS+ to behave the same as NTFS or EXT4 and vice-versa. Do not try to change data permanently through normalization as a leaky abstraction to paper over Unicode differences between filesystems. This would create problems without solving any. Rather, preserve Unicode form and use normalization as a comparison function only. + +## Unicode Form Insensitivity + +Unicode form insensitivity and Unicode form preservation are two different filesystem behaviors often mistaken for each other. Just as case-insensitivity has sometimes been incorrectly implemented by permanently normalizing filenames to uppercase when storing and transmitting filenames, so Unicode form insensitivity has sometimes been incorrectly implemented by permanently normalizing filenames to a certain Unicode form (NFD in the case of HFS+) when storing and transmitting filenames. It is possible and much better to implement Unicode form insensitivity without sacrificing Unicode form preservation, by using Unicode normalization for comparison only. 
+ +## Comparing Different Unicode Forms + +Node.js provides `string.normalize('NFC' / 'NFD')` which you can use to normalize a UTF-8 string to either NFC or NFD. You should never store the output from this function but only use it as part of a comparison function to test whether two UTF-8 strings would look the same to the user. + +You can use `string1.normalize('NFC') === string2.normalize('NFC')` or `string1.normalize('NFD') === string2.normalize('NFD')` as your comparison function. Which form you use does not matter. + +Normalization is fast but you may want to use a cache as input to your comparison function to avoid normalizing the same string many times over. If the string is not present in the cache then normalize it and cache it. Be careful not to store or persist the cache, use it only as a cache. + +Note that using `normalize()` requires that your version of Node.js include ICU (otherwise `normalize()` will just return the original string). If you download the latest version of Node.js from the website then it will include ICU. + +## Timestamp Resolution + +You may set the `mtime` (the modified time) of a file to `1444291759414` (millisecond resolution) and be surprised to see sometimes that `fs.stat` returns the new mtime as `1444291759000` (1-second resolution) or `1444291758000` (2-second resolution). This is not a bug in Node. Node.js returns the timestamp as the filesystem stores it, and not all filesystems support nanosecond, millisecond or 1-second timestamp resolution. Some filesystems even have very coarse resolution for the atime timestamp in particular, e.g. 24 hours for some FAT filesystems. + +## Do Not Corrupt Filenames and Timestamps Through Normalization + +Filenames and timestamps are user data. 
Just as you would never automatically rewrite user file data to uppercase the data or normalize `CRLF` to `LF` line-endings, so you should never change, interfere or corrupt filenames or timestamps through case / Unicode form / timestamp normalization. Normalization should only ever be used for comparison, never for altering data. + +Normalization is effectively a lossy hash code. You can use it to test for certain kinds of equivalence (e.g. do several strings look the same even though they have different byte sequences) but you can never use it as a substitute for the actual data. Your program should pass on filename and timestamp data as is. + +Your program can create new data in NFC (or in any combination of Unicode form it prefers) or with a lowercase or uppercase filename, or with a 2-second resolution timestamp, but your program should not corrupt existing user data by imposing case / Unicode form / timestamp normalization. Rather, adopt a superset approach and preserve case, Unicode form and timestamp resolution in your program. That way, you will be able to interact safely with filesystems which do the same. + +## Use Normalization Comparison Functions Appropriately + +Make sure that you use case / Unicode form / timestamp comparison functions appropriately. Do not use a case-insensitive filename comparison function if you are working on a case-sensitive filesystem. Do not use a Unicode form insensitive comparison function if you are working on a Unicode form sensitive filesystem (e.g. NTFS and most Linux filesystems which preserve both NFC and NFD or mixed Unicode forms). Do not compare timestamps at 2-second resolution if you are working on a nanosecond timestamp resolution filesystem. + +## Be Prepared for Slight Differences in Comparison Functions + +Be careful that your comparison functions match those of the filesystem (or probe the filesystem if possible to see how it would actually compare). 
Case-insensitivity for example is more complex than a simple `toLowerCase()` comparison. In fact, `toUpperCase()` is usually better than `toLowerCase()` (since it handles certain foreign language characters differently). But better still would be to probe the filesystem since every filesystem has its own case comparison table baked in. + +As an example, Apple's HFS+ normalizes filenames to NFD form but this NFD form is actually an older version of the current NFD form and may sometimes be slightly different from the latest Unicode standard's NFD form. Do not expect HFS+ NFD to be exactly the same as Unicode NFD all the time. diff --git a/locale/fr/docs/index.md b/locale/fr/docs/index.md new file mode 100644 index 000000000000..9b107a0384ed --- /dev/null +++ b/locale/fr/docs/index.md @@ -0,0 +1,48 @@ +--- +title: Docs +layout: docs.hbs +labels: + lts: LTS +--- + +# About Docs + +There are several types of documentation available on this website: + +* API reference documentation +* ES6 features +* Guides + +## API Reference Documentation + +The [API reference documentation](https://nodejs.org/api/) provides detailed information about a function or object in Node.js. This documentation indicates what arguments a method accepts, the return value of that method, and what errors may be related to that method. It also indicates which methods are available for different versions of Node.js. + +This documentation describes the built-in modules provided by Node.js. It does not document modules provided by the community. + +
+ +### Looking for API docs of previous releases? + +* [Node.js 13.x](https://nodejs.org/docs/latest-v13.x/api/) +* [Node.js 12.x](https://nodejs.org/docs/latest-v12.x/api/) +* [Node.js 11.x](https://nodejs.org/docs/latest-v11.x/api/) +* [Node.js 10.x](https://nodejs.org/docs/latest-v10.x/api/) +* [Node.js 9.x](https://nodejs.org/docs/latest-v9.x/api/) +* [Node.js 8.x](https://nodejs.org/docs/latest-v8.x/api/) +* [Node.js 7.x](https://nodejs.org/docs/latest-v7.x/api/) +* [Node.js 6.x](https://nodejs.org/docs/latest-v6.x/api/) +* [Node.js 5.x](https://nodejs.org/docs/latest-v5.x/api/) +* [Node.js 4.x](https://nodejs.org/docs/latest-v4.x/api/) +* [Node.js 0.12.x](https://nodejs.org/docs/latest-v0.12.x/api/) +* [Node.js 0.10.x](https://nodejs.org/docs/latest-v0.10.x/api/) +* [All versions](https://nodejs.org/docs/) + +
+ +## ES6 Features + +The [ES6 section](/en/docs/es6/) describes the three ES6 feature groups, and details which features are enabled by default in Node.js, alongside explanatory links. It also shows how to find which version of V8 shipped with a particular Node.js release. + +## Guides + +The [Guides section](/en/docs/guides/) has long-form, in-depth articles about Node.js technical features and capabilities. diff --git a/locale/fr/docs/meta/topics/dependencies.md b/locale/fr/docs/meta/topics/dependencies.md new file mode 100644 index 000000000000..db12c22a2ace --- /dev/null +++ b/locale/fr/docs/meta/topics/dependencies.md @@ -0,0 +1,78 @@ +--- +title: Dependencies +layout: docs.hbs +--- + +# Dependencies + +There are several dependencies that Node.js relies on to work the way it does. + +* [Libraries](#libraries) + * [V8](#v8) + * [libuv](#libuv) + * [llhttp](#llhttp) + * [c-ares](#c-ares) + * [OpenSSL](#openssl) + * [zlib](#zlib) +* [Tools](#tools) + * [npm](#npm) + * [gyp](#gyp) + * [gtest](#gtest) + +## Libraries + +### V8 + +The V8 library provides Node.js with a JavaScript engine, which Node.js controls via the V8 C++ API. V8 is maintained by Google, for use in Chrome. + +* [Documentation](https://v8.dev/docs) + +### libuv + +Another important dependency is libuv, a C library that is used to abstract non-blocking I/O operations to a consistent interface across all supported platforms. It provides mechanisms to handle file system, DNS, network, child processes, pipes, signal handling, polling and streaming. It also includes a thread pool for offloading work for some things that can't be done asynchronously at the operating system level. + +* [Documentation](http://docs.libuv.org/) + +### llhttp + +HTTP parsing is handled by a lightweight TypeScript and C library called llhttp. It is designed to not make any syscalls or allocations, so it has a very small per-request memory footprint. 
+ +* [Documentation](https://github.com/nodejs/llhttp) + +### c-ares + +For some asynchronous DNS requests, Node.js uses a C library called c-ares. It is exposed through the DNS module in JavaScript as the `resolve()` family of functions. The `lookup()` function, which is what the rest of core uses, makes use of threaded `getaddrinfo(3)` calls in libuv. The reason for this is that c-ares supports /etc/hosts, /etc/resolv.conf and /etc/svc.conf, but not things like mDNS. + +* [Documentation](https://c-ares.haxx.se/docs.html) + +### OpenSSL + +OpenSSL is used extensively in both the `tls` and `crypto` modules. It provides battle-tested implementations of many cryptographic functions that the modern web relies on for security. + +* [Documentation](https://www.openssl.org/docs/) + +### zlib + +For fast compression and decompression, Node.js relies on the industry-standard zlib library, also known for its use in gzip and libpng. Node.js uses zlib to create sync, async and streaming compression and decompression interfaces. + +* [Documentation](https://www.zlib.net/manual.html) + +## Tools + +### npm + +Node.js is all about modularity, and with that comes the need for a quality package manager; for this purpose, npm was made. With npm comes the largest selection of community-created packages of any programming ecosystem, which makes building Node.js apps quick and easy. + +* [Documentation](https://docs.npmjs.com/) + +### gyp + +The build system is handled by gyp, a python-based project generator copied from V8. It can generate project files for use with build systems across many platforms. Node.js requires a build system because large parts of it — and its dependencies — are written in languages that require compilation. + +* [Documentation](https://gyp.gsrc.io/docs/UserDocumentation.md) + +### gtest + +Native code can be tested using gtest, which is taken from Chromium. It allows testing C/C++ without needing an existing node executable to bootstrap from. 
+ +* [Documentation](https://code.google.com/p/googletest/wiki/V1_7_Documentation) diff --git a/locale/fr/download/current.md b/locale/fr/download/current.md index 496fe790b306..b6b5bc074f47 100644 --- a/locale/fr/download/current.md +++ b/locale/fr/download/current.md @@ -3,33 +3,33 @@ layout: download-current.hbs title: Téléchargements download: Télécharger downloads: - headline: Téléchargements - lts: LTS - current: Dernière - tagline-current: Dernières fonctionnalités - tagline-lts: Recommandé pour la plupart des utilisateurs - display-hint: Afficher les téléchargements pour Node - intro: > - Téléchargez le code source de Node.js pour votre système d'exploitation et commencez à développer dès aujourd'hui. - currentVersion: Dernière version Actuelle - buildInstructions: Compiler Node.js à partir du code source sur les systèmes d'exploitation maintenus - WindowsInstaller: Installateur Windows - WindowsBinary: Binaire Windows - MacOSInstaller: Installateur macOS - MacOSBinary: Binaire macOS - LinuxBinaries: Binaires Linux - SourceCode: Code Source + headline: Téléchargements + lts: LTS + current: Dernière + tagline-current: Dernières fonctionnalités + tagline-lts: Recommandé pour la plupart des utilisateurs + display-hint: Afficher les téléchargements pour Node + intro: > + Téléchargez le code source de Node.js pour votre système d'exploitation et commencez à développer dès aujourd'hui. + currentVersion: Dernière version Actuelle + buildInstructions: Compiler Node.js à partir du code source sur les systèmes d'exploitation maintenus + WindowsInstaller: Installateur Windows + WindowsBinary: Binaire Windows + MacOSInstaller: Installateur macOS + MacOSBinary: Binaire macOS + LinuxBinaries: Binaires Linux + SourceCode: Code Source additional: - headline: Autres plate-formes - intro: > - Les membres de la communauté Node.js maintiennent des installateurs de Node.js pour d'autres plate-formes. 
Veuillez noter que ces téléchargements ne sont pas maintenus par l'équipe principale de Node.js et n'offrent pas forcément le même niveau de support que les téléchargements officiels. - dockerImage: Image Docker de Node.js - platform: Plate-forme - provider: Fournisseur - SmartOSBinaries: Binaires SmartOS - DockerImage: Image Docker - officialDockerImage: Image officielle de Node.js pour Docker - LinuxPowerSystems: Linux sur Power LE Systems - LinuxSystemZ: Linux sur System z - AIXPowerSystems: AIX sur Power Systems + headline: Autres plate-formes + intro: > + Les membres de la communauté Node.js maintiennent des installateurs de Node.js pour d'autres plate-formes. Veuillez noter que ces téléchargements ne sont pas maintenus par l'équipe principale de Node.js et n'offrent pas forcément le même niveau de support que les téléchargements officiels. + dockerImage: Image Docker de Node.js + platform: Plate-forme + provider: Fournisseur + SmartOSBinaries: Binaires SmartOS + DockerImage: Image Docker + officialDockerImage: Image officielle de Node.js pour Docker + LinuxPowerSystems: Linux sur Power LE Systems + LinuxSystemZ: Linux sur System z + AIXPowerSystems: AIX sur Power Systems --- + diff --git a/locale/fr/download/index.md b/locale/fr/download/index.md index 279afd0a604d..66c5a1e63d36 100644 --- a/locale/fr/download/index.md +++ b/locale/fr/download/index.md @@ -3,33 +3,33 @@ layout: download.hbs title: Téléchargements download: Télécharger downloads: - headline: Téléchargements - lts: LTS - current: Dernière - tagline-current: Dernières fonctionnalités - tagline-lts: Recommandé pour la plupart des utilisateurs - display-hint: Afficher les téléchargements pour Node - intro: > - Téléchargez le code source de Node.js pour votre système d'exploitation et commencez à développer dès aujourd'hui. 
- currentVersion: Dernière version LTS - buildInstructions: Compiler Node.js à partir du code source sur les systèmes d'exploitation maintenus - WindowsInstaller: Installateur Windows - WindowsBinary: Binaire Windows - MacOSInstaller: Installateur macOS - MacOSBinary: Binaire macOS - LinuxBinaries: Binaires Linux - SourceCode: Code Source + headline: Téléchargements + lts: LTS + current: Dernière + tagline-current: Dernières fonctionnalités + tagline-lts: Recommandé pour la plupart des utilisateurs + display-hint: Afficher les téléchargements pour Node + intro: > + Téléchargez le code source de Node.js pour votre système d'exploitation et commencez à développer dès aujourd'hui. + currentVersion: Dernière version LTS + buildInstructions: Compiler Node.js à partir du code source sur les systèmes d'exploitation maintenus + WindowsInstaller: Installateur Windows + WindowsBinary: Binaire Windows + MacOSInstaller: Installateur macOS + MacOSBinary: Binaire macOS + LinuxBinaries: Binaires Linux + SourceCode: Code Source additional: - headline: Autres plate-formes - intro: > - Les membres de la communauté Node.js maintiennent des installateurs de Node.js pour d'autres plate-formes. Veuillez noter que ces téléchargements ne sont pas maintenus par l'équipe principale de Node.js et n'offrent pas forcément le même niveau de support que les téléchargements officiels. - dockerImage: Image Docker de Node.js - platform: Plate-forme - provider: Fournisseur - SmartOSBinaries: Binaires SmartOS - DockerImage: Image Docker - officialDockerImage: Image officielle de Node.js pour Docker - LinuxPowerSystems: Linux sur Power LE Systems - LinuxSystemZ: Linux sur System z - AIXPowerSystems: AIX sur Power Systems + headline: Autres plate-formes + intro: > + Les membres de la communauté Node.js maintiennent des installateurs de Node.js pour d'autres plate-formes. 
Veuillez noter que ces téléchargements ne sont pas maintenus par l'équipe principale de Node.js et n'offrent pas forcément le même niveau de support que les téléchargements officiels. + dockerImage: Image Docker de Node.js + platform: Plate-forme + provider: Fournisseur + SmartOSBinaries: Binaires SmartOS + DockerImage: Image Docker + officialDockerImage: Image officielle de Node.js pour Docker + LinuxPowerSystems: Linux sur Power LE Systems + LinuxSystemZ: Linux sur System z + AIXPowerSystems: AIX sur Power Systems --- + diff --git a/locale/fr/download/package-manager.md b/locale/fr/download/package-manager.md new file mode 100644 index 000000000000..7be271f9a17c --- /dev/null +++ b/locale/fr/download/package-manager.md @@ -0,0 +1,243 @@ +--- +layout: page.hbs +title: Installing Node.js via package manager +--- + +# Installing Node.js via package manager + +***Note:*** The packages on this page are maintained and supported by their respective packagers, **not** the Node.js core team. Please report any issues you encounter to the package maintainer. If it turns out your issue is a bug in Node.js itself, the maintainer will report the issue upstream. + +--- + +* [Android](#android) +* [Arch Linux](#arch-linux) +* [Debian and Ubuntu based Linux distributions, Enterprise Linux/Fedora and Snap packages](#debian-and-ubuntu-based-linux-distributions-enterprise-linux-fedora-and-snap-packages) +* [FreeBSD](#freebsd) +* [Gentoo](#gentoo) +* [IBM i](#ibm-i) +* [NetBSD](#netbsd) +* [nvm](#nvm) +* [OpenBSD](#openbsd) +* [openSUSE and SLE](#opensuse-and-sle) +* [macOS](#macos) +* [SmartOS and illumos](#smartos-and-illumos) +* [Solus](#solus) +* [Void Linux](#void-linux) +* [Windows](#windows) + +--- + +## Android + +Android support is still experimental in Node.js, so precompiled binaries are not yet provided by Node.js developers. + +However, there are some third-party solutions. 
For example, [Termux](https://termux.com/) community provides terminal emulator and Linux environment for Android, as well as own package manager and [extensive collection](https://github.com/termux/termux-packages) of many precompiled applications. This command in Termux app will install the last available Node.js version: + +```bash +pkg install nodejs +``` + +Currently, Termux Node.js binaries are linked against `system-icu` (depending on `libicu` package). + +## Arch Linux + +Node.js and npm packages are available in the Community Repository. + +```bash +pacman -S nodejs npm +``` + +## Debian and Ubuntu based Linux distributions, Enterprise Linux/Fedora and Snap packages + +[Node.js binary distributions](https://github.com/nodesource/distributions/blob/master/README.md) are available from NodeSource. + +## FreeBSD + +The most recent release of Node.js is available via the [www/node](https://www.freshports.org/www/node) port. + +Install a binary package via [pkg](https://www.freebsd.org/cgi/man.cgi?pkg): + +```bash +pkg install node +``` + +Or compile it on your own using [ports](https://www.freebsd.org/cgi/man.cgi?ports): + +```bash +cd /usr/ports/www/node && make install +``` + +## Gentoo + +Node.js is available in the portage tree. + +```bash +emerge nodejs +``` + +## IBM i + +LTS versions of Node.js are available from IBM, and are available via [the 'yum' package manager](https://ibm.biz/ibmi-rpms). The package name is `nodejs` followed by the major version number (for instance, `nodejs8`, `nodejs10`, `nodejs12`, etc) + +To install Node.js 12.x from the command line, run the following as a user with \*ALLOBJ special authority: + +```bash +yum install nodejs12 +``` + +Node.js can also be installed with the IBM i Access Client Solutions product. 
See [this support document](http://www-01.ibm.com/support/docview.wss?uid=nas8N1022619) for more details + +## NetBSD + +Node.js is available in the pkgsrc tree: + +```bash +cd /usr/pkgsrc/lang/nodejs && make install +``` + +Or install a binary package (if available for your platform) using pkgin: + +```bash +pkgin -y install nodejs +``` + +## nvm +Node Version Manager is a bash script used to manage multiple released Node.js versions. It allows you to perform operations like install, uninstall, switch version, etc. To install nvm, use this [install script](https://github.com/nvm-sh/nvm#install--update-script). + +On Unix / OS X systems Node.js built from source can be installed using [nvm](https://github.com/creationix/nvm) by installing into the location that nvm expects: + +```bash +env VERSION=`python tools/getnodeversion.py` make install DESTDIR=`nvm_version_path v$VERSION` PREFIX="" +``` + +After this you can use `nvm` to switch between released versions and versions built from source. For example, if the version of Node.js is v8.0.0-pre: + +```bash +nvm use 8 +``` + +Once the official release is out you will want to uninstall the version built from source: + +```bash +nvm uninstall 8 +``` + +## OpenBSD + +Node.js is available through the ports system. + +```bash +/usr/ports/lang/node +``` + +Using [pkg_add](https://man.openbsd.org/OpenBSD-current/man1/pkg_add.1) on OpenBSD: + +```bash +pkg_add node +``` + +## openSUSE and SLE + +Node.js is available in the main repositories under the following packages: + +* **openSUSE Leap 42.2**: `nodejs4` +* **openSUSE Leap 42.3**: `nodejs4`, `nodejs6` +* **openSUSE Tumbleweed**: `nodejs4`, `nodejs6`, `nodejs8` +* **SUSE Linux Enterprise Server (SLES) 12**: `nodejs4`, `nodejs6` (The "Web and Scripting Module" must be [added before installing](https://www.suse.com/documentation/sles-12/book_sle_deployment/data/sec_add-ons_extensions.html).) 
+ +For example, to install Node.js 4.x on openSUSE Leap 42.2, run the following as root: + +```bash +zypper install nodejs4 +``` + +## macOS + +Simply download the [macOS Installer](https://nodejs.org/en/#home-downloadhead) directly from the [nodejs.org](https://nodejs.org/) web site. + +_If you want to download the package with bash:_ + +```bash +curl "https://nodejs.org/dist/latest/node-${VERSION:-$(wget -qO- https://nodejs.org/dist/latest/ | sed -nE 's|.*>node-(.*)\.pkg.*|\1|p')}.pkg" > "$HOME/Downloads/node-latest.pkg" && sudo installer -store -pkg "$HOME/Downloads/node-latest.pkg" -target "/" +``` + +### Alternatives + +Using **[Homebrew](https://brew.sh/)**: + +```bash +brew install node +``` + +Using **[MacPorts](https://www.macports.org/)**: + +```bash +port install nodejs + +# Example +port install nodejs7 +``` + +Using **[pkgsrc](https://pkgsrc.joyent.com/install-on-osx/)**: + +Install the binary package: + +```bash +pkgin -y install nodejs +``` + +Or build manually from pkgsrc: + +```bash +cd pkgsrc/lang/nodejs && bmake install +``` + +## SmartOS and illumos + +SmartOS images come with pkgsrc pre-installed. On other illumos distributions, first install **[pkgsrc](https://pkgsrc.joyent.com/install-on-illumos/)**, then you may install the binary package as normal: + +```bash +pkgin -y install nodejs +``` + +Or build manually from pkgsrc: + +```bash +cd pkgsrc/lang/nodejs && bmake install +``` + +## Solus + +Solus provides Node.js in its main repository. + +```bash +sudo eopkg install nodejs +``` + +## Void Linux + +Void Linux ships Node.js stable in the main repository. + +```bash +xbps-install -Sy nodejs +``` + +## Windows + +Simply download the [Windows Installer](https://nodejs.org/en/#home-downloadhead) directly from the [nodejs.org](https://nodejs.org/) web site. 
+ +### Alternatives + +Using **[Chocolatey](https://chocolatey.org/)**: + +```bash +cinst nodejs +# or for full install with npm +cinst nodejs.install +``` + +Using **[Scoop](https://scoop.sh/)**: + +```bash +scoop install nodejs +``` diff --git a/locale/fr/download/releases.md b/locale/fr/download/releases.md new file mode 100644 index 000000000000..dc2352ecd8b2 --- /dev/null +++ b/locale/fr/download/releases.md @@ -0,0 +1,23 @@ +--- +layout: download-releases.hbs +title: Previous Releases +modules: "NODE_MODULE_VERSION refers to the ABI (application binary interface) version number of Node.js, used to determine which versions of Node.js compiled C++ add-on binaries can be loaded in to without needing to be re-compiled. It used to be stored as hex value in earlier versions, but is now represented as an integer." +--- + +### io.js & Node.js +Releases 1.x through 3.x were called "io.js" as they were part of the io.js fork. As of Node.js 4.0.0 the former release lines of io.js converged with Node.js 0.12.x into unified Node.js releases. + +
+ +#### Looking for latest release of a version branch? + +* [Node.js 12.x](https://nodejs.org/dist/latest-v12.x/) +* [Node.js 10.x](https://nodejs.org/dist/latest-v10.x/) +* [Node.js 8.x](https://nodejs.org/dist/latest-v8.x/) +* [Node.js 6.x](https://nodejs.org/dist/latest-v6.x/) +* [Node.js 4.x](https://nodejs.org/dist/latest-v4.x/) +* [Node.js 0.12.x](https://nodejs.org/dist/latest-v0.12.x/) +* [Node.js 0.10.x](https://nodejs.org/dist/latest-v0.10.x/) +* [All versions](https://nodejs.org/dist/) + +
diff --git a/locale/fr/get-involved/code-and-learn.md b/locale/fr/get-involved/code-and-learn.md new file mode 100644 index 000000000000..78944e2973d5 --- /dev/null +++ b/locale/fr/get-involved/code-and-learn.md @@ -0,0 +1,24 @@ +--- +title: Code + Learn +layout: contribute.hbs +--- + +# Code + Learn + +Code & Learn events allow you to get started (or go further) with Node.js core contributions. Experienced contributors help guide you through your first (or second or third or fourth) commit to Node.js core. They also are available to provide impromptu guided tours through specific areas of Node.js core source code. + +* [Moscow, Russia on November 6, 2019](https://medium.com/piterjs/announcement-node-js-code-learn-in-moscow-fd997241c77) +* Shanghai, China at [COSCon](https://bagevent.com/event/5744455): November 3, 2019 +* Medellin, Colombia in June 21st & 22nd [NodeConfCo](https://colombia.nodeconf.com/) +* [Saint-Petersburg, Russia on May 26](https://medium.com/piterjs/code-learn-ce20d330530f) +* Bangalore, India at [Node.js - Code & Learn Meetup](https://www.meetup.com/Polyglot-Languages-Runtimes-Java-JVM-nodejs-Swift/events/256057028/): November 17, 2018 +* Kilkenny, Ireland at [NodeConfEU](https://www.nodeconf.eu/): November 4, 2018 +* Vancouver, BC at [Node Interactive](https://events.linuxfoundation.org/events/node-js-interactive-2018/): October 12, 2018 +* [Oakland on April 22, 2017](https://medium.com/the-node-js-collection/code-learn-learn-how-to-contribute-to-node-js-core-8a2dbdf9be45) +* Shanghai at JSConf.CN: July 2017 +* Vancouver, BC at [Node Interactive](http://events.linuxfoundation.org/events/node-interactive): October 6, 2017 +* Kilkenny, Ireland at [NodeConfEU](http://www.nodeconf.eu/): November 5, 2017 +* Austin in December 2016 +* Tokyo in November 2016 +* Amsterdam in September 2016 +* Dublin and London in September 2015 diff --git a/locale/fr/get-involved/collab-summit.md b/locale/fr/get-involved/collab-summit.md new file mode 100644 index 
000000000000..6ae5d97c6e1e --- /dev/null +++ b/locale/fr/get-involved/collab-summit.md @@ -0,0 +1,17 @@ +--- +title: Collab Summit +layout: contribute.hbs +--- + +# Collab Summit +Collaboration Summit is an un-conference for bringing current and potential contributors together to discuss Node.js with lively collaboration, education, and knowledge sharing. Committees and working groups come together twice per year to make important decisions while also being able to work on some exciting efforts they want to push forward in-person. + +## Who attends? + +Anyone is welcome to attend Collab Summit. During the summit, leaders will help onboard new contributors to groups they'd love to help prior to integrating them into the working sessions. + +This is your opportunity to learn what is happening within the community to jump in and contribute with the skills you have and would like to hone. + +Working groups will put together a schedule so that people can familiarize themselves before folks get onsite, having the general collaborator discussions, and then dive into breakout sessions. + +We'd love to see you at Collab Summit! Check out the [Summit repo](https://github.com/nodejs/summit) for upcoming and past Collab Summits and have a look at the [issues filed](https://github.com/nodejs/summit/issues) that share what individual working groups and committees are looking to discuss in-person. diff --git a/locale/fr/get-involved/contribute.md b/locale/fr/get-involved/contribute.md new file mode 100644 index 000000000000..6309e099837f --- /dev/null +++ b/locale/fr/get-involved/contribute.md @@ -0,0 +1,47 @@ +--- +title: Contributing +layout: contribute.hbs +--- + +# Contributing + +Thank you for your interest in contributing to Node.js! There are multiple ways and places you can contribute, and we're here to help facilitate that. 
+ +## Asking for General Help + +Because the level of activity in the `nodejs/node` repository is so high, questions or requests for general help using Node.js should be directed at the [Node.js help repository](https://github.com/nodejs/help/issues). + +## Reporting an Issue + +If you have found what you believe to be an issue with Node.js please do not hesitate to file an issue on the GitHub project. When filing your issue please make sure you can express the issue with a reproducible test case, and that test case should not include any external dependencies. That is to say, the test case can be executed without anything more than Node.js itself. + +When reporting an issue we also need as much information about your environment that you can include. We never know what information will be pertinent when trying narrow down the issue. Please include at least the following information: + +* Version of Node.js +* Platform you're running on (macOS, SmartOS, Linux, Windows) +* Architecture you're running on (32bit or 64bit and x86 or ARM) + +The Node.js project is currently managed across a number of separate GitHub repositories, each with their own separate issues database. If possible, please direct any issues you are reporting to the appropriate repository but don't worry if things happen to get put in the wrong place, the community of contributors will be more than happy to help get you pointed in the right direction. + +* To report issues specific to Node.js, please use [nodejs/node](https://github.com/nodejs/node) +* To report issues specific to this website, please use [nodejs/nodejs.org](https://github.com/nodejs/nodejs.org/issues) + +## Code contributions + +If you'd like to fix bugs or add a new feature to Node.js, please make sure you consult the [Node.js Contribution Guidelines](https://github.com/nodejs/node/blob/master/CONTRIBUTING.md#pull-requests). 
The review process by existing collaborators for all contributions to the project is explained there as well. + +If you are wondering how to start, you can check [Node Todo](https://www.nodetodo.org/) which may guide you towards your first contribution. + +## Becoming a collaborator + +By becoming a collaborator, contributors can have even more impact on the project. They can help other contributors by reviewing their contributions, triage issues and take an even bigger part in shaping the project's future. Individuals identified by the TSC as making significant and valuable contributions across any Node.js repository may be made Collaborators and given commit access to the project. Activities taken into consideration include (but are not limited to) the quality of: + +* code commits and pull requests +* documentation commits and pull requests +* comments on issues and pull requests +* contributions to the Node.js website +* assistance provided to end users and novice contributors +* participation in Working Groups +* other participation in the wider Node.js community + +If individuals making valuable contributions do not believe they have been considered for commit access, they may [log an issue](https://github.com/nodejs/TSC/issues) or [contact a TSC member](https://github.com/nodejs/TSC#current-members) directly. diff --git a/locale/fr/get-involved/index.md b/locale/fr/get-involved/index.md index c47639b9ae24..245679834941 100644 --- a/locale/fr/get-involved/index.md +++ b/locale/fr/get-involved/index.md @@ -7,33 +7,22 @@ layout: contribute.hbs ## Discussions de la communauté -* La [liste des tickets GitHub](https://github.com/nodejs/node/issues) est le bon endroit pour discuter des fonctionnalités cœurs de Node.js. 
- -* Pour une discussion en temps réel à propos du développement de Node, allez sur `irc.freenode.net` dans le canal `#node.js` avec un [client IRC](https://fr.wikipedia.org/wiki/Liste_de_clients_IRC) ou connectez vous depuis votre navigateur web en utilisant [le WebChat de freenode](https://webchat.freenode.net/#node.js) (en anglais). - -* Le compte Twitter officiel de Node.js est [nodejs](https://twitter.com/nodejs) (en anglais). - -* Le [calendrier de la Fondation Node.js](https://nodejs.org/calendar) avec toutes les réunions publiques d'équipe (en anglais). - -* [Node.js Everywhere](https://newsletter.nodejs.org) est la _newsletter_ mensuelle officielle de Node.js (en anglais). - -* [Node.js Collection](https://medium.com/the-node-js-collection) est une liste de contenus maintenue par la communauté sur Medium (en anglais). - -* La [Community Committee](https://github.com/nodejs/community-committee) est un comité de haut niveau de la Fondation Node.js axé sur les efforts communautaires. +* The [GitHub issues list](https://github.com/nodejs/node/issues) is the place for discussion of Node.js core features. +* For real-time chat about Node.js development go to `irc.freenode.net` in the `#node.js` channel with an [IRC client](https://en.wikipedia.org/wiki/Comparison_of_Internet_Relay_Chat_clients) or connect in your web browser to the channel using [freenode's WebChat](https://webchat.freenode.net/#node.js). +* The official Node.js Twitter account is [nodejs](https://twitter.com/nodejs). +* The [Node.js Foundation calendar](https://nodejs.org/calendar) with all public team meetings. +* [Node.js Everywhere](https://newsletter.nodejs.org) is the official Node.js Monthly Newsletter. +* [Node.js Collection](https://medium.com/the-node-js-collection) is a collection of community-curated content on Medium. 
+* The [Community Committee](https://github.com/nodejs/community-committee) is a top-level committee in the Node.js Foundation focused on community-facing efforts. +* [Node Slackers](https://www.nodeslackers.com/) is a Node.js-focused Slack community. ## Apprentissage -* [La documentation officielle de l'API de référence](https://nodejs.org/api/) détaille l'API Node (en anglais). - -* [NodeSchool.io](https://nodeschool.io/fr-fr/) vous apprendra les concepts de Node.js avec des jeux interactifs en ligne de commande. - -* [L'étiquette Stack Overflow Node.js](https://stackoverflow.com/questions/tagged/node.js) rassemble de nouvelles informations chaque jour (en anglais). - -* [L'étiquette Node.js de la communauté DEV](https://dev.to/t/node) est un endroit pour partager ses projets Node.js, des articles et des tutoriaux mais aussi pour démarrer des conversations et demander des retours sur des sujets liés à Node.js. Tous les niveaux de connaissances sont acceptés. (en anglais) - -* [Nodeiflux](https://discordapp.com/invite/vUsrbjd) est une communauté sympathique centrée sur le développement _back-end_ avec Node.js sur le service Discord (en anglais). - -* [How To Node](http://howtonode.org/) a de nombreux tutoriaux utiles (en anglais). +* [Official API reference documentation](https://nodejs.org/api/) details the Node.js API. +* [NodeSchool.io](https://nodeschool.io/) will teach you Node.js concepts via interactive command-line games. +* [Stack Overflow Node.js tag](https://stackoverflow.com/questions/tagged/node.js) collects new information every day. +* [The DEV Community Node.js tag](https://dev.to/t/node) is a place to share Node.js projects, articles and tutorials as well as start discussions and ask for feedback on Node.js-related topics. Developers of all skill-levels are welcome to take part. +* [Nodeiflux](https://discordapp.com/invite/vUsrbjd) is a friendly community of Node.js backend developers supporting each other on Discord. 
## Site et projets des communautés internationales diff --git a/locale/fr/get-involved/node-meetups.md b/locale/fr/get-involved/node-meetups.md new file mode 100644 index 000000000000..6333054cda64 --- /dev/null +++ b/locale/fr/get-involved/node-meetups.md @@ -0,0 +1,679 @@ +--- +title: Node.js Meetups +layout: contribute.hbs +--- + +# Node.js Meetups + +This is a list of Node.js meetups. Please submit a PR if you'd like to add your local group! + +## Code of Conduct + +If any meetup does not have a CoC and/or is reported as an unsafe place, it will be removed from this list. + +## Notes for adding meetups + +FORMAT + +* [Meetup](https://www.meetup.com/pdxnode/) +* Frequency of meetups +* How to submit a talk? «list here» +* Organizer names (if you wish to provide) +* Organizers contact info (if you wish to provide) + +REQUIREMENTS + +* Please state in your PR if this meetup abides by CoC. +* Link to CoC for verification. +* If you do not currently have a CoC, update the meetup with CoC before submitting. +* Submit your PR in alphabetical order. + +## Meetups + +### Africa + +* [Meetup](https://www.nodejs.africa) +* Frequency of meetups - bi-monthly +* How to submit a talk? [Submit to this form](https://docs.google.com/forms/d/e/1FAIpQLSe3vPkiO8ijtbP7fUhEotKefXU-fWUoDGtUSo1khmtA_7v1WQ/viewform) +* Organizer name - Agiri Abraham +* Organizer contact info - + +### Armenia/Yerevan + +* [Meetup](https://www.facebook.com/nodejsarmenia/) +* Frequency of meetups - quarterly +* How to submit a talk? [Write in our Telegram chat](https://t.me/nodejsarmenia) +* Organizer name - Node.js Armenian Community +* Organizer contact info - nodejsarm@gmail.com + +### Argentina + +#### Buenos Aires Province + +##### Buenos Aires + +* [Meetup](https://www.meetup.com/banodejs/) +* Frequency of meetups - monthly +* How to submit a talk? 
Write a message in the meetup page +* Organizer name - Alejandro Oviedo +* Organizer contact info - + +### Australia + +#### Victoria + +##### Melbourne + +* [Meetup](https://www.meetup.com/NodeMelbourne/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Andrey Sidorov +* Organizer contact info - + +##### Sydney + +* [Meetup](https://www.meetup.com/node-sydney/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - James Richardson. Co-organizer: Jessica Claire +* Organizer contact info - + +### Belgium + +#### Brussels + +##### Brussels + +* [Meetup](https://www.meetup.com/Belgian-node-js-User-Group/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Steven Beeckman +* Organizer contact info - + +### Bolivia + +#### La Paz + +* [Meetup](https://www.meetup.com/LaPazjs) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer names - [Mauricio De La Quintana](https://github.com/maudel), [Guillermo Paredes](https://github.com/GuillermoParedes), [Adrian Zelada](https://github.com/adrianzelada). +* Organizer contact info - [@maudelaquintana](https://twitter.com/maudelaquintana) + +### Brazil + +#### São Paulo + +* [Meetup](https://meetup.com/nodebr) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer names - [Erick Wendel](https://github.com/erickwendel), [Alan Hoffmeister](https://github.com/alanhoff), [Igor França](https://github.com/horaddrim), [Icaro Caldeira](https://github.com/icarcal), [Marcus Bergamo](https://github.com/thebergamo), [Igor Halfeld](https://github.com/igorHalfeld), [Lucas Santos](https://github.com/khaosdoctor). 
+* Organizer contact info - [@erickwendel_](https://twitter.com/erickwendel_), [@_StaticVoid](https://twitter.com/_staticvoid) + +##### Campinas + +* [Meetup](https://www.meetup.com/Nodeschool-Campinas/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Filipe Oliveira +* Organizer contact info - + +#### Minas Gerais + +* [Meetup](https://www.meetup.com/nodebr/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Hugo Iuri +* Organizer contact info - + +#### Rio Grande do Sul + +##### Porto Alegre + +* [Meetup](https://www.meetup.com/Node-js-Porto-Alegre-Meetup/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Henrique Schreiner +* Organizer contact info - + +### Canada + +#### British Columbia + +##### Vancouver + +* [Meetup](https://www.meetup.com/Node-JS-Community-Hangouts) +* Frequency of meetups - quarterly +* How to submit a talk? DM @keywordnew on twitter +* Organizer name - Manil Chowdhury +* Organizer contact info - + +#### Ontario + +##### Toronto + +* [Toronto JS Meetup](http://torontojs.com/) +* Frequency of meetups - weekly +* How to submit a talk? _Contact Organizers through Slack: http://slack.torontojs.com/_ +* Organizers name - Dann T. & Paul D. +* Organizer contact info - _Community Slack_ + +### Chile + +#### Santiago + +* [Meetup](https://www.meetup.com/es-ES/NodersJS/) +* Frequency of meetups - monthly +* How to submit a talk? Issue on GitHub [here](https://github.com/Noders/Meetups/issues/new) +* Organizer name - Rodrigo Adones and Ender Bonnet +* Organizer contact info - [Rodrigo](https://github.com/L0rdKras), [Ender](https://twitter.com/enbonnet) + +### Colombia + +#### Antioquia + +##### Medellín + +* [Meetup](https://www.meetup.com/node_co/) +* Frequency of meetups - monthly +* How to submit a talk? 
Contact organizers in the meetup page. +* Organizer name - Camilo Montoya +* Organizer contact info - + +### Finland + +#### Uusimaa + +##### Helsinki + +* [Meetup](https://www.meetup.com/Helsinki-Node-js/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page +* Organizer name - Juha Lehtomaki +* Organizer contact info - + +### France + +#### Île-de-France + +##### Paris + +* [Meetup](https://www.meetup.com/Nodejs-Paris/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page or submit your talk on [nodejs.paris website](http://nodejs.paris/meetups) +* Organizer name - [Etienne Folio](https://twitter.com/Ornthalas), [Stanislas Ormières](https://twitter.com/laruiss), [Nicolas KOKLA](https://twitter.com/nkokla), Quentin Raynaud +* Organizer contact info - + +### Germany + +#### Bavaria + +##### Passau + +* [Meetup](https://www.meetup.com/de-DE/Nodeschool-Passau/) +* Frequency of meetups - quarterly +* How to submit a talk? Email [Valentin](mailto:valentin.huber@msg.group) +* Organizer name - Valentin Huber +* Organizer contact info - [Email](mailto:valentin.huber@msg.group) + +#### Berlin + +* [Meetup](https://www.meetup.com/Node-js-Meetup-Berlin/) +* Frequency of meetups - monthly +* How to submit a talk? Email [Andreas](mailto:npm@lubbe.org) +* Organizer name - Andreas Lubbe +* Organizer contact info - [Email](mailto:npm@lubbe.org) + +#### Hamburg + +* [Meetup](https://www.meetup.com/node-HH/) +* Frequency of meetups - monthly and on demand +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Gregor Elke, Oliver Lorenz +* Organizer contact info - via Meetup, via [Slack](http://bit.ly/web-hh) + +### Greece + +#### Athens + +* [Meetup](https://www.meetup.com/nodejsathens/) +* Frequency of meetups - every two months +* How to submit a talk? Contact organizers in the meetup page. 
+* Organizer name - [Ioannis Nikolaou](https://www.linkedin.com/in/ioannis-nikolaou/) Co-organizers - Stratoula Kalafateli, [Kostas Siabanis](https://github.com/ksiabani), Megaklis Vasilakis +* Organizer contact info - + +### Hungary + +#### Budapest + +* [Meetup](https://www.meetup.com/nodebp/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Oroszi Róbert +* Organizer contact info - + +### India +#### Maharashtra + +##### Pune + +* [Meetup](https://www.meetup.com/JavaScripters) +* Frequency of meetups - monthly +* How to submit a talk? Send your queries to Pune.javascripters@gmail.com or Contact organizers in the meetup page. +* Organizer name - Imran shaikh & Akash Jarad +* Organizer contact info - javascripters.community@gmail.com + +##### Delhi + +* [Meetup](https://www.meetup.com/nodeJS-Devs/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Gaurav Gandhi. +* Organizer contact info - + +#### Gujarat + +##### Ahmedabad + +* [Meetup](https://www.meetup.com/meetup-group-iAIoTVuS/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page or write to dipesh@rapidops.com +* Organizer name - Dipesh Patel +* Organizer contact info - + +#### Rajasthan + +##### Jaipur + +* [Meetup](https://www.meetup.com/JaipurJS-Developer-Meetup/) +* Frequency of meetups - monthly +* How to submit a talk? [Email ayushrawal12@gmail.com](mailto:ayushrawal12@gmail.com) or [reach out to me on LinkedIn](https://www.linkedin.com/in/ayush-rawal) +* Organizer name - [Ayush Rawal](https://github.com/ayush-rawal) +* Organizer contact info - [Email](mailto:ayushrawal12@gmail.com) + +### Indonesia + +#### Jakarta + +* [Meetup](https://www.meetup.com/Node-js-Workshop/) +* Frequency of meetups - monthly - online +* How to submit a talk? 
[telegram group](https://t.me/nodejsid) +* Organizer name - Lukluk Luhuring Santoso +* Organizer contact info - [Email](mailto:luklukaha@gmail.com) + +### Ireland + +#### Dublin + +* [Meetup](https://www.meetup.com/Dublin-Node-js-Meetup/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Sean Walsh. Co-organizer: Leanne Vaughey +* Organizer contact info - + +* [Meetup](https://www.meetup.com/Nodeschool-Dublin-Meetup/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Anton Whalley +* Organizer contact info - + +### Israel + +#### Tel Aviv + +* [Meetup](https://www.meetup.com/NodeJS-Israel/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page or submit your talk on [Node.js-IL website](http://www.nodejsil.com/). +* Organizer name - [Idan Dagan](https://github.com/idandagan1), [Guy Segev](https://github.com/guyguyon), [Tomer Omri](https://github.com/TomerOmri) +* Organizer contact info - [Email](mailto:nodejsisrael8@gmail.com) + +### Mexico + +#### Mexico City + +* [Meetup](https://www.meetup.com/NodeBotsMX/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Saúl Buentello +* Organizer contact info - + +### New Zealand + +#### Auckland + +* [Meetup](https://www.meetup.com/AucklandNodeJs/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - George Czabania +* Organizer contact info - + +### Russia + +#### Moscow + +* [Meetup](https://www.meetup.com/Moscow-NodeJS-Meetup/) +* Frequency of meetups - every 6-9 month +* How to submit a talk? 
Contact organizers in the meetup page or use contact information below +* Organizer name - Denis Izmaylov +* Organizer contact info - [Telegram](https://t.me/DenisIzmaylov) [Twitter](https://twitter.com/DenisIzmaylov) [Facebook](https://facebook.com/denis.izmaylov) + +### South Africa + +#### Cape Town + +* [Meetup](https://www.meetup.com/nodecpt/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Guy Bedford +* Organizer contact info - + +### Spain + +#### Madrid + +* [Meetup](https://www.meetup.com/Node-js-Madrid/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Alex Fernández +* Organizer contact info - + +### Thailand + +#### Bangkok + +* [Meetup](https://www.meetup.com/Bangkok-Node-js/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Dylan Jay +* Organizer contact info - + +### Turkey + +#### Istanbul + +* [Meetup](https://www.meetup.com/nodeschool-istanbul/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Arif Çakıroğlu +* Organizer contact info - + +### United States + +#### Arizona + +##### Mesa + +* [Meetup](https://www.meetup.com/NodeAZ/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Chris Matthieu +* Organizer contact info - + +#### California + +##### Los Angeles + +* [js.la](https://js.la) +* Frequency of meetups - monthly +* How to submit a talk? [contribute.js.la](https://contribute.js.la) +* Organizer name - David Guttman +* Organizer contact info - @dguttman on [slack.js.la](https://slack.js.la) + +##### Irvine + +* [Meetup](https://www.meetup.com/Node-JS-OC/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. 
+* Organizer name - Farsheed Atef +* Organizer contact info - + +##### San Francisco + +* [Meetup](https://www.meetup.com/sfnode/) +* Frequency of meetups - monthly +* How to submit a talk? +* Organizer name - Dan Shaw +* Organizer contact info - + +* [Meetup](https://www.meetup.com/Node-js-Serverside-Javascripters-Club-SF/) +* Frequency of meetups - monthly +* How to submit a talk? +* Organizer name - Matt Pardee +* Organizer contact info - + +#### Colorado + +##### Denver + +* [Meetup](https://www.meetup.com/Node-js-Denver-Boulder/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page +* Organizer name - Brooks Patton +* Organizer contact info - + +#### Florida + +##### Jacksonville + +* [Meetup](https://www.meetup.com/Jax-Node-js-UG/) +* [Website](https://www.jaxnode.com) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - David Fekke +* Organizer contact info - David Fekke at gmail dot com + +#### Georgia + +##### Atlanta + +* [Meetup](https://www.meetup.com/Atlanta-Nodejs-Developers/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Ryan Connelly +* Organizer contact info - + +#### Illinois + +##### Chicago + +* [Meetup](https://www.meetup.com/Chicago-Nodejs/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page or (https://airtable.com/shrTDwmMH3zsnsWOE) +* Organizer name - Mike Hostetler, Zeke Nierenberg, & Ben Neiswander +* Organizer contact info - + +#### Indiana + +##### Indianapolis + +* [Meetup](https://www.meetup.com/Node-indy/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Mike Seidle +* Organizer contact info - + +#### Massachusetts + +##### Boston + +* [Meetup](https://www.meetup.com/Boston-Node/) +* Frequency of meetups - ~monthly +* How to submit a talk? 
Contact organizers in the meetup page or post in slack workspace #\_node\_meetup (see below). +* Organizer name - [Brian Sodano](https://github.com/codemouse) +* Organizer contact info - [briansodano@gmail.com](mailto:briansodano@gmail.com) or [Boston JS slack workspace](https://bostonjavascript.slack.com) + +#### Michigan + +##### Detroit + +* [Meetup](https://www.meetup.com/DetNode/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Steve Marsh, Norman Witte and Israel V + +#### Minnesota + +##### Minneapolis + +* [Meetup](https://www.meetup.com/NodeMN/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Mike Frey +* Organizer contact info - + +#### New York + +##### New York + +* [Meetup](https://www.meetup.com/nodejs/) +* Frequency of meetups - monthly +* How to submit a talk? +* Organizer name - Patrick Scott Co-organizer: Matt Walters. +* Organizer contact info - +* How to submit a talk? Contact Pat Scott @ [pat@patscott.io](mailto:pat@patscott.io). Matt Walters @ [meetup@iammattwalters.com](mailto:meetup@iammattwalters.com). +* Slack: [join.thenodejsmeetup.com](http://join.thenodejsmeetup.com/) +* Videos: [https://www.youtube.com/c/thenodejsmeetup](https://www.youtube.com/c/thenodejsmeetup) + +#### North Carolina + +##### Raleigh Durham + +* [Meetup](https://www.meetup.com/triangle-nodejs/) +* Frequency of meetups - quarterly +* How to submit a talk? Email ladyleet@nodejs.org +* Organizer name - Tracy Lee +* Organizer contact info - ladyleet@nodejs.org + +#### Oregon + +##### Portland + +* [Meetup](http://pdxnode.org/) +* Frequency of meetups - Biweekly (presentation night 2nd Thursdays, hack night last Thursdays) +* How to submit a talk? 
[Submit a talk proposal](https://github.com/PDXNode/pdxnode/issues/new), or DM [@obensource](https://twitter.com/obensource) or [@MichelleJLevine](https://twitter.com/MichelleJLevine) on twitter +* Organizer names - Ben Michel, Michelle Levine +* Organizer contact info - Ben: benpmichel@gmail.com, Michelle: michelle@michellejl.com + +#### Pennsylvania + +##### Philadelphia + +* [Meetup](https://www.meetup.com/nodejs-philly/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page: https://www.meetup.com/nodejs-philly/members/14283814/ +* Organizer name - Leomar Durán +* Organizer contact info - + +#### Texas + +##### Austin + +* [Meetup](https://www.meetup.com/austinnodejs/) +* Frequency of meetups - monthly +* How to submit a talk? Contact Matt Walters @ [meetup@iammattwalters.com](mailto:meetup@iammattwalters.com). +* Organizer name - [Matt Walters](https://github.com/mateodelnorte/) +* Organizer contact info - [meetup@iammattwalters.com](mailto:meetup@iammattwalters.com) +* Slack: [join.thenodejsmeetup.com](http://join.thenodejsmeetup.com/) +* Videos: [https://www.youtube.com/c/thenodejsmeetup](https://www.youtube.com/c/thenodejsmeetup) + +* [Meetup](https://www.meetup.com/ATXNodeSchool/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Stefan von Ellenrieder +* Organizer contact info - + +##### Dallas + +* [Meetup](https://www.meetup.com/DallasNode/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - [Cameron Steele](https://github.com/ATechAdventurer) +* Organizer contact info - [Cam.steeleis@gmail.com](mailto:Cam.steeleis@gmail.com) + +#### Utah + +##### Salt Lake City + +* [Meetup](https://www.meetup.com/utahnodejs/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. 
+* Organizer name - Aaron Seth Madsen +* Organizer contact info - + +#### Washington + +##### Seattle + +* [Meetup](https://www.meetup.com/Seattle-Node-js/) +* Frequency of meetups - monthly +* How to submit a talk? +* Organizer name - Ryan Roemer +* Organizer contact info - + +* [Meetup](https://www.meetup.com/Seattle-NodeSchool/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page +* Organizer name - Wil Alvarez +* Organizer contact info - + +#### Washington, DC. + +* [Meetup](https://www.meetup.com/node-dc/) +* Frequency of meetups - monthly +* How to submit a talk? Write to Andrew Dunkman adunkman@gmail.com +* Organizer name - Andrew Dunkman +* Organizer contact info - + +### UK + +#### London +##### LNUG + +* [Meetup](https://www.meetup.com/london-nodejs/) +* [GitHub/lnug](https://github.com/lnug/) +* Frequency of meetups - monthly +* How to submit a talk? Visit our [speakers repos](https://github.com/lnug/speakers), read the guidelines, and [submit a talk proposal as a new issue](https://github.com/lnug/speakers/issues). +* Organizer name - Adam Davis +* Organizer contact info - contact@lnug.org, [@lnugOrg](https://twitter.com/lnugorg) + +##### Node.js Workshops + +* [Meetup](https://www.meetup.com/NodeWorkshops//) +* Frequency of meetups - monthly +* How to submit a talk? +* Organizer name - Daryn Holmes +* Organizer contact info - + +#### Cambridge + +* [Meetup](https://www.meetup.com/JavaScript-Cambridge/) +* Frequency of meetups - monthly +* How to submit a talk? +* Organizer name - Joe Parry, co-organizer Rob Moran +* Organizer contact info - + +#### Oxford + +* [JSOxford](https://www.meetup.com/jsoxford/) +* Frequency of meetups - every 2 months +* How to submit a talk? 
[Submit Form](https://docs.google.com/forms/d/e/1FAIpQLSflx7LU44PuwlyCJj-WwlP_SlrUvxAd8uaXlY7_O65c7RLpGQ/viewform?usp=sf_link) +* Organizer names - Marcus Noble, Seren Davies +* Organizers contact info - organisers@jsoxford.com + +#### Edinburgh + +* [Node.js Edinburgh](https://www.meetup.com/Nodejs-Edinburgh/) +* Frequency of meetups - every 2 months +* How to submit a talk? [Submit Talk](mailto:michael@biggles.io?subject=Node.js%20Talk%20Proposal) +* Organizer names - Michael Antczak +* Organizers contact info - [AntczakMichael](https://twitter.com/AntczakMichael) + +### Ukraine + +#### Kiev + +* [Meetup](https://www.meetup.com/NodeUA/), [Old group](https://www.meetup.com/KievNodeJS/) +* Frequency of meetups - 1-8 times a month +* How to submit a talk? Contact organizer by email. +* Organizer name - Timur Shemsedinov +* Organizer contact info - [Email](mailto:timur.shemsedinov@gmail.com) diff --git a/locale/fr/index.md b/locale/fr/index.md index 47a9047c0509..3d9d3a572865 100644 --- a/locale/fr/index.md +++ b/locale/fr/index.md @@ -15,6 +15,9 @@ labels: api: Documentation API version-schedule-prompt: Ou regardez le version-schedule-prompt-link-text: Planning LTS. + newsletter: true + newsletter-prefix: Sign up for + newsletter-postfix: ", the official Node.js Monthly Newsletter." --- Node.js® est un environnement d’exécution JavaScript construit sur le [moteur JavaScript V8 de Chrome](https://v8.dev/). 
diff --git a/locale/fr/knowledge/HTTP/clients/how-to-access-query-string-parameters.md b/locale/fr/knowledge/HTTP/clients/how-to-access-query-string-parameters.md new file mode 100644 index 000000000000..4db8a1043616 --- /dev/null +++ b/locale/fr/knowledge/HTTP/clients/how-to-access-query-string-parameters.md @@ -0,0 +1,54 @@ +--- +title: How to access query string parameters +date: '2011-08-26T10:08:50.000Z' +tags: + - http +difficulty: 1 +layout: knowledge-post.hbs +--- + +In Node.js, functionality to aid in the accessing of URL query string parameters is built into the standard library. The built-in `url.parse` method takes care of most of the heavy lifting for us. Here is an example script using this handy function and an explanation on how it works: + +```js +const http = require('http'); +const url = require('url'); + +http.createServer(function (req, res) { + const queryObject = url.parse(req.url,true).query; + console.log(queryObject); + + res.writeHead(200, {'Content-Type': 'text/html'}); + res.end('Feel free to add query parameters to the end of the url'); +}).listen(8080); +``` + +> To test this code run `node app.js` (app.js is name of the file) on the terminal and then go to your browser and type `http://localhost:8080/app.js?foo=bad&baz=foo` on the URL bar + +The key part of this whole script is this line: `const queryObject = url.parse(req.url,true).query;`. Let's take a look at things from the inside-out. First off, `req.url` will look like `/app.js?foo=bad&baz=foo`. This is the part that is in the URL bar of the browser. Next, it gets passed to `url.parse` which parses out the various elements of the URL (NOTE: the second paramater is a boolean stating whether the method should parse the query string, so we set it to true). Finally, we access the `.query` property, which returns us a nice, friendly JavaScript object with our query string data. 
+
+The `url.parse()` method returns an object which has many key-value pairs, one of which is the `query` object. Some other handy information returned by the method includes the `host`, `pathname`, and `search` keys.
+
+In the above code:
+
+* `url.parse(req.url,true).query` returns `{ foo: 'bad', baz: 'foo' }`.
+* `url.parse(req.url,true).host` returns `'localhost:8080'`.
+* `url.parse(req.url,true).pathname` returns `'/app.js'`.
+* `url.parse(req.url,true).search` returns `'?foo=bad&baz=foo'`.
+
+### Parsing with querystring
+
+Another way to access query string parameters is parsing them using the `querystring` builtin Node.js module.
+
+This method, however, must be passed just a querystring portion of a url. Passing it the whole url, like you did in the `url.parse` example, won't parse the querystrings.
+
+```js
+const querystring = require('querystring');
+const url = "http://example.com/index.html?code=string&key=12&id=false";
+const qs = "code=string&key=12&id=false";
+
+console.log(querystring.parse(qs));
+// > { code: 'string', key: '12', id: 'false' }
+
+console.log(querystring.parse(url));
+// > { 'http://example.com/index.html?code': 'string', key: '12', id: 'false' }
+```
diff --git a/locale/fr/knowledge/HTTP/clients/how-to-create-a-HTTP-request.md b/locale/fr/knowledge/HTTP/clients/how-to-create-a-HTTP-request.md
new file mode 100644
index 000000000000..2fcbd9ef8d81
--- /dev/null
+++ b/locale/fr/knowledge/HTTP/clients/how-to-create-a-HTTP-request.md
@@ -0,0 +1,99 @@
+---
+title: How do I make a http request?
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - core
+  - http
+difficulty: 2
+layout: knowledge-post.hbs
+---
+
+Another extremely common programming task is making an HTTP request to a web server. Node.js provides an extremely simple API for this functionality in the form of `http.request`.
+
+As an example, we are going to perform a GET request to www.random.org (which returns a random integer between 1 and 10) and print the result to the console.
+ +```javascript +var http = require('http'); + +//The url we want is: 'www.random.org/integers/?num=1&min=1&max=10&col=1&base=10&format=plain&rnd=new' +var options = { + host: 'www.random.org', + path: '/integers/?num=1&min=1&max=10&col=1&base=10&format=plain&rnd=new' +}; + +callback = function(response) { + var str = ''; + + //another chunk of data has been received, so append it to `str` + response.on('data', function (chunk) { + str += chunk; + }); + + //the whole response has been received, so we just print it out here + response.on('end', function () { + console.log(str); + }); +} + +http.request(options, callback).end(); +``` + +Making a POST request is just as easy. We will make a POST request to `www.nodejitsu.com:1337` which is running a server that will echo back what we post. The code for making a POST request is almost identical to making a GET request, just a few simple modifications: + +```javascript +var http = require('http'); + +//The url we want is `www.nodejitsu.com:1337/` +var options = { + host: 'www.nodejitsu.com', + path: '/', + //since we are listening on a custom port, we need to specify it by hand + port: '1337', + //This is what changes the request to a POST request + method: 'POST' +}; + +callback = function(response) { + var str = '' + response.on('data', function (chunk) { + str += chunk; + }); + + response.on('end', function () { + console.log(str); + }); +} + +var req = http.request(options, callback); +//This is the data we are posting, it needs to be a string or a buffer +req.write("hello world!"); +req.end(); +``` + +Throwing in custom headers is just a tiny bit harder. On `www.nodejitsu.com:1338` we are running a server that will print out the `custom` header. So we will just make a quick request to it: + +```javascript +var http = require('http'); + +var options = { + host: 'www.nodejitsu.com', + path: '/', + port: '1338', + //This is the only line that is new. 
`headers` is an object with the headers to request + headers: {'custom': 'Custom Header Demo works'} +}; + +callback = function(response) { + var str = '' + response.on('data', function (chunk) { + str += chunk; + }); + + response.on('end', function () { + console.log(str); + }); +} + +var req = http.request(options, callback); +req.end(); +``` diff --git a/locale/fr/knowledge/HTTP/servers/how-to-create-a-HTTP-server.md b/locale/fr/knowledge/HTTP/servers/how-to-create-a-HTTP-server.md new file mode 100644 index 000000000000..bb15f042c8e8 --- /dev/null +++ b/locale/fr/knowledge/HTTP/servers/how-to-create-a-HTTP-server.md @@ -0,0 +1,42 @@ +--- +title: How do I create a HTTP server? +date: '2011-08-26T10:08:50.000Z' +tags: + - http +difficulty: 1 +layout: knowledge-post.hbs +--- + +Making a simple HTTP server in Node.js has become the de facto 'hello world' for the platform. On the one hand, Node.js provides extremely easy-to-use HTTP APIs; on the other hand, a simple web server also serves as an excellent demonstration of the asynchronous strengths of Node.js. + +Let's take a look at a very simple example: + +```javascript +const http = require('http'); + +const requestListener = function (req, res) { + res.writeHead(200); + res.end('Hello, World!'); +} + +const server = http.createServer(requestListener); +server.listen(8080); +``` + +Save this in a file called `server.js` - run `node server.js`, and your program will hang there... it's waiting for connections to respond to, so you'll have to give it one if you want to see it do anything. Try opening up a browser, and typing `localhost:8080` into the location bar. If everything has been set up correctly, you should see your server saying hello! + +Also, from your terminal you should be able to get the response using curl: + +``` +curl localhost:8080 +``` + +Let's take a more in-depth look at what the above code is doing. 
First, a function is defined called `requestListener` that takes a request object and a response object as parameters. + +The request object contains things such as the requested URL, but in this example we ignore it and always return "Hello World". + +The response object is how we send the headers and contents of the response back to the user making the request. Here we return a 200 response code (signaling a successful response) with the body "Hello World". Other headers, such as `Content-type`, would also be set here. + +Next, the `http.createServer` method creates a server that calls `requestListener` whenever a request comes in. The next line, `server.listen(8080)`, calls the `listen` method, which causes the server to wait for incoming requests on the specified port - 8080, in this case. + +There you have it - your most basic Node.js HTTP server. diff --git a/locale/fr/knowledge/HTTP/servers/how-to-create-a-HTTPS-server.md b/locale/fr/knowledge/HTTP/servers/how-to-create-a-HTTPS-server.md new file mode 100644 index 000000000000..7ab2dcfa0b13 --- /dev/null +++ b/locale/fr/knowledge/HTTP/servers/how-to-create-a-HTTPS-server.md @@ -0,0 +1,54 @@ +--- +title: How to create an https server? +date: '2011-08-26T10:08:50.000Z' +tags: + - https +difficulty: 1 +layout: knowledge-post.hbs +--- + +*If you're using [Nodejitsu](http://nodejitsu.com)*, we handle HTTPS for you. Free SSL on jit.su and nodejitsu.com subdomains, and SSL on custom domains for business customers. *It's never necessary to create an HTTPS server yourself.* + +--- + +To create an HTTPS server, you need two things: an SSL certificate, and built-in `https` Node.js module. + +We need to start out with a word about SSL certificates. Speaking generally, there are two kinds of certificates: those signed by a 'Certificate Authority', or CA, and 'self-signed certificates'. 
A Certificate Authority is a trusted source for an SSL certificate, and using a certificate from a CA allows your users to trust the identity of your website. In most cases, you would want to use a CA-signed certificate in a production environment - for testing purposes, however, a self-signed certificate will do just fine.
+
+To generate a self-signed certificate, run the following in your shell:
+
+```
+openssl genrsa -out key.pem
+openssl req -new -key key.pem -out csr.pem
+openssl x509 -req -days 9999 -in csr.pem -signkey key.pem -out cert.pem
+rm csr.pem
+```
+
+This should leave you with two files, `cert.pem` (the certificate) and `key.pem` (the private key). Put these files in the same directory as your Node.js server file. This is all you need for an SSL connection. So now you set up a quick hello world example (the biggest difference between https and [http](/en/knowledge/HTTP/servers/how-to-create-a-HTTP-server/) is the `options` parameter):
+
+```javascript
+const https = require('https');
+const fs = require('fs');
+
+const options = {
+  key: fs.readFileSync('key.pem'),
+  cert: fs.readFileSync('cert.pem')
+};
+
+https.createServer(options, function (req, res) {
+  res.writeHead(200);
+  res.end("hello world\n");
+}).listen(8000);
+```
+
+NODE PRO TIP: Note `fs.readFileSync` - unlike `fs.readFile`, `fs.readFileSync` will block the entire process until it completes. In situations like this - loading vital configuration data - the `sync` functions are okay. In a busy server, however, using a synchronous function during a request will force the server to deal with the requests one by one!
+
+> To start your https server, run `node app.js` (here, app.js is the name of the file) on the terminal.
+
+Now that your server is set up and started, you should be able to get the file with curl:
+
+```
+curl -k https://localhost:8000
+```
+
+or in your browser, by going to https://localhost:8000 .
diff --git a/locale/fr/knowledge/HTTP/servers/how-to-handle-multipart-form-data.md b/locale/fr/knowledge/HTTP/servers/how-to-handle-multipart-form-data.md new file mode 100644 index 000000000000..3121840d80c6 --- /dev/null +++ b/locale/fr/knowledge/HTTP/servers/how-to-handle-multipart-form-data.md @@ -0,0 +1,65 @@ +--- +title: How to handle multipart form data +date: '2011-09-09T10:08:50.000Z' +tags: + - http + - forms + - multipart + - uploads +difficulty: 3 +layout: knowledge-post.hbs +--- + +Handling form data and file uploads properly is an important and complex problem in HTTP servers. Doing it by hand would involve parsing streaming binary data, writing it to the file system, parsing out other form data, and several other complex concerns - luckily, only a very few people will need to worry about it on that deep level. Felix Geisendorfer, one of the Node.js core committers, wrote a library called `node-formidable` that handles all the hard parts for you. With its friendly API, you can be parsing forms and receiving file uploads in no time. + +This example is taken directly from the `node-formidable` GitHub page, with some additional explanation added. + +```javascript +var formidable = require('formidable'), + http = require('http'), + util = require('util'); + +http.createServer(function(req, res) { + + // This if statement is here to catch form submissions, and initiate multipart form data parsing. + + if (req.url == '/upload' && req.method.toLowerCase() == 'post') { + + // Instantiate a new formidable form for processing. + + var form = new formidable.IncomingForm(); + + // form.parse analyzes the incoming stream data, picking apart the different fields and files for you. + + form.parse(req, function(err, fields, files) { + if (err) { + + // Check for and handle any errors here. 
+ + console.error(err.message); + return; + } + res.writeHead(200, {'content-type': 'text/plain'}); + res.write('received upload:\n\n'); + + // This last line responds to the form submission with a list of the parsed data and files. + + res.end(util.inspect({fields: fields, files: files})); + }); + return; + } + + // If this is a regular request, and not a form submission, then send the form. + + res.writeHead(200, {'content-type': 'text/html'}); + res.end( + '
'+ + '
'+ + '
'+ + ''+ + '
' + ); +}).listen(8080); +``` + +Try it out for yourself - it's definitely the simpler solution, and `node-formidable` is a battle-hardened, production-ready library. Let userland solve problems like this for you, so that you can get back to writing the rest of your code! diff --git a/locale/fr/knowledge/HTTP/servers/how-to-read-POST-data.md b/locale/fr/knowledge/HTTP/servers/how-to-read-POST-data.md new file mode 100644 index 000000000000..90d4defb6e54 --- /dev/null +++ b/locale/fr/knowledge/HTTP/servers/how-to-read-POST-data.md @@ -0,0 +1,45 @@ +--- +title: How can I read POST data? +date: '2011-08-26T10:08:50.000Z' +tags: + - http +difficulty: 1 +layout: knowledge-post.hbs +--- + +Reading the data from a POST request (i.e. a form submission) can be a little bit of a pitfall in Node.js, so we're going to go through an example of how to do it properly. The first step, obviously, is to listen for incoming data - the trick is to wait for the data to finish, so that you can process all the form data without losing anything. + +Here is a quick script that shows you how to do exactly that: + +```javascript +var http = require('http'); +var postHTML = + 'Post Example' + + '' + + '
' + + 'Input 1:
' + + 'Input 2:
' + + '' + + '
' + + ''; + +http.createServer(function (req, res) { + var body = ""; + req.on('data', function (chunk) { + body += chunk; + }); + req.on('end', function () { + console.log('POSTed: ' + body); + res.writeHead(200); + res.end(postHTML); + }); +}).listen(8080); +``` + +The variable `postHTML` is a static string containing the HTML for two input boxes and a submit box - this HTML is provided so that you can `POST` example data. This is NOT the right way to serve static HTML - please see [How to Serve Static Files](/en/knowledge/HTTP/servers/how-to-serve-static-files/) for a more proper example. + +With the HTML out of the way, we [create a server](/en/knowledge/HTTP/servers/how-to-create-a-HTTP-server/) to listen for requests. It is important to note, when listening for POST data, that the `req` object is also an [Event Emitter](/en/knowledge/getting-started/control-flow/what-are-event-emitters/). `req`, therefore, will emit a `data` event whenever a 'chunk' of incoming data is received; when there is no more incoming data, the `end` event is emitted. So, in our case, we listen for `data` events. Once all the data is received, we log the data to the console and send the response. + +Something important to note is that the event listeners are being added immediately after the request object is received. If you don't immediately set them, then there is a possibility of missing some of the events. If, for example, an event listener was attached from inside a callback, then the `data` and `end` events might be fired in the meantime with no listeners attached! + +You can save this script to `server.js` and run it with `node server.js`. Once you run it you will notice that occasionally you will see lines with no data, e.g. `POSTed:`. This happens because regular `GET` requests go through the same codepath. In a more 'real-world' application, it would be proper practice to check the type of request and handle the different request types differently. 
diff --git a/locale/fr/knowledge/HTTP/servers/how-to-serve-static-files.md b/locale/fr/knowledge/HTTP/servers/how-to-serve-static-files.md
new file mode 100644
index 000000000000..327118288865
--- /dev/null
+++ b/locale/fr/knowledge/HTTP/servers/how-to-serve-static-files.md
@@ -0,0 +1,46 @@
+---
+title: How to serve static files
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - http
+difficulty: 1
+layout: knowledge-post.hbs
+---
+
+A basic necessity for most [http servers](/en/knowledge/HTTP/servers/how-to-create-a-HTTPS-server/) is to be able to serve static files. Thankfully, it is not that hard to do in Node.js. First you [read the file](/en/knowledge/file-system/how-to-read-files-in-nodejs/), then you serve the file. Here is an example of a script that will serve the files in the current directory:
+
+```javascript
+var fs = require('fs'),
+    http = require('http');
+
+http.createServer(function (req, res) {
+  fs.readFile(__dirname + req.url, function (err,data) {
+    if (err) {
+      res.writeHead(404);
+      res.end(JSON.stringify(err));
+      return;
+    }
+    res.writeHead(200);
+    res.end(data);
+  });
+}).listen(8080);
+```
+
+This example takes the path requested and it serves that path, relative to the local directory. This works fine as a quick solution; however, there are a few problems with this approach. First, this code does not correctly handle mime types. Additionally, a proper static file server should really be taking advantage of client side caching, and should send a "Not Modified" response if nothing has changed. Furthermore, there are security bugs that can enable a malicious user to break out of the current directory. (for example, `GET /../../../`).
+
+Each of these can be addressed individually without much difficulty. You can send the proper mime type header. You can figure out how to utilize the client caches. You can take advantage of `path.normalize` to make sure that requests don't break out of the current directory.
But why write all that code when you can just use someone else's library? + +There is a good static file server called [node-static](https://github.com/cloudhead/node-static) written by Alexis Sellier which you can leverage. Here is a script which functions similarly to the previous one: + +```javascript +var static = require('node-static'); +var http = require('http'); + +var file = new(static.Server)(); + +http.createServer(function (req, res) { + file.serve(req, res); +}).listen(8080); +``` + +This is a fully functional file server that doesn't have any of the bugs previously mentioned. This is just the most basic set up, there are more things you can do if you look at [the api](https://github.com/cloudhead/node-static). Also since it is an open source project, you can always modify it to your needs (and feel free to contribute back to the project!). diff --git a/locale/fr/knowledge/REPL/how-to-create-a-custom-repl.md b/locale/fr/knowledge/REPL/how-to-create-a-custom-repl.md new file mode 100644 index 000000000000..a57195f66605 --- /dev/null +++ b/locale/fr/knowledge/REPL/how-to-create-a-custom-repl.md @@ -0,0 +1,105 @@ +--- +title: How to create and use a custom REPL +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - repl +difficulty: 2 +layout: knowledge-post.hbs +--- + +Node.js allows users to create their own REPLs with the [repl module](https://nodejs.org/api/repl.html). Its basic use looks like this: + +```js +var repl = require('repl') + +repl.start(prompt, stream); +``` + +Above, `prompt` is a string that's used for the prompt of your REPL (which defaults to "> ") and `stream` is the stream that the repl listens on, defaulting to `process.stdin`. When you run the standalone `node` REPL from the command prompt, what it's doing in the background is running `repl.start()` to give you the standard REPL. + +However, the repl is pretty flexible. 
Here's an example that shows this off: + +```js +#!/usr/bin/env node + +var net = require("net"); +var repl = require("repl"); + +var mood = function () { + var m = [ "^__^", "-___-;", ">.<", "<_>" ]; + return m[Math.floor(Math.random()*m.length)]; +}; + +//A remote node repl that you can telnet to! +net.createServer(function (socket) { + var remote = repl.start("node::remote> ", socket); + //Adding "mood" and "bonus" to the remote REPL's context. + remote.context.mood = mood; + remote.context.bonus = "UNLOCKED"; +}).listen(5001); + +console.log("Remote REPL started on port 5001."); + +//A "local" node repl with a custom prompt +var local = repl.start("node::local> "); + +// Exposing the function "mood" to the local REPL's context. +local.context.mood = mood; +``` + +This script creates *two* REPLs: One is normal excepting for its custom prompt, but the *other* is exposed via the net module so you can telnet to it! In addition, it uses the `context` property to expose the function "mood" to both REPLs, and the "bonus" string to the remote REPL only. As you will see, this approach of trying to expose objects to one REPL and not the other *doesn't really work*. + +In addition, all objects in the global scope will also be accessible to your REPLs. + +Here's what happens when you run the script: + +```shell +$ node repl.js +Remote REPL started on port 5001. +node::local> .exit +# -C + +$ node repl.js +Remote REPL started on port 5001. +node::local> mood() +'^__^' +node::local> bonus +ReferenceError: bonus is not defined +``` + +As may be seen, the `mood` function is usable within the local REPL, but the `bonus` string is not. This is as expected. + +Now, here's what happens when you try to telnet to port 5001: + +```shell +$ telnet localhost 5001 +Trying ::1... +Trying 127.0.0.1... +Connected to localhost. +Escape character is '^]'. +node::remote> mood() +'>.<' +node::remote> bonus +'UNLOCKED' +``` + +As you can see, the `mood` function is *also* available over telnet! 
In addition, so is "bonus". + +As an interesting consequence of my actions, bonus is now also defined on the local REPL: + +```shell +node::local> bonus +'UNLOCKED' +``` + +It seems we "unlocked" the `bonus` string on the local REPL as well. As it turns out, any variables created in one REPL are also available to the other: + +```shell +node::local> var node = "AWESOME!" + +node::remote> node +'AWESOME!' +``` + +As you can see, the node REPL is powerful and flexible. diff --git a/locale/fr/knowledge/REPL/how-to-use-nodejs-repl.md b/locale/fr/knowledge/REPL/how-to-use-nodejs-repl.md new file mode 100644 index 000000000000..a05589df85b7 --- /dev/null +++ b/locale/fr/knowledge/REPL/how-to-use-nodejs-repl.md @@ -0,0 +1,119 @@ +--- +title: "How do I use node's REPL?" +date: '2011-08-26T10:08:50.000Z' +tags: + - cli + - repl +difficulty: 1 +layout: knowledge-post.hbs +--- + +# Learn to use the REPL + +Node.js ships with a Read-Eval-Print Loop, also known as a REPL. It is the Node.js interactive shell; any valid JavaScript which can be written in a script can be passed to the REPL. It can be extremely useful for experimenting with Node.js, debugging code, and figuring out some of JavaScript's more eccentric behaviors. + +Node.js has a standalone REPL accessible from the command line, and a built in REPL module you can use to [create your own custom REPLs](https://nodejs.org/api/repl.html#repl_repl). We are going to learn about the basics of the standalone REPL. + +## How to Start the REPL + +Starting the REPL is simple - just run node on the command line without a filename. + +```shell +node +``` + +It then drops you into a simple prompt ('>') where you can type any JavaScript command you wish. As in most shells, you can press the up and down arrow keys to scroll through your command history and modify previous commands. + +```shell +$ node +> var x = "Hello, World!" +undefined +> x +"Hello, World!" 
+> .exit +``` + +You can also use the `Tab` key to autocomplete some commands. When multiple autocomplete options are available, hit `Tab` again to cycle through them. + +## Special Commands and Exiting the REPL + +The following special commands are supported by all REPL instances (from [Node.js REPL docs](https://nodejs.org/api/repl.html#repl_commands_and_special_keys): + +* `.exit` - Close the I/O stream, causing the REPL to exit. +* `.break` - When in the process of inputting a multi-line expression, entering the `.break` command (or pressing the `-C` key combination) will abort further input or processing of that expression. +* `.clear` - Resets the REPL `context` to an empty object and clears any multi-line expression currently being input. +* `.help` - Show this list of special commands. +* `.save` - Save the current REPL session to a file: `> .save ./file/to/save.js` +* `.load` - Load a file into the current REPL session. `> .load ./file/to/load.js` +* `.editor` - Enter editor mode (`-D` to finish, `-C` to cancel). + +```shell +> .editor +# Entering editor mode (-D to finish, -C to cancel) +function welcome(name) { + return `Hello ${name}!`; +} + +welcome('Node.js User'); + +# -D +'Hello Node.js User!' +> +``` + +The following key combinations in the REPL have these special effects: + +* `-C` - When pressed once, has the same effect as the `.break` command. When pressed twice on a blank line, has the same effect as the `.exit` command. +* `-D` - Has the same effect as the `.exit` command. +* `` - When pressed on a blank line, displays global and local (scope) variables. When pressed while entering other input, displays relevant autocompletion options. + +## Return Values + +Whenever you type a command, it will print the return value of the command. If you want to reuse the previous return value, you can use the special `_` variable. 
+ +For example: + +```shell +$ node +> 1+1 +2 +> _+1 +3 +``` + +One thing worth noting where REPL return values are concerned: + +```shell +> x = 10 +10 +> var y = 5 +> x +10 +> y +5 +``` + +When the `var` keyword is used, the value of the expression is stored, but *NOT* returned. When a bare identifier is used, the value is also returned, as well as stored. + +## Accessing Modules + +If you need to access any of the builtin modules, or any third party modules, they can be accessed with `require`, just like in the rest of Node. + +For example: + +```shell +$ node +> path = require('path') +{ resolve: [Function], + normalize: [Function], + join: [Function], + dirname: [Function], + basename: [Function], + extname: [Function], + exists: [Function], + existsSync: [Function] } +> path.basename("/a/b/c.txt") +'c.txt' +``` + +Note once again that without the `var` keyword, the contents of the object are returned immediately and displayed to `stdout`. diff --git a/locale/fr/knowledge/advanced/buffers/how-to-use-buffers.md b/locale/fr/knowledge/advanced/buffers/how-to-use-buffers.md new file mode 100644 index 000000000000..e6e56ef33cc9 --- /dev/null +++ b/locale/fr/knowledge/advanced/buffers/how-to-use-buffers.md @@ -0,0 +1,188 @@ +--- +title: How to Use Buffers in Node.js +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - Buffer + - buffer + - buffers + - binary +difficulty: 3 +layout: knowledge-post.hbs +--- + +## Why Buffers? + +Pure JavaScript, while great with unicode-encoded strings, does not handle straight binary data very well. This is fine on the browser, where most data is in the form of strings. However, Node.js servers have to also deal with TCP streams and reading and writing to the filesystem, both of which make it necessary to deal with purely binary streams of data. + +One way to handle this problem is to just use strings *anyway*, which is exactly what Node.js did at first. 
However, this approach is extremely problematic to work with; It's slow, makes you work with an API designed for strings and not binary data, and has a tendency to break in strange and mysterious ways. + +Don't use binary strings. Use *buffers* instead! + +## What Are Buffers? + +The `Buffer` class in Node.js is designed to handle raw binary data. Each buffer corresponds to some raw memory allocated outside V8. Buffers act somewhat like arrays of integers, but aren't resizable and have a whole bunch of methods specifically for binary data. The integers in a buffer each represent a byte and so are limited to values from 0 to 255 inclusive. When using `console.log()` to print the `Buffer` instance, you'll get a chain of values in hexadecimal values. + +## Where You See Buffers: + +In the wild, buffers are usually seen in the context of binary data coming from streams, such as `fs.createReadStream`. + +## Usage: + +### Creating Buffers: + +There are a few ways to create new buffers: + +```js +var buffer = Buffer.alloc(8); +// This will print out 8 bytes of zero: +// +``` + +This buffer is initialized and contains 8 bytes of zero. + +```js +var buffer = Buffer.from([ 8, 6, 7, 5, 3, 0, 9]); +// This will print out 8 bytes of certain values: +// +``` + +This initializes the buffer to the contents of this array. Keep in mind that the contents of the array are integers representing bytes. + +```js +var buffer = Buffer.from("I'm a string!", "utf-8"); +// This will print out a chain of values in utf-8: +// +``` + +This initializes the buffer to a binary encoding of the first string as specified by the second argument (in this case, `'utf-8'`). `'utf-8'` is by far the most common encoding used with Node.js, but `Buffer` also supports others. See [Supported Encodings](https://nodejs.org/dist/latest/docs/api/buffer.html#buffer_buffers_and_character_encodings) for more details. 
+ +### Writing to Buffers + +Given that there is already a buffer created: + +``` +> var buffer = Buffer.alloc(16) +``` + +we can start writing strings to it: + +``` +> buffer.write("Hello", "utf-8") +5 +``` + +The first argument to `buffer.write` is the string to write to the buffer, and the second argument is the string encoding. It happens to default to utf-8 so this argument is extraneous. + +`buffer.write` returned 5. This means that we wrote to five bytes of the buffer. The fact that the string "Hello" is also 5 characters long is coincidental, since each character *just happened* to be 8 bits apiece. This is useful if you want to complete the message: + +``` +> buffer.write(" world!", 5, "utf-8") +7 +``` + +When `buffer.write` has 3 arguments, the second argument indicates an offset, or the index of the buffer to start writing at. + +### Reading from Buffers: + +#### toString: + +Probably the most common way to read buffers is to use the `toString` method, since many buffers contain text: + +``` +> buffer.toString('utf-8') +'Hello world!\u0000�k\t' +``` + +Again, the first argument is the encoding. In this case, it can be seen that not the entire buffer was used! Luckily, because we know how many bytes we've written to the buffer, we can simply add more arguments to "stringify" the slice that's actually interesting: + +``` +> buffer.toString("utf-8", 0, 12) +'Hello world!' +``` + +#### Individual octets: + +You can also set individual bytes by using an array-like syntax: + +``` +> buffer[12] = buffer[11]; +33 +> buffer[13] = "1".charCodeAt(); +49 +> buffer[14] = buffer[13]; +49 +> buffer[15] = 33 +33 +> buffer.toString("utf-8") +'Hello world!!11!' +``` + +In this example, I set the remaining bytes, by hand, such that they represent utf-8 encoded "!" and "1" characters. + +### More Fun With Buffers + +#### Buffer.isBuffer(object) + +This method checks to see if `object` is a buffer, similar to `Array.isArray`. 
+ +#### Buffer.byteLength(string, encoding) + +With this function, you can check the number of bytes required to encode a string with a given encoding (which defaults to utf-8). This length is *not* the same as string length, since many characters require more bytes to encode. For example: + +``` +> var snowman = "☃"; +> snowman.length +1 +> Buffer.byteLength(snowman) +3 +``` + +The unicode snowman is only one character, but takes 3 entire bytes to encode! + +#### buffer.length + +This is the length of your buffer, and represents how much memory is allocated. It is not the same as the size of the buffer's contents, since a buffer may be half-filled. For example: + +``` +> var buffer = Buffer.alloc(16) +> buffer.write(snowman) +3 +> buffer.length +16 +``` + +In this example, the contents written to the buffer only consist of three groups (since they represent the single-character snowman), but the buffer's length is still 16, as it was initialized. + +#### buffer.copy(target, targetStart=0, sourceStart=0, sourceEnd=buffer.length) + +`buffer.copy` allows one to copy the contents of one buffer onto another. The first argument is the target buffer on which to copy the contents of `buffer`, and the rest of the arguments allow for copying only a subsection of the source buffer to somewhere in the middle of the target buffer. For example: + +``` +> var frosty = Buffer.alloc(24) +> var snowman = Buffer.from("☃", "utf-8") +> frosty.write("Happy birthday! ", "utf-8") +16 +> snowman.copy(frosty, 16) +3 +> frosty.toString("utf-8", 0, 19) +'Happy birthday! ☃' +``` + +In this example, I copied the "snowman" buffer, which contains a 3 byte long character, to the "frosty" buffer, to which I had written to the first 16 bytes. Because the snowman character is 3 bytes long, the result takes up 19 bytes of the buffer. 
+ +#### buffer.slice(start, end=buffer.length) + +This method's API is generally the same as that of `Array.prototype.slice`, but with one very important difference: The slice is **not** a new buffer and merely references a subset of the memory space. *Modifying the slice will also modify the original buffer*! For example: + +``` +> var puddle = frosty.slice(16, 19) +> puddle.toString() +'☃' +> puddle.write("___") +3 +> frosty.toString("utf-8", 0, 19) +'Happy birthday! ___' +``` + +Now Frosty has been turned into a puddle of underscores. Bummer. diff --git a/locale/fr/knowledge/advanced/streams/how-to-use-fs-create-read-stream.md b/locale/fr/knowledge/advanced/streams/how-to-use-fs-create-read-stream.md new file mode 100644 index 000000000000..de859af4e768 --- /dev/null +++ b/locale/fr/knowledge/advanced/streams/how-to-use-fs-create-read-stream.md @@ -0,0 +1,36 @@ +--- +title: How to use fs.createReadStream? +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - streams + - fs +difficulty: 3 +layout: knowledge-post.hbs +--- + +The function `fs.createReadStream()` allows you to open up a readable stream in a very simple manner. All you have to do is pass the path of the file to start streaming in. It turns out that the response (as well as the request) objects are streams. So we will use this fact to create a http server that streams the files to the client. Since the code is simple enough, it is pretty easy just to read through it and comment why each line is necessary. 
+ +```javascript +var http = require('http'); +var fs = require('fs'); + +http.createServer(function(req, res) { + // The filename is simply the local directory and tacks on the requested url + var filename = __dirname+req.url; + + // This line opens the file as a readable stream + var readStream = fs.createReadStream(filename); + + // This will wait until we know the readable stream is actually valid before piping + readStream.on('open', function () { + // This just pipes the read stream to the response object (which goes to the client) + readStream.pipe(res); + }); + + // This catches any errors that happen while creating the readable stream (usually invalid names) + readStream.on('error', function(err) { + res.end(err); + }); +}).listen(8080); +``` diff --git a/locale/fr/knowledge/advanced/streams/how-to-use-fs-create-write-stream.md b/locale/fr/knowledge/advanced/streams/how-to-use-fs-create-write-stream.md new file mode 100644 index 000000000000..6c41a122da68 --- /dev/null +++ b/locale/fr/knowledge/advanced/streams/how-to-use-fs-create-write-stream.md @@ -0,0 +1,36 @@ +--- +title: How to use fs.createWriteStream? +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - streams + - fs +difficulty: 3 +layout: knowledge-post.hbs +--- + +The function `fs.createWriteStream()` creates a writable stream in a very simple manner. After a call to `fs.createWriteStream()` with the filepath, you have a writeable stream to work with. It turns out that the response (as well as the request) objects are streams. So we will stream the `POST` data to the file `output`. Since the code is simple enough, it is pretty easy just to read through it and comment why each line is necessary. 
+ +```javascript +var http = require('http'); +var fs = require('fs'); + +http.createServer(function(req, res) { + // This opens up the writeable stream to `output` + var writeStream = fs.createWriteStream('./output'); + + // This pipes the POST data to the file + req.pipe(writeStream); + + // After all the data is saved, respond with a simple html form so they can post more data + req.on('end', function () { + res.writeHead(200, {"content-type":"text/html"}); + res.end('
'); + }); + + // This is here incase any errors occur + writeStream.on('error', function (err) { + console.log(err); + }); +}).listen(8080); +``` diff --git a/locale/fr/knowledge/advanced/streams/how-to-use-stream-pipe.md b/locale/fr/knowledge/advanced/streams/how-to-use-stream-pipe.md new file mode 100644 index 000000000000..b357ce7efb42 --- /dev/null +++ b/locale/fr/knowledge/advanced/streams/how-to-use-stream-pipe.md @@ -0,0 +1,94 @@ +--- +title: How to use stream.pipe +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - streams +difficulty: 2 +layout: knowledge-post.hbs +--- + +If you've been using Node.js for a while, you've definitely run into streams. HTTP connections are streams, open files are streams; stdin, stdout, and stderr are all streams as well. A 'stream' is node's I/O abstraction - if you feel like you still need to understand them better, you can read more about them [here](https://nodejs.org/api/stream.html#stream_stream). + +Streams make for quite a handy abstraction, and there's a lot you can do with them - as an example, let's take a look at `stream.pipe()`, the method used to take a readable stream and connect it to a writeable steam. Suppose we want to spawn a `node` child process and pipe our stdout and stdin to its corresponding stdout and stdin. + +```javascript +#!/usr/bin/env node + +var child = require('child_process'); + +var myREPL = child.spawn('node'); + +myREPL.stdout.pipe(process.stdout, { end: false }); + +process.stdin.resume(); + +process.stdin.pipe(myREPL.stdin, { end: false }); + +myREPL.stdin.on('end', function() { + process.stdout.write('REPL stream ended.'); +}); + +myREPL.on('exit', function (code) { + process.exit(code); +}); +``` + +There you have it - spawn the Node.js REPL as a child process, and pipe your stdin and stdout to its stdin and stdout. Make sure to listen for the child's 'exit' event, too, or else your program will just hang there when the REPL exits. + +Another use for `stream.pipe()` is file streams. 
In Node.js, `fs.createReadStream()` and `fs.createWriteStream()` are used to create a stream to an open file descriptor. Now let's look at how one might use `stream.pipe()` to write to a file. You'll probably recognize most of the code: + +```javascript +#!/usr/bin/env node + +var child = require('child_process'), + fs = require('fs'); + +var myREPL = child.spawn('node'), + myFile = fs.createWriteStream('myOutput.txt'); + +myREPL.stdout.pipe(process.stdout, { end: false }); +myREPL.stdout.pipe(myFile); + +process.stdin.resume(); + +process.stdin.pipe(myREPL.stdin, { end: false }); +process.stdin.pipe(myFile); + +myREPL.stdin.on("end", function() { + process.stdout.write("REPL stream ended."); +}); + +myREPL.on('exit', function (code) { + process.exit(code); +}); +``` + +With those small additions, your stdin and the stdout from your REPL will both be piped to the writeable file stream you opened to 'myOutput.txt'. It's that simple - you can pipe streams to as many places as you want. + +Another very important use case for `stream.pipe()` is with HTTP request and response objects. Here we have the very simplest kind of proxy: + +```javascript +#!/usr/bin/env node + +var http = require('http'); + +http.createServer(function(request, response) { + var proxy = http.createClient(9000, 'localhost') + var proxyRequest = proxy.request(request.method, request.url, request.headers); + proxyRequest.on('response', function (proxyResponse) { + proxyResponse.pipe(response); + }); + request.pipe(proxyRequest); +}).listen(8080); + +http.createServer(function (req, res) { + res.writeHead(200, { 'Content-Type': 'text/plain' }); + res.write('request successfully proxied to port 9000!' + '\n' + JSON.stringify(req.headers, true, 2)); + res.end(); +}).listen(9000); +``` + +One could also use `stream.pipe()` to send incoming requests to a file for logging, or to a child process, or any one of a number of other things. 
+ +Hopefully this has shown you the basics of using `stream.pipe()` to easily pass your data streams around. It's truly a powerful little trick in Node.js, and its uses are yours to explore. Happy coding, and try not to cross your streams! diff --git a/locale/fr/knowledge/advanced/streams/what-are-streams.md b/locale/fr/knowledge/advanced/streams/what-are-streams.md new file mode 100644 index 000000000000..ab5d98cbfd0a --- /dev/null +++ b/locale/fr/knowledge/advanced/streams/what-are-streams.md @@ -0,0 +1,46 @@ +--- +title: What are streams? +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - streams +difficulty: 3 +layout: knowledge-post.hbs +--- + +Streams are another basic construct in Node.js that encourages asynchronous coding. Streams allow you to process the data as it is generated or retrieved. Streams can be readable, writeable, or both. + +In other words, Streams use events to deal with data as it happens, rather than only with a callback at the end. Readable streams emit the event `data` for each chunk of data that comes in, and an `end` event, which is emitted when there is no more data. Writeable streams can be written to with the `write()` function, and closed with the `end()` function. All types of streams emit `error` events when errors arise. + +As a quick example, we can write a simple version of `cp` (the Unix utility that copies files). We could do that by reading the whole file with standard filesystem calls and then writing it out to a file. Unfortunately, that requires that the whole file be read in before it can be written. In this case, writing the file isn't faster, but if we were streaming over a network or doing CPU processing on the data, then there could be measurable performance improvements. + +Run this script with arguments like `node cp.js src.txt dest.txt`. This would mean, in the code below, that `process.argv[2]` is `src.txt` and `process.argv[3]` is `dest.txt`. 
+ +```javascript +var fs = require('fs'); +console.log(process.argv[2], '->', process.argv[3]); + +var readStream = fs.createReadStream(process.argv[2]); +var writeStream = fs.createWriteStream(process.argv[3]); + +readStream.on('data', function (chunk) { + writeStream.write(chunk); +}); + +readStream.on('end', function () { + writeStream.end(); +}); + +//Some basic error handling +readStream.on('error', function (err) { + console.log("ERROR", err); +}); + +writeStream.on('error', function (err) { + console.log("ERROR", err); +}); +``` + +This sets up a readable stream from the source file and a writable stream to the destination file. Then whenever the readable stream gets data, it gets written to the writeable stream. Then finally it closes the writable stream when the readable stream is finished. + +It would have been better to use [pipe](/en/knowledge/advanced/streams/how-to-use-stream-pipe/) like `readStream.pipe(writeStream);`, however, to show how streams work, we have done things the long way. diff --git a/locale/fr/knowledge/child-processes/how-to-spawn-a-child-process.md b/locale/fr/knowledge/child-processes/how-to-spawn-a-child-process.md new file mode 100644 index 000000000000..245d5d8f7479 --- /dev/null +++ b/locale/fr/knowledge/child-processes/how-to-spawn-a-child-process.md @@ -0,0 +1,60 @@ +--- +title: How to spawn a child process - the basics +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - child_process +difficulty: 2 +layout: knowledge-post.hbs +--- + +If you find yourself wishing you could have your Node.js process start another program for you, then look no further than the `child_process` module. + +The simplest way is the "fire, forget, and buffer" method using `child_process.exec`. It runs your process, buffers its output (up to a default maximum of 200kb), and lets you access it from a callback when it is finished. + +The examples you will see in this article are all Linux-based. 
On Windows, you need to switch these commands with their Windows alternatives. + +Take a look at an example: + +```js +const { exec } = require('child_process'); + +const ls = exec('ls -l', function (error, stdout, stderr) { + if (error) { + console.log(error.stack); + console.log('Error code: '+error.code); + console.log('Signal received: '+error.signal); + } + console.log('Child Process STDOUT: '+stdout); + console.log('Child Process STDERR: '+stderr); +}); + +ls.on('exit', function (code) { + console.log('Child process exited with exit code '+code); +}); +``` + +`error.stack` is a stack trace to the point that the [Error object](/en/knowledge/errors/what-is-the-error-object/) was created. + +The `stderr` of a given process is not exclusively reserved for error messages. Many programs use it as a channel for secondary data instead. As such, when trying to work with a program that you have not previously spawned as a child process, it can be helpful to start out dumping both `stdout` and `stderr`, as shown above, to avoid any surprises. + +While `child_process.exec` buffers the output of the child process for you, it also returns a `ChildProcess` object, which wraps a still-running process. In the example above, since we are using `ls`, a program that will exit immediately regardless, the only part of the `ChildProcess` object worth worrying about is the `on exit` handler. It is not necessary here - the process will still exit and the error code will still be shown on errors. + +**Buffering the Output** means that the output of the command is loaded into the memory before sending to `stdout` or `stderr` and as mentioned above a default of 200KB can be buffered into the memory. This feature has both pros and cons: + +Pros: + +* You can pipe the output of one command as the input to another (just like you could in Linux). Example `ls -al | grep '^package'` will show the list of all the sub-directories in the current directory that begin with the word `'package'`. 
+ +Cons: + +* Buffering the entire data into memory will affect the process performance. +* Only a set maximum size of data can be buffered. + +There are other very useful spawning functions like: `.spawn()`, `.fork()`, `.execFile()`. + +* `child_process.spawn()`: The spawn function launches a command in a new process and you can use it to pass that command any arguments. It's the most generic spawning function and all other functions are built over it [[docs]](https://nodejs.org/api/child_process.html#child_process_child_process). +* `child_process.execFile()`: The execFile function is similar to `child_process.exec(`) except that it spawns the command directly without first spawning a shell by default [[docs]](https://nodejs.org/api/child_process.html#child_process_child_process_execfile_file_args_options_callback). +* `child_process.fork()`: The fork function spawns a new Node.js process and invokes a specified module with an IPC communication channel established that allows sending messages between parent and child [[docs]](https://nodejs.org/api/child_process.html#child_process_child_process_fork_modulepath_args_options). + +The functions `.exec()`, `.spawn()` and `.execFile()` do have their synchronous blocking versions that will wait until the child process exits namely `.execSync()`, `.spawnSync()` and `.execFileSync()` respectively. 
These blocking versions are particularly useful for one time startup processing tasks diff --git a/locale/fr/knowledge/command-line/how-to-get-colors-on-the-command-line.md b/locale/fr/knowledge/command-line/how-to-get-colors-on-the-command-line.md new file mode 100644 index 000000000000..eb876efc1b02 --- /dev/null +++ b/locale/fr/knowledge/command-line/how-to-get-colors-on-the-command-line.md @@ -0,0 +1,144 @@ +--- +title: How to get colors on the command line +date: '2011-08-26T10:08:50.000Z' +tags: + - cli +difficulty: 1 +layout: knowledge-post.hbs +--- + +When working on the command line, it can be both fun and extremely useful to colorize one's output. To colorize console output, you need to use ANSI escape codes. The module [colors.js](https://www.npmjs.com/package/colors), available on `npm`, provides an extremely easy to use wrapper that makes adding colors a breeze. + +First, install it to the directory you'd like to work in. + +```bash +npm install colors +``` + +Now open up a little test script for yourself, and try something like this: + +```js +const colors = require('colors'); + +const stringOne = 'This is a plain string.'; +const stringTwo = 'This string is red.'.red; +const stringThree = 'This string is blue.'.blue; +const today = new Date().toLocaleDateString(); // returns today's date in mm/dd/yyyy format + +console.log(stringOne.black.bgMagenta); +console.log(stringOne.yellow.bgRed.bold); +console.log(`Today is: ${today}`.black.bgGreen); + +console.log(stringTwo); +console.log(stringThree); + +console.log(stringTwo.magenta); +console.log(stringThree.grey.bold); +``` + +There are several things to take note of here - first, the string object has been prototyped, so any color may be added simply by adding the property to the string! It works on string literals, template literals and on variables, as shown at the top of the example above. 
+ +Notice, also, from the second pair of `console.log` statements, that once set, a color value persists as part of the string. This is because under the hood, the proper ANSI color tags have been prepended and appended as necessary - anywhere the string gets passed where ANSI color codes are also supported, the color will remain. + +The last pair of `console.log` statements are probably the most important. Because of the way `colors.js` and ANSI color codes work, if more than one color property is set on a string, **only the first color property to be set on the string takes effect.** This is because the colors function as 'state shifts' rather than as tags. + +Let's look at a more explicit example. If you set the following properties with `colors.js`: + +```js +myString.red.blue.green +``` + +You can think of your terminal saying to itself, "Make this green. No, make this blue. No, make this red. No more color codes now? Red it is, then." The codes are read in the reverse order, and the last/'innermost' is applied. This can be extremely useful if you're using a library that sets its own default colors that you don't like - if you set a color code yourself on the string you pass in to the library, it will supersede the other author's color code(s). + +The last thing to note is the final line of the example script. While a color code was set previously, a 'bold' code was not, so the example was made bold, but not given a different color. + +### Using `colors` without changing `String.prototype` +Now an instance of `colors` can also be used. Though this approach is slightly less nifty but is beginner friendly and is specially useful if you don't want to touch `String.prototype`. 
Some example of this are: + +```js +const colors = require('colors'); + +const stringOne = 'This is a plain string.'; +const stringTwo = 'This string is red.'; +const stringThree = 'This string is blue.'; +const today = new Date().toLocaleDateString(); // returns today's date in mm/dd/yyyy format + +console.log(colors.bgMagenta.black(stringOne)); +console.log(colors.bold.bgRed.yellow(stringOne)); +console.log(colors.bgGreen.black(`Today is: ${today}`)); + +console.log(colors.red(stringTwo)); +console.log(colors.blue(stringThree)); + +console.log(colors.magenta.red(stringTwo)); +console.log(colors.bold.grey.black.blue(stringThree)); +``` + +Unlike the `String.prototype` approach, the chained methods on the `colors` instance are executed left to right i.e., the method closest to the string is finally applied. In the last `console.log` you can think of your terminal saying to itself, "Make this grey. Now, make this black. Now, make this blue. No more coloring methods now? Blue it is, then." + +With the latest version of `colors.js` you can also define **[Custom Themes](https://www.npmjs.com/package/colors#custom-themes)** in `color.js`, which makes our code more Robust and allows better Encapsulation of data. A nice use case of this maybe: + +```js +var colors = require('colors'); + +colors.setTheme({ + info: 'bgGreen', + help: 'cyan', + warn: 'yellow', + success: 'bgBlue', + error: 'red' +}); + +// outputs red text +console.log("this is an error".error); + +// outputs text on blue background +console.log("this is a success message".success); +``` + +One last thing: the colors can look quite different in different terminals - sometimes, `bold` is bold, sometimes it's just a different color. Try it out and see for yourself! + +For reference, here's the full list of available `colors.js` properties. 
+ +### text colors + +* black +* red +* green +* yellow +* blue +* magenta +* cyan +* white +* gray +* grey + +### background colors + +* bgBlack +* bgRed +* bgGreen +* bgYellow +* bgBlue +* bgMagenta +* bgCyan +* bgWhite + +### styles + +* reset +* bold +* dim +* italic +* underline +* inverse +* hidden +* strikethrough + +### extras + +* rainbow +* zebra +* america +* trap +* random diff --git a/locale/fr/knowledge/command-line/how-to-parse-command-line-arguments.md b/locale/fr/knowledge/command-line/how-to-parse-command-line-arguments.md new file mode 100644 index 000000000000..611304f04106 --- /dev/null +++ b/locale/fr/knowledge/command-line/how-to-parse-command-line-arguments.md @@ -0,0 +1,127 @@ +--- +title: How to parse command line arguments +date: '2011-08-26T10:08:50.000Z' +tags: + - cli +difficulty: 1 +layout: knowledge-post.hbs +--- + +Passing in arguments via the command line is an extremely basic programming task, and a necessity for anyone trying to write a simple Command-Line Interface (CLI). In Node.js, as in C and many related environments, all command-line arguments received by the shell are given to the process in an array called `argv` (short for 'argument values'). + +Node.js exposes this array for every running process in the form of `process.argv` - let's take a look at an example. Make a file called `argv.js` and add this line: + +```js +console.log(process.argv); +``` + +Now save it, and try the following in your shell: + +```bash +$ node argv.js one two three four five +[ 'node', + '/home/avian/argvdemo/argv.js', + 'one', + 'two', + 'three', + 'four', + 'five' ] +``` + +There you have it - an array containing any arguments you passed in. Notice the first two elements - `node` and the path to your script. These will always be present - even if your program takes no arguments of its own, your script's interpreter and path are still considered arguments to the shell you're using. 
+ +Where everyday CLI arguments are concerned, you'll want to skip the first two. Now try this in `argv.js`: + +```js +var myArgs = process.argv.slice(2); +console.log('myArgs: ', myArgs); +``` + +This yields: + +```bash +$ node argv.js one two three four +myArgs: [ 'one', 'two', 'three', 'four' ] +``` + +Now let's actually do something with the args: + +```js +var myArgs = process.argv.slice(2); +console.log('myArgs: ', myArgs); + +switch (myArgs[0]) { +case 'insult': + console.log(myArgs[1], 'smells quite badly.'); + break; +case 'compliment': + console.log(myArgs[1], 'is really cool.'); + break; +default: + console.log('Sorry, that is not something I know how to do.'); +} +``` + +JS PRO TIP: Remember to `break` after each `case` - otherwise you'll run the next case too! + +Referring to your command-line arguments by array index isn't very clean, and can quickly turn into a nightmare when you start working with flags and the like - imagine you made a server, and it needed a lot of arguments. Imagine having to deal with something like `myapp -h host -p port -r -v -b --quiet -x -o outfile` - some flags need to know about what comes next, some don't, and most CLIs let users specify arguments in any order they want. Sound like a fun string to parse? + +Luckily, there are many third party modules that makes all of this trivial - one of which is [yargs](https://www.npmjs.com/package/yargs). It's available via `npm`. Use this command from your app's base path: + +``` +npm i yargs +``` + +Once you have it, give it a try - it can really be a life-saver. 
Let's test it with a little fun Leap Year checker and Current Time teller
+
+```js
+const yargs = require('yargs');
+
+const argv = yargs
+  .command('lyr', 'Tells whether an year is leap year or not', {
+    year: {
+      description: 'the year to check for',
+      alias: 'y',
+      type: 'number',
+    }
+  })
+  .option('time', {
+    alias: 't',
+    description: 'Tell the present Time',
+    type: 'boolean',
+  })
+  .help()
+  .alias('help', 'h')
+  .argv;
+
+if (argv.time) {
+  console.log('The current time is: ', new Date().toLocaleTimeString());
+}
+
+if (argv._.includes('lyr')) {
+  const year = argv.year || new Date().getFullYear();
+  if (((year % 4 == 0) && (year % 100 != 0)) || (year % 400 == 0)) {
+    console.log(`${year} is a Leap Year`);
+  } else {
+    console.log(`${year} is NOT a Leap Year`);
+  }
+}
+
+console.log(argv);
+```
+
+The last line was included to let you see how `yargs` handles your arguments. Here's a quick reference:
+
+* `argv.$0` contains the name of the script file which is executed like: `'$0': 'myapp.js'`.
+* `argv._` is an array containing each element not attached to an option (or flag); these elements are referred to as `commands` in yargs.
+* Individual options(flags) become properties of `argv`, such as with `argv.h` and `argv.time`. Note that non-single-letter flags must be passed in as `--flag` like: `node myapp.js --time`.
+
+A summary of elements used in the program:
+
+* **argv**: This is the modified `process.argv` which we have configured with yargs.
+* **command()**: This method is used to add commands, their description and options which are specific to these commands only, like in the above code `lyr` is the command and `-y` is a lyr-specific option: `node myapp.js lyr -y 2016`
+* **option()**: This method is used to add global options(flags) which can be accessed by all commands or without any command. 
+* **help()**: This method is used to display a help dialogue when `--help` option is encountered which contains description of all the `commands` and `options` available. +* **alias()**: This method provides an alias name to an option, like in the above code both `--help` and `-h` triggers the help dialogue. + +For more information on yargs and the many, many other things it can do for your command-line arguments, please visit [http://yargs.js.org/docs/](http://yargs.js.org/docs/) diff --git a/locale/fr/knowledge/command-line/how-to-prompt-for-command-line-input.md b/locale/fr/knowledge/command-line/how-to-prompt-for-command-line-input.md new file mode 100644 index 000000000000..41d2b189fddc --- /dev/null +++ b/locale/fr/knowledge/command-line/how-to-prompt-for-command-line-input.md @@ -0,0 +1,106 @@ +--- +title: How do I prompt users for input from a command-line script? +date: '2011-08-26T10:08:50.000Z' +tags: + - javascript + - core + - cli +difficulty: 2 +layout: knowledge-post.hbs +--- + +So you've got a little CLI tool, but you want to be able to prompt a user for additional data after the script has started, rather than passing it in as a command line argument or putting it in a file. To do this, you'll need to listen to STDIN ("standard input", i.e. your keyboard), which Node.js exposes for you as `process.stdin`, a readable stream. + +Streams are the Node.js way of dealing with evented I/O - it's a big topic, and you can read more about them [here](https://nodejs.org/api/stream.html). For now, we're going to use the built-in `readline` module which is a wrapper around Standard I/O, suitable for taking user input from command line(terminal). + +Here's a simple example. Try the following in a new file: + +```js +const readline = require("readline"); +const rl = readline.createInterface({ + input: process.stdin, + output: process.stdout +}); + +rl.question("What is your name ? ", function(name) { + rl.question("Where do you live ? 
", function(country) { + console.log(`${name}, is a citizen of ${country}`); + rl.close(); + }); +}); + +rl.on("close", function() { + console.log("\nBYE BYE !!!"); + process.exit(0); +}); +``` + +In the above code `readline.createInterface()` is used for creating an instance of `readline` by configuring the readable and the writable streams. The `input` key takes a readable stream like `process.stdin` or `fs.createReadStream('file.txt')` and the `output` key takes a writable stream like `process.stdout` or `process.stderr`. + +The `rl.question()` method displays the query by writing it to the `output`, waits for user input to be provided on `input`, then invokes the `callback` function passing the provided input as the first argument. + +NODE PRO TIP: Do remember to use `rl.close()` to close the transmitting otherwise the process will be left in the `idle` state. + +The last part of the code uses `rl.on()` method to add an event listener to the `close` event which simply `console.log` to the output stream and exits the process. This part is completely optional and can be removed at will. For more in-depth details and usage refer to the docs [here](https://nodejs.org/api/readline.html). + +If all of this sounds complicated, or if you want a higher-level interface to this sort of thing, don't worry - as usual, the Node.js community has come to the rescue. One particularly friendly module to use for this is `prompt`, available on `npm`: + +```bash +npm install prompt +``` + +Prompt is built to be easy - if your eyes started to glaze over as soon as you saw `Readable Stream`, then this is the section for you. 
Compare the following to the example above: + +```js +const prompt = require('prompt'); + +prompt.start(); + +prompt.get(['username', 'email'], function (err, result) { + if (err) { return onErr(err); } + console.log('Command-line input received:'); + console.log(' Username: ' + result.username); + console.log(' Email: ' + result.email); +}); + +function onErr(err) { + console.log(err); + return 1; +} +``` + +NODE PRO TIP: This short script also demonstrates proper error handling in node - errors are a callback's first argument, and `return` is used with the error handler so that the rest of the function doesn't execute when errors happen. + +Prompt also makes it trivial to handle a certain set of recurring properties that one might want to attach. + +```js +const prompt = require('prompt'); + +const properties = [ + { + name: 'username', + validator: /^[a-zA-Z\s\-]+$/, + warning: 'Username must be only letters, spaces, or dashes' + }, + { + name: 'password', + hidden: true + } +]; + +prompt.start(); + +prompt.get(properties, function (err, result) { + if (err) { return onErr(err); } + console.log('Command-line input received:'); + console.log(' Username: ' + result.username); + console.log(' Password: ' + result.password); +}); + +function onErr(err) { + console.log(err); + return 1; +} +``` + +For more information on Prompt, please see [the project's GitHub page](https://github.com/flatiron/prompt). 
diff --git a/locale/fr/knowledge/cryptography/how-to-use-crypto-module.md b/locale/fr/knowledge/cryptography/how-to-use-crypto-module.md new file mode 100644 index 000000000000..9811383edd6e --- /dev/null +++ b/locale/fr/knowledge/cryptography/how-to-use-crypto-module.md @@ -0,0 +1,163 @@ +--- +title: How to use the crypto module +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - crypto +difficulty: 3 +layout: knowledge-post.hbs +--- + +The [crypto](https://nodejs.org/api/crypto.html) module is a wrapper for [OpenSSL](https://en.wikipedia.org/wiki/Openssl) cryptographic functions. It supports calculating hashes, authentication with HMAC, ciphers, and more! + +The crypto module is mostly useful as a tool for implementing [cryptographic protocols](https://en.wikipedia.org/wiki/Cryptographic_protocol) such as [TLS](https://en.wikipedia.org/wiki/Transport_Layer_Security) and [https](https://en.wikipedia.org/wiki/Https). For most users, the built-in [tls module](https://nodejs.org/api/tls.html) and [https module](https://nodejs.org/api/https.html) should more than suffice. However, for the user that only wants to use small parts of what's needed for full-scale cryptography or is crazy/desperate enough to implement a protocol using OpenSSL and Node.js: Read on. + +## Hashes + +### What Is A Hash? + +A hash is a fixed-length string of bits that is procedurally and deterministically generated from some arbitrary block of source data. Some important properties of these hashes (the type useful for cryptography) include: + +* **Fixed length:** This means that, no matter what the input, the length of the hash is the same. For example, SHA-256 hashes are always 256 bits long whether the input data is a few bits or a few gigabytes. + +* **Deterministic:** For the same input, you should expect to be able to calculate exactly the same hash. This makes hashes useful for checksums. 
+
+* **Collision-Resistant:** A collision is when the same hash is generated for two different input blocks of data. Hash algorithms are designed to be extremely unlikely to have collisions -- just how unlikely is a property of the hash algorithm. The importance of this property depends on the use case.
+
+* **Unidirectional:** A good hash algorithm is easy to apply, but hard to undo. This means that, given a hash, there isn't any reasonable way to find out what the original piece of data was.
+
+### Hash Algorithms That Work With Crypto
+
+The hashes that work with crypto are dependent on what your version of OpenSSL supports. If you have a new enough version of OpenSSL, you can get a list of hash types your OpenSSL supports by typing `openssl list-message-digest-algorithms` into the command line. For older versions, simply type `openssl list-message-digest-commands` instead!
+
+One of the most common hash algorithms is [SHA-256](https://en.wikipedia.org/wiki/SHA-2). Older popular types like **[SHA-1](https://en.wikipedia.org/wiki/Sha1) or [MD5](https://en.wikipedia.org/wiki/MD5#Security) are not secure any more** and should not be used.
+
+### How To Calculate Hashes with Crypto
+
+Crypto has a method called `createHash` which allows you to calculate a hash. Its only argument is a string representing the hash algorithm to use. This example finds the SHA-256 hash for the string, "Man oh man do I love node!":
+
+```js
+require("crypto")
+  .createHash("sha256")
+  .update("Man oh man do I love node!")
+  .digest("hex");
+```
+
+The `update` method is used to push data to later be turned into a hash with the `digest` method. `update` can be invoked multiple times to ingest streaming data, such as buffers from a file read stream. The argument for `digest` represents the output format, and may either be "binary", "hex" or "base64". It defaults to binary. 
+
+## HMAC
+
+HMAC stands for Hash-based Message Authentication Code, and is a process for applying a hash algorithm to both data and a secret key that results in a single final hash. Its use is similar to that of a vanilla hash, but also allows you to check the *authenticity* of data as *well* as the integrity of said data (as you can using SHA-256 checksums).
+
+The API for hmacs is very similar to that of `createHash`, except that the method is called `createHmac` and it takes a key as a second argument:
+
+```js
+require("crypto").createHmac("sha256", "password")
+  .update("If you love node so much why don't you marry it?")
+  .digest("hex");
+```
+
+The resulting SHA-256 hash is unique to both the input data and the key.
+
+## Ciphers
+
+Ciphers allow you to encode and decode messages given a password.
+
+### Cipher Algorithms That Work With Crypto
+
+Like crypto's hash algorithms, the ciphers that work with crypto are dependent on what your version of OpenSSL supports. You can get a list of cipher types your OpenSSL supports by typing `openssl list-cipher-commands` into the command line for older versions, or `openssl list-cipher-algorithms` for newer versions of OpenSSL. OpenSSL supports *many* ciphers; a good and popular one is [AES_256](https://en.wikipedia.org/wiki/Advanced_Encryption_Standard).
+
+### How To Use Cipher Algorithms with Crypto:
+
+Crypto comes with two methods for ciphering and deciphering:
+
+* `crypto.createCipheriv(algorithm, key, iv)`
+* `crypto.createDecipheriv(algorithm, key, iv)`
+
+Both of these methods take arguments similarly to `createHmac`. They also both have analogous `update` functions. However, each use of `update` returns a chunk of the encoded/decoded data instead of requiring one to call `digest` to get the result. Moreover, after encoding (or decoding) your data, you will likely have to call the `final` method to get the last chunk of encoded information. 
+ +Another important addition in the cipher method is of the `iv` or [initialization vector](https://en.wikipedia.org/wiki/Initialization_vector). Initialization vectors should be unpredictable and unique, typically required to be random or pseudorandom. Randomization is crucial for encryption schemes to achieve semantic security, a property whereby repeated usage of the scheme under the same key does not allow an attacker to infer relationships between segments of the encrypted message. + +Here's an example, slightly less trivial than previous examples, that uses crypto and [yargs](https://github.com/yargs/yargs) to encode and decode messages from the command line: + +```js +#!/usr/bin/env node + +const crypto = require('crypto'), + argv = require("yargs").argv, + resizedIV = Buffer.allocUnsafe(16), + iv = crypto + .createHash("sha256") + .update("myHashedIV") + .digest(); + +iv.copy(resizedIV); + +if (argv.e && argv.key) { + const key = crypto + .createHash("sha256") + .update(argv.key) + .digest(), + cipher = crypto.createCipheriv("aes256", key, resizedIV), + msg = []; + + argv._.forEach( function (phrase) { + msg.push(cipher.update(phrase, "binary", "hex")); + }); + + msg.push(cipher.final("hex")); + console.log(msg.join("")); + +} else if (argv.d && argv.key) { + const key = crypto + .createHash("sha256") + .update(argv.key) + .digest(), + decipher = crypto.createDecipheriv("aes256", key, resizedIV), + msg = []; + + argv._.forEach( function (phrase) { + msg.push(decipher.update(phrase, "hex", "binary")); + }); + + msg.push(decipher.final("binary")); + console.log(msg.join("")); +} +``` + +NODE PRO TIP: The `crypto.createCipheriv()` and `crypto.createDecipheriv()` methods do not take a password, rather a `key` and an `iv` which are combined together to form a random password. The size of the `key` and `iv` depends on the chosen algorithm. 
A reference to common algorithms and their `key` and `iv` size is given below:
+
+| Algorithm | Key | iv |
+| ----------- | ------------------ | ------------------ |
+| aes128 | 16 byte (128 bits) | 16 byte (128 bits) |
+| aes-128-cbc | 16 byte (128 bits) | 16 byte (128 bits) |
+| aes192 | 24 byte (192 bits) | 16 byte (128 bits) |
+| aes256 | 32 byte (256 bits) | 16 byte (128 bits) |
+
+In the code above, the user-entered `key` is hashed using `SHA-256`, which produces a 32 byte buffer by default; this buffered key is then used as the [cryptographic key](https://en.wikipedia.org/wiki/Key_(cryptography)) in the `crypto.createCipheriv()` and `crypto.createDecipheriv()` methods. The `iv` is also hashed with `SHA-256` and is 32 byte in size, but all AES modes (CBC mode and CFB mode) take an `iv` of exactly 16 byte (128 bits); therefore another Buffer `resizedIV` is used which contains the first 16 byte of the original 32 byte `iv`.
+
+Using this script to encode a message looks like this:
+
+```bash
+$ node ./secretmsg.js -e --key="popcorn" "My treasure is buried behind Carl's Jr. on Telegraph."
+c8c78895fd91da17cca9cf0d28e742c6077fb5a89ef5cdc23d9c37c96c5fb7f321d7f52c06e73c46633783d9535e2aa5cc07f2ad1803d73614c4e6882026bfd9
+```
+
+Now, if I gave somebody the same script, my encoded message and the key, they can decode the message and find out where I buried my treasure:
+
+```bash
+$ node ./secretmsg.js -d --key="popcorn" c8c78895fd91da17cca9cf0d28e742c6077fb5a89ef5cdc23d9c37c96c5fb7f321d7f52c06e73c46633783d9535e2aa5cc07f2ad1803d73614c4e6882026bfd9
+My treasure is buried behind Carl's Jr. on Telegraph.
+```
+
+You should know that what I buried behind Carl's Jr. was just a cigarette butt, and that this script is obviously not for serious use. 
+ +## Signing and Verification + +Crypto has other methods used for dealing with certificates and credentials, as used for TLS: + +* `crypto.createCredentials` +* `crypto.createSign` +* `crypto.createVerify` + +These methods supply the last building blocks for a complete cryptographic protocol, and require an advanced knowledge of real-world cryptographic protocols to be useful. Again, it is recommended that developers use either the [tls](https://nodejs.org/api/tls.html) module or the [https](https://nodejs.org/api/https.html) module if applicable. diff --git a/locale/fr/knowledge/cryptography/how-to-use-the-tls-module.md b/locale/fr/knowledge/cryptography/how-to-use-the-tls-module.md new file mode 100644 index 000000000000..ac5cf92bd1ee --- /dev/null +++ b/locale/fr/knowledge/cryptography/how-to-use-the-tls-module.md @@ -0,0 +1,171 @@ +--- +title: How To Use The TLS Module +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - tls + - ssl + - secure +difficulty: 3 +layout: knowledge-post.hbs +--- + +## What is TLS? + +[Transport Layer Security](https://en.wikipedia.org/wiki/Transport_Layer_Security) (or TLS) is the successor to Secure Sockets Layer (or SSL). It, along with SSL, are the de-facto standard cryptographic protocols for secure communications over the web. TLS encrypts communications on top of a network transport layer (typically tcp), and uses public-key cryptography to encrypt messages. + +### Public-Key Cryptography + +In public-key cryptography, each peer has two keys: A public key, and a private key. The public key is shared with everyone, and the private key is (naturally) kept secret. In order to encrypt a message, a computer requires its private key and the recipient's public key. Then, in order to decrypt the message, the recipient requires its *own* private key and the *sender*'s public key. + +In TLS connections, the public key is called a *[certificate](https://en.wikipedia.org/wiki/Digital_certificate)*. 
This is because it's "[signed](https://en.wikipedia.org/wiki/Digital_signature)" to prove that the public key belongs to its owner. TLS certificates may either be signed by a third-party certificate authority (CA), or they may be [self-signed](https://en.wikipedia.org/wiki/Self-signed_certificate). In the case of Certificate Authorities, Mozilla keeps [a list of trusted root CAs](http://mxr.mozilla.org/mozilla/source/security/nss/lib/ckfw/builtins/certdata.txt) that are generally agreed upon by most web browsers. These root CAs may then issue certificates to other signing authorities, which in turn sign certificates for the general public. + +### History of TLS/SSL Support in Node.js + +TLS support in node is relatively new. The first stable version of Node.js to support TLS and HTTPS was the v0.4 branch, which was released in early 2011. Since then, the primary focus of the core developers has shifted from TLS/HTTPS to Windows support in the v0.5 branch. As such, the TLS APIs in node are still a little rough around the edges, and documentation leaves something to be desired. + +## The tls Module + +### tls.createServer + +In most ways, the tls module's server api is similar to that of the net module. Besides the fact that it's for encrypted connections, the major difference is that the options object passed to `tls.connect` or `tls.createServer` needs to include information on both the private key and the certificate, in [pem format](https://en.wikipedia.org/wiki/X.509#Certificate_filename_extensions). Here's an example of a tls server: + +```javascript +var tls = require('tls'), + fs = require('fs'), + colors = require('colors'), + msg = [ + ".-..-..-. .-. .-. .--. .---. .-. .---. .-.", + ": :; :: : : :.-.: :: ,. :: .; :: : : . 
:: :", + ": :: : : :: :: :: :: :: .': : : :: :: :", + ": :: :: : : `' `' ;: :; :: :.`.: :__ : :; ::_;", + ":_;:_;:_; `.,`.,' `.__.':_;:_;:___.':___.':_;" + ].join("\n").cyan; + +var options = { + key: fs.readFileSync('private-key.pem'), + cert: fs.readFileSync('public-cert.pem') +}; + +tls.createServer(options, function (s) { + s.write(msg+"\n"); + s.pipe(s); +}).listen(8000); +``` + +In this example, a "hello world" tls server is created, listening on port 8000. The options object includes two properties: `key` and `cert`. The contents of these properties come directly from the private key and public certificate stored on the filesystem. In this case they are binary buffers, but the tls module can also accept unicode strings. + +### Generating Your Private Key And Certificate With OpenSSL: + +In order for this example server to work, of course, you will need a private key and a certificate. You can generate both of these with OpenSSL. + +First, generate a private key: + +``` +$ openssl genrsa -out private-key.pem 1024 +Generating RSA private key, 1024 bit long modulus +......................................++++++ +........++++++ +e is 65537 (0x10001) +``` + +This creates a suitable private key and writes it to `./private-key.pem`. + +Next, create a Certificate Signing Request file using your private key: + +``` +$ openssl req -new -key private-key.pem -out csr.pem +You are about to be asked to enter information that will be incorporated +into your certificate request. +What you are about to enter is what is called a Distinguished Name or a DN. +There are quite a few fields but you can leave some blank +For some fields there will be a default value, +If you enter '.', the field will be left blank. +----- +Country Name (2 letter code) [AU]:US +State or Province Name (full name) [Some-State]:California +Locality Name (eg, city) []:Oakland +Organization Name (eg, company) [Internet Widgits Pty Ltd]:Panco, Inc. 
+Organizational Unit Name (eg, section) []: +Common Name (eg, YOUR name) []:Joshua Holbrook +Email Address []:josh.holbrook@gmail.com + +Please enter the following 'extra' attributes +to be sent with your certificate request +A challenge password []:dangerface +An optional company name []: +``` + +The purpose of this CSR is to "request" a certificate. That is, if you wanted a CA to sign your certificate, you could give this file to them to process and they would give you back a certificate. + +Alternately, however, you may self-sign your certificate, again using your private key: + +``` +$ openssl x509 -req -in csr.pem -signkey private-key.pem -out public-cert.pem +Signature ok +subject=/C=US/ST=California/L=Oakland/O=Panco, Inc./CN=Joshua Holbrook/emailAddress=josh.holbrook@gmail.com +Getting Private key +``` + +This generates your certificate. Now you're cooking! + +### Trying it out: + +One way to test out your new "hello world" server is to again use OpenSSL: + +``` +openssl s_client -connect 127.0.0.1:8000 +``` + +You should see a bunch of output regarding the handshaking process, and then at the very end you should see a big, cyan figlet banner saying, "Hi world!" + +### tls.connect + +The tls module also supplies tools for connecting to such a server: + +```javascript +var tls = require('tls'), + fs = require('fs'); + +var options = { + key: fs.readFileSync('private-key.pem'), + cert: fs.readFileSync('public-cert.pem') +}; + +var conn = tls.connect(8000, options, function() { + if (conn.authorized) { + console.log("Connection authorized by a Certificate Authority."); + } else { + console.log("Connection not authorized: " + conn.authorizationError) + } + console.log(); +}); + +conn.on("data", function (data) { + console.log(data.toString()); + conn.end(); +}); +``` + +The idea is similar, except instead of creating a server, this script connects to one instead. `tls.connect` also takes an options object, but then returns a stream. 
+
+`tls.connect` also fires a callback when the connection is made, which allows for checking to see if the connection is authorized---that is, if all the certificates are in order. `conn.authorized` is a boolean, and `conn.authorizationError` is a string containing the reason that the connection is unauthorized.
+
+This is what happens when the client is run (with the server running):
+
+```
+$ node client.js
+Connection not authorized: DEPTH_ZERO_SELF_SIGNED_CERT
+
+.-..-..-. .-. .-. .--. .---. .-. .---. .-.
+: :; :: : : :.-.: :: ,. :: .; :: : : . :: :
+: :: : : :: :: :: :: :: .': : : :: :: :
+: :: :: : : `' `' ;: :; :: :.`.: :__ : :; ::_;
+:_;:_;:_; `.,`.,' `.__.':_;:_;:___.':___.':_;
+```
+
+Note that self-signing the server certificate results in a non-authorized status because you're not listed as a trusted certificate authority.
+
+## "starttls"
+
+It's entirely possible to "upgrade" an existing tcp connection into a TLS-encrypted one with node. However, node does not have special functions for doing so as of the v0.4 branch. Therefore, it needs to be done "by-hand", using the crypto module and some undocumented tls module functionality. The Node.js documentation points to a third-party module, which aims to abstract the process. diff --git a/locale/fr/knowledge/errors/what-are-the-error-conventions.md b/locale/fr/knowledge/errors/what-are-the-error-conventions.md new file mode 100644 index 000000000000..36f09ec10e1a --- /dev/null +++ b/locale/fr/knowledge/errors/what-are-the-error-conventions.md @@ -0,0 +1,52 @@ +--- +title: What are the error conventions? +date: '2011-08-26T10:08:50.000Z' +tags: + - errors + - conventions +difficulty: 1 +layout: knowledge-post.hbs +--- + +In Node.js, it is considered standard practice to handle errors in asynchronous functions by returning them as the first argument to the current function's callback. If there is an error, the first parameter is passed an `Error` object with all the details. Otherwise, the first parameter is null. 
+ +It's simpler than it sounds; let's demonstrate. + +```javascript +var isTrue = function(value, callback) { + if (value === true) { + callback(null, "Value was true."); + } + else { + callback(new Error("Value is not true!")); + } +} + +var callback = function (error, retval) { + if (error) { + console.log(error); + return; + } + console.log(retval); +} + +// Note: when calling the same asynchronous function twice like this, you are in a race condition. +// You have no way of knowing for certain which callback will be called first when calling the functions in this manner. + +isTrue(false, callback); +isTrue(true, callback); +``` + +``` +{ stack: [Getter/Setter], + arguments: undefined, + type: undefined, + message: 'Value is not true!' } +Value was true. +``` + +As you can see from the example, the callback is called with null as its first argument if there is no error. However, if there is an error, you create an `Error` object, which then becomes the callback's only parameter. + +The `callback` function shows the reason for this: it allows a user to easily know whether or not an error occurred. If `null` was not the first argument passed on success, the user would need to check the object being returned and determine themselves whether or not the object constituted an error - a much more complex and less user-friendly approach. + +So to wrap it all up, when using callbacks, if an error comes up, then pass it as the first argument. Otherwise, pass `null` first, and then your return arguments. On the receiving end, inside the callback function, check if the first parameter is non-null; if it is, handle it as an error. diff --git a/locale/fr/knowledge/errors/what-is-the-error-object.md b/locale/fr/knowledge/errors/what-is-the-error-object.md new file mode 100644 index 000000000000..e2802f837993 --- /dev/null +++ b/locale/fr/knowledge/errors/what-is-the-error-object.md @@ -0,0 +1,48 @@ +--- +title: What is the error object? 
+date: '2011-08-26T10:08:50.000Z' +tags: + - errors + - builtin +difficulty: 1 +layout: knowledge-post.hbs +--- + +The error object is a built-in object that provides a standard set of useful information when an error occurs, such as a stack trace and the error message. For example: + +Code: + +```javascript +var error = new Error("The error message"); +console.log(error); +console.log(error.stack); +``` + +Result: + +``` +{ stack: [Getter/Setter], + arguments: undefined, + type: undefined, + message: 'The error message' } +Error: The error message + at Object. (/home/nico/example.js:1:75) + at Module._compile (module.js:407:26) + at Object..js (module.js:413:10) + at Module.load (module.js:339:31) + at Function._load (module.js:298:12) + at Array.0 (module.js:426:10) + at EventEmitter._tickCallback (node.js:126:26) +``` + +`error.stack` shows you where an error came from, as well as a list of the function calls that preceded it - for your convenience, `error.stack` always prints `error.message` as the first line of its output, making `error.stack` a convenient single property to log during debugging. + +If you want to add more information to the Error object, you can always add properties, just as with any other JavaScript object: + +```javascript +var error = new Error("The error message"); +error.http_code = 404; +console.log(error); +``` + +For more details how to use the Error object, check out the [article on error conventions](/en/knowledge/errors/what-are-the-error-conventions/) diff --git a/locale/fr/knowledge/errors/what-is-try-catch.md b/locale/fr/knowledge/errors/what-is-try-catch.md new file mode 100644 index 000000000000..f43b61530170 --- /dev/null +++ b/locale/fr/knowledge/errors/what-is-try-catch.md @@ -0,0 +1,53 @@ +--- +title: What is try-catch? 
+date: '2011-08-26T10:08:50.000Z' +tags: + - errors + - builtin +difficulty: 1 +layout: knowledge-post.hbs +--- + +Example: + +```javascript +console.log("entering try-catch statement"); + +try { + console.log("entering try block"); + throw "thrown message"; + console.log("this message is never seen"); +} +catch (e) { + console.log("entering catch block"); + console.log(e); + console.log("leaving catch block"); +} +finally { + console.log("entering and leaving the finally block"); +} + +console.log("leaving try-catch statement"); +``` + +Results: + +``` +entering try-catch statement +entering try block +entering catch block +thrown message +leaving catch block +entering and leaving the finally block +leaving try-catch statement +``` + +JavaScript's `try-catch-finally` statement works very similarly to the `try-catch-finally` encountered in C++ and Java. First, the try block is executed until and unless the code in it throws an exception (whether it is an explicit `throw` statement, the code has an uncaught native exception, or if the code calls a function that uses `throw`). + +If the code doesn't throw an exception, then the whole try block is executed. If the code threw an exception inside the try block, then the catch block is executed. Last of all, the finally block is always executed, subsequent to the other blocks but prior to any subsequent code located outside of the `try-catch-finally` blocks. The `finally` block will just about always execute, no matter what kind of throwing, catching, or returning one might be trying to do inside the `try` or `catch` blocks. + +Note that you can omit the `catch` or `finally` block, but one of them must be present. + +## But wait, isn't it Node.js convention to not use try-catch? + +In the core Node.js libraries, the only place that one really *needs* to use a try-catch is around `JSON.parse()`. 
All of the other methods use either the standard Error object through the first parameter of the callback or emit an `error` event. Because of this, it is generally considered [standard](/en/knowledge/errors/what-are-the-error-conventions/) to return errors through the callback rather than to use the `throw` statement. diff --git a/locale/fr/knowledge/file-system/how-to-read-files-in-nodejs.md b/locale/fr/knowledge/file-system/how-to-read-files-in-nodejs.md new file mode 100644 index 000000000000..38f6f2c8b2e4 --- /dev/null +++ b/locale/fr/knowledge/file-system/how-to-read-files-in-nodejs.md @@ -0,0 +1,61 @@ +--- +title: How do I read files in Node.js? +date: '2011-08-26T10:08:50.000Z' +tags: + - filesystem +difficulty: 2 +layout: knowledge-post.hbs +--- + +Reading the contents of a file into memory is a very common programming task, and, as with many other things, the Node.js core API provides methods to make this trivial. There are a variety of file system methods, all contained in the `fs` module. The easiest way to read the entire contents of a file is with `fs.readFile`, as follows: + +```javascript +fs = require('fs'); +fs.readFile(file, [encoding], [callback]); + +// file = (string) filepath of the file to read +``` + +`encoding` is an optional parameter that specifies the type of encoding to read the file. Possible encodings are 'ascii', 'utf8', and 'base64'. If no encoding is provided, the default is `null`. + +`callback` is a function to call when the file has been read and the contents are ready - it is passed two arguments, `error` and `data`. If there is no error, `error` will be `null` and `data` will contain the file contents; otherwise `err` contains the error message. 
+ +So if we wanted to read `/etc/hosts` and print it to stdout (just like Unix `cat`): + +```javascript +fs = require('fs') +fs.readFile('/etc/hosts', 'utf8', function (err,data) { + if (err) { + return console.log(err); + } + console.log(data); +}); +``` + +The contents of `/etc/hosts` should now be visible to you, provided you have permission to read the file in the first place. + +Let's now take a look at an example of what happens when you try to read an invalid file - the easiest example is one that doesn't exist. + +```javascript +fs = require('fs'); +fs.readFile('/doesnt/exist', 'utf8', function (err,data) { + if (err) { + return console.log(err); + } + console.log(data); +}); +``` + +This is the output: + +``` +{ stack: [Getter/Setter], + arguments: undefined, + type: undefined, + message: 'ENOENT, No such file or directory \'/doesnt/exist\'', + errno: 2, + code: 'ENOENT', + path: '/doesnt/exist' } +``` + +This is a basic Node.js [Error object](/en/knowledge/errors/what-is-the-error-object/) - it can often be useful to log `err.stack` directly, since this contains a stack trace to the location in code at which the Error object was created. diff --git a/locale/fr/knowledge/file-system/how-to-search-files-and-directories-in-nodejs.md b/locale/fr/knowledge/file-system/how-to-search-files-and-directories-in-nodejs.md new file mode 100644 index 000000000000..31940f5536c6 --- /dev/null +++ b/locale/fr/knowledge/file-system/how-to-search-files-and-directories-in-nodejs.md @@ -0,0 +1,47 @@ +--- +title: How do I search files and directories? +date: '2011-08-26T10:08:50.000Z' +tags: + - filesystem +difficulty: 1 +layout: knowledge-post.hbs +--- + +Suppose you want to list all the files in the current directory. One approach is to use the builtin `fs.readdir` [method](/en/knowledge/file-system/how-to-read-files-in-nodejs/). 
This will get you an array of all the files and directories on the specified path: + +```javascript +fs = require('fs'); + +fs.readdir(process.cwd(), function (err, files) { + if (err) { + console.log(err); + return; + } + console.log(files); +}); +``` + +Unfortunately, if you want to do a recursive list of files, then things get much more complicated very quickly. To avoid all of this scary complexity, this is one of the places where a Node.js user-land library can save the day. [Node-findit](https://github.com/substack/node-findit), by SubStack, is a helper module to make searching for files easier. It has interfaces to let you work with callbacks, events, or just plain old synchronously (not a good idea most of the time). + +To install `node-findit`, simply use npm: + +``` +npm install findit +``` + +In the same folder, create a file called `example.js`, and then add this code. Run it with `node example.js`. This example uses the `node-findit` event-based interface. + +```javascript +//This sets up the file finder +var finder = require('findit').find(__dirname); + +//This listens for directories found +finder.on('directory', function (dir) { + console.log('Directory: ' + dir + '/'); +}); + +//This listens for files found +finder.on('file', function (file) { + console.log('File: ' + file); +}); +``` diff --git a/locale/fr/knowledge/file-system/how-to-store-local-config-data.md b/locale/fr/knowledge/file-system/how-to-store-local-config-data.md new file mode 100644 index 000000000000..c26f980797fc --- /dev/null +++ b/locale/fr/knowledge/file-system/how-to-store-local-config-data.md @@ -0,0 +1,91 @@ +--- +title: How to store local configuration data +date: '2011-08-26T10:08:50.000Z' +tags: + - conventions + - filesystem +difficulty: 1 +layout: knowledge-post.hbs +--- + +Storing your Node.js application's configuration data is quite simple - every object in JavaScript can be easily rendered as [JSON](/en/knowledge/javascript-conventions/what-is-json/), which in 
turn is just string data that can be sent or saved any way you'd like. The simplest way to do this involves the built-in `JSON.parse()` and `JSON.stringify()` methods.
+
+Let's take a look at a very simple (and contrived) example. First, to save some very simple data:
+
+```javascript
+var fs = require('fs');
+
+var myOptions = {
+  name: 'Avian',
+  dessert: 'cake',
+  flavor: 'chocolate',
+  beverage: 'coffee'
+};
+
+var data = JSON.stringify(myOptions);
+
+fs.writeFile('./config.json', data, function (err) {
+  if (err) {
+    console.log('There has been an error saving your configuration data.');
+    console.log(err.message);
+    return;
+  }
+  console.log('Configuration saved successfully.')
+});
+```
+
+It's really that simple - just `JSON.stringify()` and then save it however you'd like.
+
+Now let's load some configuration data:
+
+```javascript
+var fs = require('fs');
+
+var data = fs.readFileSync('./config.json'),
+    myObj;
+
+try {
+  myObj = JSON.parse(data);
+  console.dir(myObj);
+}
+catch (err) {
+  console.log('There has been an error parsing your JSON.')
+  console.log(err);
+}
+```
+
+NODE PRO TIP: Even if you don't like using `try/catch`, this is a place to use it. `JSON.parse` is a very strict JSON parser, and errors are common - most importantly, though, `JSON.parse` uses the `throw` statement rather than giving a callback, so `try/catch` is the only way to guard against the error.
+
+Using the built-in `JSON` methods can take you far, but as with so many other problems you might be looking to solve with Node.js, there is already a solution in Userland that can take you much further. The solution, in this case, is `nconf`. Written by Charlie Robbins, it's a configuration manager for Node.js, supporting in-memory storage, local file storage, as well as support for a `redis` backend, provided in a separate module.
+
+Let's take a look now at how we'd perform some local configuration access with `nconf`.
First, you'll need to install it to your project's working directory: + +``` +npm install nconf +``` + +After that, the syntax is a breeze. Have a look at an example: + +```javascript +var nconf = require('nconf'); + +nconf.use('file', { file: './config.json' }); +nconf.load(); +nconf.set('name', 'Avian'); +nconf.set('dessert:name', 'Ice Cream'); +nconf.set('dessert:flavor', 'chocolate'); + +console.log(nconf.get('dessert')); + +nconf.save(function (err) { + if (err) { + console.error(err.message); + return; + } + console.log('Configuration saved successfully.'); +}); +``` + +The only tricky thing to notice here is the delimiter - ':'. When accessing nested properties with `nconf`, a colon is used to delimit the namespaces of key names. If a specific sub-key is not provided, the whole object is set or returned. + +When using `nconf` to store your configuration data to a file, `nconf.save()` and `nconf.load()` are the only times that any actual file interaction will happen. All other access is performed on an in-memory copy of your data, which will not persist without a call to `nconf.save()`. Similarly, if you're trying to bring back configuration data from the last time your application ran, it will not exist in memory without a call to `nconf.load()`, as shown above. diff --git a/locale/fr/knowledge/file-system/how-to-use-the-path-module.md b/locale/fr/knowledge/file-system/how-to-use-the-path-module.md new file mode 100644 index 000000000000..39c993ffc309 --- /dev/null +++ b/locale/fr/knowledge/file-system/how-to-use-the-path-module.md @@ -0,0 +1,81 @@ +--- +title: How to use the path module? +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - filesystem +difficulty: 1 +layout: knowledge-post.hbs +--- + +The path module contains several helper functions to help make path manipulation easier. + +The first function worth mentioning is `path.normalize`. 
This function takes a path (in the form of a string) and strips it of duplicate slashes and normalizes directory abbreviations, like '.' for 'this directory' and '..' for 'one level up'. For example: + +``` +> var path = require('path'); +> path.normalize('/a/.///b/d/../c/') +'/a/b/c/' +``` + +A closely related function to `normalize` is `join`. This function takes a variable number of arguments, joins them together, and normalizes the path. + +``` +> var path = require('path'); +> path.join('/a/.', './//b/', 'd/../c/') +'/a/b/c' +``` + +A possible use of `join` is to manipulate paths when serving urls: + +``` +> var path = require('path'); +> var url = '/index.html'; +> path.join(process.cwd(), 'static', url); +'/home/nico/static/index.html' +``` + +There are three functions which are used to extract the various parts of the path name: `basename`, `extname`, and `dirname`. + +* `basename` returns the last portion of the path passed in. +* `extname` returns the extension of the last portion. Generally for directories, `extname` just returns ''. +* Finally, `dirname` returns everything that `basename` does not return. + +For example: + +``` +> var path = require('path') +> var a = '/a/b/c.html' +> path.basename(a) +'c.html' +> path.extname(a) +'.html' +> path.dirname(a) +'/a/b' +``` + +Note that `basename` has an optional second parameter that will strip out the extension if you pass the correct extension. + +``` +> var path = require('path') +> var a = '/a/b/c.html' +> path.basename(a, path.extname(a)) +'c' +``` + +Lastly, the `path` module provides methods to check whether or not a given path exists: `exists` and `existsSync` They both take the path of a file for the first parameter. + +`exists` takes a callback as its second parameter, to which is returned a boolean representing the existence of the file. + +`existsSync`, on the other hand, checks the given path synchronously, returning the boolean directly. 
In Node.js, you will typically want to use the asynchronous functions for most file system I/O - the synchronous versions will block your entire process until they finish. + +Blocking isn't always a bad thing. Checking the existence of a vital configuration file synchronously makes sense, for example - it doesn't matter much if your process is blocking for something it can't run without! Conversely, though, in a busy HTTP server, any per-request file I/O **MUST** be asynchronous, or else you'll be responding to requests one by one. See the article on [asynchronous operations](/en/knowledge/getting-started/control-flow/how-to-write-asynchronous-code/) for more details. + +``` +> var path = require('path') +> path.exists('/etc', function(exists){console.log("Does the file exist?", exists)}) +> Does the file exist? true + +> path.existsSync('/etc') +true +``` diff --git a/locale/fr/knowledge/file-system/how-to-write-files-in-nodejs.md b/locale/fr/knowledge/file-system/how-to-write-files-in-nodejs.md new file mode 100644 index 000000000000..841da398edb2 --- /dev/null +++ b/locale/fr/knowledge/file-system/how-to-write-files-in-nodejs.md @@ -0,0 +1,60 @@ +--- +title: How do I write files in Node.js? +date: '2011-08-26T10:08:50.000Z' +tags: + - filesystem +difficulty: 2 +layout: knowledge-post.hbs +--- + +Writing to a file is another of the basic programming tasks that one usually needs to know about - luckily, this task is very simple in Node.js. We can use the handy `writeFile` method inside the standard library's `fs` module, which can save all sorts of time and trouble. + +```javascript +fs = require('fs'); +fs.writeFile(filename, data, [encoding], [callback]) +``` + +`file = (string)` filepath of the file to read + +`data = (string or buffer)` the data you want to write to the file + +`encoding = (optional string)` the encoding of the `data`. Possible encodings are 'ascii', 'utf8', and 'base64'. If no encoding provided, then 'utf8' is assumed. 
+ +`callback = (optional function (err) {})` If there is no error, `err === null`, otherwise `err` contains the error message. + +So if we wanted to write "Hello World" to `helloworld.txt`: + +```javascript +fs = require('fs'); +fs.writeFile('helloworld.txt', 'Hello World!', function (err) { + if (err) return console.log(err); + console.log('Hello World > helloworld.txt'); +}); +``` + +``` +[contents of helloworld.txt]: +Hello World! +``` + +If we purposely want to cause an error, we can try to write to a file that we don't have permission to access: + +```javascript +fs = require('fs') +fs.writeFile('/etc/doesntexist', 'abc', function (err,data) { + if (err) { + return console.log(err); + } + console.log(data); +}); +``` + +``` +{ stack: [Getter/Setter], + arguments: undefined, + type: undefined, + message: 'EACCES, Permission denied \'/etc/doesntexist\'', + errno: 13, + code: 'EACCES', + path: '/etc/doesntexist' } +``` diff --git a/locale/fr/knowledge/file-system/security/introduction.md b/locale/fr/knowledge/file-system/security/introduction.md new file mode 100644 index 000000000000..1fce3790c9bb --- /dev/null +++ b/locale/fr/knowledge/file-system/security/introduction.md @@ -0,0 +1,60 @@ +--- +title: How can I secure my code? +date: +tags: + - filesystem + - security +difficulty: 3 +layout: knowledge-post.hbs +--- + +Sometimes, you might want to let users read or write files on your server. For example, maybe you want to write a forum software without using an actual database. The problem is that you do not want your users to be able to modify or to read arbitrary files on your server, and there sometimes are ways to get around restrictions that should prevent it. Read on to see how you can secure your code against evil attackers trying to mess with your files. + +## Poison Null Bytes + +Poison null bytes are a way to trick your code into seeing another filename than the one that will actually be opened. 
This can in many cases be used to circumvent directory traversal protections, to trick servers into delivering files with wrong file types and to circumvent restrictions on the file names that may be used. [A more detailed description is here.](http://groups.google.com/group/nodejs/browse_thread/thread/51f66075e249d767/85f647474b564fde) Always use code like this when accessing files with user-supplied names: + +```javascript +if (filename.indexOf('\0') !== -1) { + return respond('That was evil.'); +} +``` + +## Whitelisting + +You won't always be able to use whitelisting, but if you are, do it - it's very easy to implement and hard to get wrong. For example, if you know that all filenames are lowercase alphanumeric strings: + +```javascript +if (!/^[a-z0-9]+$/.test(filename)) { + return respond('illegal character'); +} +``` + +However, note that whitelisting alone isn't sufficient anymore as soon as you allow dots and slashes - people could enter things like `../../etc/passwd` in order to get files from outside the allowed folder. + +## Preventing Directory Traversal + +Directory traversal means that an attacker tries to access files outside of the folder you want to allow him to access. You can prevent this by using nodes built-in "path" module. **Do not implement the stuff in the path module again yourself** - for example, when someone runs your code on a windows server, not handling backslashes like slashes will allow attackers to do directory traversal. + +This example assumes that you already checked the `userSuppliedFilename` variable as described in the "Poison Null Bytes" section above. + +```javascript +var rootDirectory = '/var/www/'; +``` + +Make sure that you have a slash at the end of the allowed folders name - you don't want people to be able to access `/var/www-secret/`, do you?. 
+ +```javascript +var path = require('path'); +var filename = path.join(rootDirectory, userSuppliedFilename); +``` + +Now `filename` contains an absolute path and doesn't contain `..` sequences anymore - `path.join` takes care of that. However, it might be something like `/etc/passwd` now, so you have to check whether it starts with the `rootDirectory`: + +```javascript +if (filename.indexOf(rootDirectory) !== 0) { + return respond('trying to sneak out of the web root?'); +} +``` + +Now the `filename` variable should contain the name of a file or directory that's inside the allowed directory (unless it doesn't exist). diff --git a/locale/fr/knowledge/getting-started/control-flow/how-to-write-asynchronous-code.md b/locale/fr/knowledge/getting-started/control-flow/how-to-write-asynchronous-code.md new file mode 100644 index 000000000000..0f1ef5181769 --- /dev/null +++ b/locale/fr/knowledge/getting-started/control-flow/how-to-write-asynchronous-code.md @@ -0,0 +1,89 @@ +--- +title: How to write asynchronous code +date: '2011-08-26T10:08:50.000Z' +tags: + - asynchronous + - callbacks + - event-emitters +difficulty: 1 +layout: knowledge-post.hbs +--- + +Node.js promotes an asynchronous coding style from the ground up, in contrast to many of the most popular web frameworks. There are a number of important things to be aware of when learning to write asynchronous code - otherwise, you will often find your code executing in extremely unexpected ways. Take this (general) rule to heart: + +### Use the asynchronous functions, avoid the synchronous ones! + +Many of the functions in Node.js core have both synchronous and asynchronous versions. Under most circumstances, it will be far better for you to use the asynchronous functions - otherwise, why are you using Node.js? 
+ +As a quick example comparing and contrasting the two, using `fs.readFile`: + +```javascript +var fs = require('fs'); + +fs.readFile('example.file', 'utf8', function (err, data) { + if (err) { + return console.log(err); + } + console.log(data); +}); + +//==================== + +var data = fs.readFileSync('example.file','utf8'); +console.log(data); +``` + +Just looking at these two blocks of code, the synchronous version appears to be more concise. However, the asynchronous version is more complicated for a very good reason. In the synchronous version, the world is paused until the file is finished reading - your process will just sit there, waiting for the OS (which handles all file system tasks). + +The asynchronous version, on the other hand, does not stop time - instead, the callback function gets called when the file is finished reading. This leaves your process free to execute other code in the meantime. + +When only reading a file or two, or saving something quickly, the difference between synchronous and asynchronous file I/O can be quite small. On the other hand, though, when you have multiple requests coming in per second that require file or database IO, trying to do that IO synchronously would be quite thoroughly disastrous for performance. + +### Callbacks +Callbacks are a basic idiom in Node.js for asynchronous operations. When most people talk about callbacks, they mean the function that is passed as the last parameter to an asynchronous function. The callback is then later called with any return value or error message that the function produced. For more details, see the article on [callbacks](/en/knowledge/getting-started/control-flow/what-are-callbacks/) + +### Event Emitters +Event Emitters are another basic idiom in Node.js. A constructor is provided in Node.js core: `require('events').EventEmitter`. An Event Emitter is typically used when there will be multiple parts to the response (since usually you only want to call a callback once). 
For more details, see the article on [EventEmitters](/en/knowledge/getting-started/control-flow/what-are-event-emitters/) + +### A gotcha with asynchronous code +A common mistake in asynchronous code with JavaScript is to write code that does something like this: + +```javascript +for (var i = 0; i < 5; i++) { + setTimeout(function () { + console.log(i); + }, i); +} +``` + +The unexpected output is then: + +``` +5 +5 +5 +5 +5 +``` + +The reason this happens is because each timeout is created and then `i` is incremented. Then when the callback is called, it looks for the value of `i` and it is 5. The solution is to create a closure so that the current value of `i` is stored. For example: + +```javascript +for (var i = 0; i < 5; i++) { + (function(i) { + setTimeout(function () { + console.log(i); + }, i); + })(i); +} +``` + +This gives the proper output: + +``` +0 +1 +2 +3 +4 +``` diff --git a/locale/fr/knowledge/getting-started/control-flow/what-are-callbacks.md b/locale/fr/knowledge/getting-started/control-flow/what-are-callbacks.md new file mode 100644 index 000000000000..c96c962b4dfa --- /dev/null +++ b/locale/fr/knowledge/getting-started/control-flow/what-are-callbacks.md @@ -0,0 +1,59 @@ +--- +title: What are callbacks? +date: '2011-08-26T10:08:50.000Z' +tags: + - javascript + - core + - asynchronous + - callbacks +difficulty: 1 +layout: knowledge-post.hbs +--- + +In a synchronous program, you would write something along the lines of: + +```javascript +function processData () { + var data = fetchData (); + data += 1; + return data; +} +``` + +This works just fine and is very typical in other development environments. However, if fetchData takes a long time to load the data (maybe it is streaming it off the drive or the internet), then this causes the whole program to 'block' - otherwise known as sitting still and waiting - until it loads the data. 
Node.js, being an asynchronous platform, doesn't wait around for things like file I/O to finish - Node.js uses callbacks. A callback is a function called at the completion of a given task; this prevents any blocking, and allows other code to be run in the meantime. + +The Node.js way to deal with the above would look a bit more like this: + +```javascript +function processData (callback) { + fetchData(function (err, data) { + if (err) { + console.log("An error has occurred. Abort everything!"); + return callback(err); + } + data += 1; + callback(data); + }); +} +``` + +At first glance, it may look unnecessarily complicated, but callbacks are the foundation of Node.js. Callbacks give you an interface with which to say, "and when you're done doing that, do all this." This allows you to have as many IO operations as your OS can handle happening at the same time. For example, in a web server with hundreds or thousands of pending requests with multiple blocking queries, performing the blocking queries asynchronously gives you the ability to be able to continue working and not just sit still and wait until the blocking operations come back. This is a major improvement. + +The typical convention with asynchronous functions (which almost all of your functions should be): + +```javascript +function asyncOperation ( a, b, c, callback ) { + // ... lots of hard work ... + if ( /* an error occurs */ ) { + return callback(new Error("An error has occurred")); + } + // ... more work ... + callback(null, d, e, f); +} + +asyncOperation ( params.., function ( err, returnValues.. ) { + //This code gets run after the async operation gets run +}); +``` + +You will almost always want to follow the [error callback convention](/en/knowledge/errors/what-are-the-error-conventions/), since most Node.js users will expect your project to follow them. The general idea is that the callback is the last parameter. The callback gets called after the function is done with all of its operations. 
Traditionally, the first parameter of the callback is the `error` value. If the function hits an error, then they typically call the callback with the first parameter being an Error object. If it cleanly exits, then they will call the callback with the first parameter being null and the rest being the return value(s). diff --git a/locale/fr/knowledge/getting-started/control-flow/what-are-event-emitters.md b/locale/fr/knowledge/getting-started/control-flow/what-are-event-emitters.md new file mode 100644 index 000000000000..88bbd347435f --- /dev/null +++ b/locale/fr/knowledge/getting-started/control-flow/what-are-event-emitters.md @@ -0,0 +1,109 @@ +--- +title: What are Event Emitters? +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - asynchronous + - event-emitters +difficulty: 2 +layout: knowledge-post.hbs +--- + +In Node.js an event can be described simply as a string with a corresponding callback. An event can be "emitted" (or in other words, the corresponding callback be called) multiple times or you can choose to only listen for the first time it is emitted. 
So a simple example ran on the node [REPL](/en/knowledge/REPL/how-to-use-nodejs-repl/): + +```javascript +var example_emitter = new (require('events').EventEmitter); +example_emitter.on("test", function () { console.log("test"); }); +example_emitter.on("print", function (message) { console.log(message); }); +example_emitter.emit("test"); +example_emitter.emit("print", "message"); +example_emitter.emit("unhandled"); +``` + +``` +> var example_emitter = new (require('events').EventEmitter); +{} +> example_emitter.on("test", function () { console.log("test"); }); +{ _events: { test: [Function] } } +> example_emitter.on("print", function (message) { console.log(message); }); +{ _events: { test: [Function], print: [Function] } } +> example_emitter.emit("test"); +test //console.log'd +true //return value +> example_emitter.emit("print", "message"); +message //console.log'd +true //return value +> example_emitter.emit("unhandled"); +false //return value +``` + +This demonstrates all the basic functionality of an EventEmitter. The `on` or `addListener` method (basically the subscription method) allows you to choose the event to watch for and the callback to be called. The `emit` method (the publish method), on the other hand, allows you to "emit" an event, which causes all callbacks registered to the event to 'fire', (get called). + +So in the example, we first subscribe to both the `test` and `print` events. Then we emit the `test`, `print`, and `unhandled` events. Since `unhandled` has no callback, it just returns false; the other two run all the attached callbacks and return true. + +In the `print` event, note that we pass an extra parameter - all the extra parameters passed to 'emit' get passed to the callback function as arguments. + +If you use the method `once` instead of `on`, after the callback is fired, it is removed from the list of callbacks. A handy little function if you want to detect only the first time an event has been emitted. 
+ +If you want remove a specific callback, you can use `removeListener`. If you want to remove all callbacks to a specific event, you can use `removeAllListeners`. + +```javascript +var EventEmitter = require('events').EventEmitter, + ee = new EventEmitter(); + +function callback() { + console.log("Callback has been called!"); +} + +ee.once("event", callback); +ee.emit("event"); +ee.emit("event"); + +ee.on("event", callback); +ee.emit("event"); +ee.emit("event"); +ee.removeListener("event", callback); +ee.emit("event"); + +ee.on("event", callback); +ee.emit("event"); +ee.removeAllListeners("event"); +ee.emit("event"); +``` + +``` +> var ee = new (require('events').EventEmitter); +> var callback = function () { console.log("Callbacked!"); } +> ee.once("event", callback); +{ _events: { event: { [Function: g] listener: [Function] } } } +> ee.emit("event"); +Callbacked! //console.log'd +true +> ee.emit("event"); +false + +> ee.on("event", callback); +{ _events: { event: [Function] } } +> ee.emit("event"); +Callbacked! //console.log'd +true +> ee.emit("event"); +Callbacked! //console.log'd +true +> ee.removeListener("event", callback); +{ _events: {} } +> ee.emit("event"); +false + +> ee.on("event", callback); +{ _events: { event: [Function] } } +> ee.emit("event"); +Callbacked! //console.log'd +true +> ee.removeAllListeners("event"); +{ _events: { event: null } } +> ee.emit("event"); +false +``` + +NOTE: If you want create more than 10 listeners on a single event, you will have to make a call to `ee.setMaxListeners(n)` where n is the max numbers of listeners (with zero being unlimited number of listeners). This is used to make sure you aren't accidentally leaking event listeners. 
diff --git a/locale/fr/knowledge/getting-started/globals-in-node-js.md b/locale/fr/knowledge/getting-started/globals-in-node-js.md new file mode 100644 index 000000000000..fa172c1bfd48 --- /dev/null +++ b/locale/fr/knowledge/getting-started/globals-in-node-js.md @@ -0,0 +1,27 @@ +--- +title: The built-in globals in Node.js +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - globals +difficulty: 1 +layout: knowledge-post.hbs +--- + +Node.js has a number of built-in global identifiers that every Node.js developer should have some familiarity with. Some of these are true globals, being visible everywhere; others exist at the module level, but are inherent to every module, thus being pseudo-globals. + +First, let's go through the list of 'true globals': + +* `global` - The global namespace. Setting a property to this namespace makes it globally visible within the running process. +* `process` - The Node.js built-in `process` module, which provides interaction with the current Node.js process. [Read More](/en/knowledge/getting-started/the-process-module/) +* `console` - The Node.js built-in `console` module, which wraps various STDIO functionality in a browser-like way. [Read More](/en/knowledge/getting-started/the-console-module/) +* `setTimeout()`, `clearTimeout()`, `setInterval()`, `clearInterval()` - The built-in timer functions are globals. [Read More](/en/knowledge/javascript-conventions/what-are-the-built-in-timer-functions/) + +As mentioned above, there are also a number of 'pseudo-globals' included at the module level in every module: + +* `module`, `module.exports`, `exports` - These objects all pertain to the Node.js module system. [Read More](/en/knowledge/getting-started/what-is-require/) +* `__filename` - The `__filename` keyword contains the path of the currently executing file. Note that this is not defined while running the [Node.js REPL](/en/knowledge/REPL/how-to-use-nodejs-repl/). 
+* `__dirname` - Like `__filename`, the `__dirname` keyword contains the path to the root directory of the currently executing script. Also not present in the Node.js REPL. +* `require()` - The `require()` function is a built-in function, exposed per-module, that allows other valid modules to be included. [Read More](/en/knowledge/getting-started/what-is-require/) + +Much of this functionality can be extremely useful for a Node.js developer's daily life - but at the very least, remember these as bad names to use for your own functions! diff --git a/locale/fr/knowledge/getting-started/how-to-debug-nodejs-applications.md b/locale/fr/knowledge/getting-started/how-to-debug-nodejs-applications.md new file mode 100644 index 000000000000..ef6904d1f99d --- /dev/null +++ b/locale/fr/knowledge/getting-started/how-to-debug-nodejs-applications.md @@ -0,0 +1,110 @@ +--- +title: How to debug a node application +date: '2011-08-26T10:08:50.000Z' +tags: + - debug +difficulty: 1 +layout: knowledge-post.hbs +--- + +Often times, not just in the Node.js community but in software at large, people debug simply with a liberal sprinkle of standard output statements. This allows you to track down where unexpected values are being generated. However, this method can be tedious, or worse yet, not robust enough to detect the real problem. + +### Set up + +Thankfully, through the use of `node-inspector`, we can harness to power of the webkit-debuggers to work with our Node.js code. The process itself is simple. 
+ +First, ensure that node-inspector is installed: + +``` +npm install node-inspector -g +``` + +A good example application to experiment with is a basically 'hello world' server with a counter (copied from the `node-inspector` repo): + +```javascript +var http = require('http'); + +var x = 0; +http.createServer(function (req, res) { + x += 1; + res.writeHead(200, {'Content-Type': 'text/plain'}); + res.end('Hello World ' + x); +}).listen(8124); +console.log('Server running at http://127.0.0.1:8124/'); +``` + +First, we start your node program with debugging enabled. + +``` +node --debug app.js +``` + +which should print something along the lines of `debugger listening on port 5858` to stderr. Take note of the port number, it is the port that the debugger is running on. + +Next, start up `node-inspector`. If your program uses port 8080, then you may have to pass it a custom port. + +``` +node-inspector [--web-port=] +``` + +Finally you fire up a webkit browser such as chrome or safari. and go to `127.0.0.1:8080/debug?port=5858`. Note, if the debugger is listening on a port other than `5858`, you will need to change it. Also, if you passed a custom webport to node-inspector, then you will have to modify the `8080`. + +At this point, you will be met with a fairly empty screen with the `scripts`, `profiles`, and `console` tabs. + +### Scripts tab + +This is just like most webkit/firebug debuggers. It has a list of all the JavaScript files (including Node.js core and third party libraries) which you can select and dive into. To stop the interpreter on a specific line, you set a breakpoint by clicking on the number of the desired line. When the execution is frozen, by a breakpoint or by manually pausing interpretation by pressing the pause button, you can check the callstack and examine all the local, closure, and global variables. You can also modify the code to try and fix behavior. 
Note that when you modify the code through the script tab, it does not get saved to the file, so you will need to transfer the modifications back by hand.
+
+### Profiles tab
+
+To use the profile tab, you need a library called `v8-profiler`:
+
+```
+npm install v8-profiler
+```
+
+Next, you have to require it inside the file you are debugging:
+
+```javascript
+var profiler = require('v8-profiler');
+```
+
+Now you can finally enable the `profiles` tab, unfortunately, all you can do from this screen is a heap snapshot. So from the code, you need to select where you want to start the cpu profiler and can select a more precise location for heap snapshots.
+
+To take a heap snapshot, just insert this line in the desired location and optionally pass it a name.
+
+```javascript
+var snapshot = profiler.takeSnapshot(name);
+```
+
+To take a cpu profile, just surround the code that you are profiling with the two lines shown below. Optionally, a name can be included to identify the cpu profile.
+
+```javascript
+profiler.startProfiling(name);
+//..lots and lots of methods and code called..//
+var cpuProfile = profiler.stopProfiling([name]);
+```
+
+As an example of how to use these, here is the code given earlier modified to take a cpu profile on every request and take a heap snapshot after the server is created.
+
+```javascript
+var http = require('http');
+var profiler = require('v8-profiler');
+
+var x = 0;
+http.createServer(function (req, res) {
+  x += 1;
+  profiler.startProfiling('request '+x);
+  res.writeHead(200, {'Content-Type': 'text/plain'});
+  res.end('Hello World ' + x);
+  profiler.stopProfiling('request '+x);
+}).listen(8124);
+profiler.takeSnapshot('Post-Server Snapshot');
+console.log('Server running at http://127.0.0.1:8124/');
+```
+
+Note that despite these apis returning objects, it is much easier to sort through the data through the node-inspector interface.
Hopefully with these tools, you can make more informed decisions about memory leaks and bottlenecks.
+
+### Console tab
+
+Finally, the console tab allows you to use node's REPL in your program's global scope. This has a few gotchas since that means you cannot access local variables. Thus the variables you can read or write are variables that were defined without a `var` statement. The other gotcha is that `console.log` refers to node's `console.log` and not webkit's console.log. This means the output goes to stdout and not to your console tab. Otherwise it is a very straightforward node REPL.
diff --git a/locale/fr/knowledge/getting-started/how-to-use-util-inspect.md b/locale/fr/knowledge/getting-started/how-to-use-util-inspect.md
new file mode 100644
index 000000000000..99b9ea04479f
--- /dev/null
+++ b/locale/fr/knowledge/getting-started/how-to-use-util-inspect.md
@@ -0,0 +1,62 @@
+---
+title: How to use util.inspect
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - core
+  - debug
+difficulty: 1
+layout: knowledge-post.hbs
+---
+
+Node.js provides a utility function, for debugging purposes, that returns a string representation of an object. `util.inspect()` can be a true lifesaver while working with properties of large, complex objects.
+
+Let's provide a basic example. `util.inspect()` can be used on any object - a good demonstration will be one of the built-in objects of Node.js. Try this in the REPL (type `node` at your command line with no arguments):
+
+```javascript
+var util = require('util');
+util.inspect(console);
+```
+
+The output will be:
+
+```
+'{ log: [Function], info: [Function], warn: [Function], error: [Function], dir: [Function], time: [Function], timeEnd: [Function], trace: [Function], assert: [Function] }'
+```
+
+This is a listing of all the enumerable properties of the `console` object. It is also worth noting that `console.dir` is a wrapper around `util.inspect` that uses its default arguments.
+ +In the REPL, `util.inspect` will immediately return its output - this is not usually the case. In the context of normal Node.js code in a file, something must be done with the output. The simplest thing to do: + +```javascript +console.log(util.inspect(myObj)); +``` + +`util.inspect` can also be passed several optional arguments, shown here with their defaults: + +```javascript +util.inspect(object, showHidden=false, depth=2, colorize=true); +``` + +For example, `util.inspect(myObj, true, 7, true)` would inspect `myObj`, showing all the hidden and non-hidden properties up to a depth of `7` and colorize the output. Let's go over the arguments individually. + +The `depth` argument is the number of levels deep into a nested object to recurse - it defaults to 2. Setting it to `null` will cause it to recurse 'all the way', showing every level. Compare the (size of) the outputs of these two `util.inspect` statements in the REPL: + +```javascript +var http = require('http'); +util.inspect(http, true, 1); +util.inspect(http, true, 3); +``` + +The optional argument `showHidden` is a boolean that determines whether or not the 'non-enumerable' properties of an object will be displayed - it defaults to `false`, which tends to result in vastly more readable output. This isn't something a beginner needs to worry about most of the time, but it's worth demonstrating briefly. Once more, try the following in the REPL: + +```javascript +var util = require('util'); +util.inspect(console, true); +``` + +Finally, the optional argument `colorize` is a boolean that adds ANSI escape codes to the string output. When logged to a terminal window, it should be pretty printed with colors. 
+ +```javascript +var util = require('util'); +console.log(util.inspect({a:1, b:"b"}, false,2,true)); +``` diff --git a/locale/fr/knowledge/getting-started/npm/how-to-access-module-package-info.md b/locale/fr/knowledge/getting-started/npm/how-to-access-module-package-info.md new file mode 100644 index 000000000000..364805eddff9 --- /dev/null +++ b/locale/fr/knowledge/getting-started/npm/how-to-access-module-package-info.md @@ -0,0 +1,37 @@ +--- +title: How to access module package info +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - npm +difficulty: 1 +layout: knowledge-post.hbs +--- + +There are many situations in the world of software development where using the wrong version of a dependency or submodule can cause all sorts of pain and anguish - luckily for you, Node.js has a module available called pkginfo that can help keep these sorts of troubles at bay. + +Let's take a look at pkginfo - first, install via npm: + +``` +npm install pkginfo +``` + +Now all we need to do is require it, and invoke it. + +```javascript +var pkginfo = require('pkginfo')(module); + +console.dir(module.exports); +``` + +That would show us the entire contents of the package.json, neatly displayed to our console. If we only wanted certain pieces of information, we just specify them like so: + +```javascript +var pkginfo = require('pkginfo')(module, 'version', 'author'); + +console.dir(module.exports); +``` + +And only the fields we specify will be shown to us. + +For more information, see http://github.com/indexzero/ . diff --git a/locale/fr/knowledge/getting-started/npm/what-is-npm.md b/locale/fr/knowledge/getting-started/npm/what-is-npm.md new file mode 100644 index 000000000000..25ef3ce049b8 --- /dev/null +++ b/locale/fr/knowledge/getting-started/npm/what-is-npm.md @@ -0,0 +1,26 @@ +--- +title: What is npm? 
+date: '2011-08-26T10:08:50.000Z' +tags: + - npm +difficulty: 1 +layout: knowledge-post.hbs +--- + +`npm`, short for Node Package Manager, is two things: first and foremost, it is an online repository for the publishing of open-source Node.js projects; second, it is a command-line utility for interacting with said repository that aids in package installation, version management, and dependency management. A plethora of Node.js libraries and applications are published on npm, and many more are added every day. These applications can be searched for on http://npmjs.org/. Once you have a package you want to install, it can be installed with a single command-line command. + +Let's say you're hard at work one day, developing the Next Great Application. You come across a problem, and you decide that it's time to use that cool library you keep hearing about - let's use Caolan McMahon's [async](http://github.com/caolan/async) as an example. Thankfully, `npm` is very simple to use: you only have to run `npm install async`, and the specified module will be installed in the current directory under `./node_modules/`. Once installed to your `node_modules` folder, you'll be able to use `require()` on them just like they were built-ins. + +Let's look at an example of a global install - let's say `coffee-script`. The npm command is simple: `npm install coffee-script -g`. This will typically install the program and put a symlink to it in `/usr/local/bin/`. This will then allow you to run the program from the console just like any other CLI tool. In this case, running `coffee` will now allow you to use the coffee-script REPL. + +Another important use for npm is dependency management. When you have a node project with a [package.json](/en/knowledge/getting-started/npm/what-is-the-file-package-json/) file, you can run `npm install` from the project root and npm will install all the dependencies listed in the package.json. 
This makes installing a Node.js project from a git repo much easier! For example, `vows`, a Node.js testing framework, can be installed from git, and its single dependency, `eyes`, can be automatically handled: + +Example: + +``` +git clone https://github.com/cloudhead/vows.git +cd vows +npm install +``` + +After running those commands, you will see a `node_modules` folder containing all of the project dependencies specified in the package.json. diff --git a/locale/fr/knowledge/getting-started/npm/what-is-the-file-package-json.md b/locale/fr/knowledge/getting-started/npm/what-is-the-file-package-json.md new file mode 100644 index 000000000000..00ea1daf422e --- /dev/null +++ b/locale/fr/knowledge/getting-started/npm/what-is-the-file-package-json.md @@ -0,0 +1,50 @@ +--- +title: What is the file `package.json`? +date: '2011-08-26T10:08:50.000Z' +tags: + - npm + - conventions + - core +difficulty: 2 +layout: knowledge-post.hbs +--- + +All npm packages contain a file, usually in the project root, called `package.json` - this file holds various metadata relevant to the project. This file is used to give information to `npm` that allows it to identify the project as well as handle the project's dependencies. It can also contain other metadata such as a project description, the version of the project in a particular distribution, license information, even configuration data - all of which can be vital to both `npm` and to the end users of the package. The `package.json` file is normally located at the root directory of a Node.js project. + +Node.js itself is only aware of two fields in the `package.json`: + +```json +{ + "name" : "barebones", + "version" : "0.0.0", +} +``` + +The `name` field should explain itself: this is the name of your project. The `version` field is used by npm to make sure the right version of the package is being installed. 
Generally, it takes the form of `major.minor.patch` where `major`, `minor`, and `patch` are integers which increase after each new release. For more details, look at this spec: http://semver.org . + +For a more complete package.json, we can check out `underscore`: + +```json +{ + "name" : "underscore", + "description" : "JavaScript's functional programming helper library.", + "homepage" : "http://documentcloud.github.com/underscore/", + "keywords" : ["util", "functional", "server", "client", "browser"], + "author" : "Jeremy Ashkenas ", + "contributors" : [], + "dependencies" : [], + "repository" : {"type": "git", "url": "git://github.com/documentcloud/underscore.git"}, + "main" : "underscore.js", + "version" : "1.1.6" +} +``` + +As you can see, there are fields for the `description` and `keywords` of your projects. This allows people who find your project understand what it is in just a few words. The `author`, `contributors`, `homepage` and `repository` fields can all be used to credit the people who contributed to the project, show how to contact the author/maintainer, and give links for additional references. + +The file listed in the `main` field is the main entry point for the library; when someone runs `require()`, require resolves this call to `require()`. + +Finally, the `dependencies` field is used to list all the dependencies of your project that are available on `npm`. When someone installs your project through `npm`, all the dependencies listed will be installed as well. Additionally, if someone runs `npm install` in the root directory of your project, it will install all the dependencies to `./node_modules`. + +It is also possible to add a `devDependencies` field to your `package.json` - these are dependencies not required for normal operation, but required/recommended if you want to patch or modify the project. 
If you built your unit tests using a testing framework, for example, it would be appropriate to put the testing framework you used in your `devDependencies` field. To install a project's `devDependencies`, simply pass the `--dev` option when you use `npm install`. + +For even more options, you can look through the [online docs](https://docs.npmjs.com/files/package.json) or run `npm help json`. diff --git a/locale/fr/knowledge/getting-started/the-console-module.md b/locale/fr/knowledge/getting-started/the-console-module.md new file mode 100644 index 000000000000..afc150371352 --- /dev/null +++ b/locale/fr/knowledge/getting-started/the-console-module.md @@ -0,0 +1,57 @@ +--- +title: The built-in console module +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - cli + - globals +difficulty: 1 +layout: knowledge-post.hbs +--- + +Anyone familiar with browser-side development has probably used `console.log` for debugging purposes - Node.js has implemented a built-in `console` object to mimic much of this experience. Since we're working server-side, however, it wraps `stdout`, `stdin`, and `stderr` instead of the browser's debugging console. + +Because of this browser parallel, the `console` module has become home to quite a bit of standard output functionality of Node.js. The simplest is `console.log()`. + +```javascript +console.log('Hi, everybody!'); +console.log('This script is:', __filename); +console.log(__filename, process.title, process.argv); +``` + +The first, simplest example just prints the provided string to `stdout`. It can also be used to output the contents of variables, as evidenced in #2; furthermore, `console.dir()` is called on any objects passed in as arguments, enumerating their properties. + +NODE.JS PRO TIP: `console.log()` accepts three format characters, `%s`, `%d`, and `%j`. These format characters can be used to insert string, integer, or JSON data into your output - the order of format characters must match the order of arguments. 
+ +```javascript +var name = 'Harry', + number = 17, + myObj = { + propOne: 'stuff', + propTwo: 'more stuff' + }; +console.log('My name is %s, my number is %d, my object is %j', name, number, myObj); +``` + +A gotcha with `console.log`, and all functions that depend on it, is that it buffers the output. So if your process ends suddenly, whether it be from an exception or from `process.exit()`, it is entirely possible that the buffered output will never reach the screen. This can cause a great deal of frustration, so watch out for this unfortunate situation. + +`console.error()` works the same as `console.log`, except that the output is sent to `stderr` instead of `stdout`. This is actually an extremely important difference, as `stderr` is always written to synchronously. Any use of `console.error`, or any of the other functions in Node.js core that write to `stderr`, will block your process until the output has all been written. This is useful for error messages - you get them exactly when they occur - but if used everywhere, can greatly slow down your process. + +`console.dir()`, as mentioned above, is an alias for `util.inspect()` - it is used to enumerate object properties. [Read More](/en/knowledge/getting-started/how-to-use-util-inspect/) + +That covers the basic `console` module functionality, but there are a few other methods worth mentioning as well. First, the `console` module allows for the marking of time via `console.time()` and `console.timeEnd()`. Here is an example: + +```javascript +console.time('myTimer'); +var string = ''; +for (var i = 0; i < 300; i++) { + (function (i) { + string += 'aaaa' + i.toString(); + })(i); +} +console.timeEnd('myTimer'); +``` + +This would determine the amount of time taken to perform the actions in between the `console.time` and `console.timeEnd` calls. + +One last function worth mentioning is `console.trace()`, which prints a stack trace to its location in your code without throwing an error. 
This can occasionally be useful if you'd like to figure out where a particular failing function was called from. diff --git a/locale/fr/knowledge/getting-started/the-process-module.md b/locale/fr/knowledge/getting-started/the-process-module.md new file mode 100644 index 000000000000..2442350b0dcb --- /dev/null +++ b/locale/fr/knowledge/getting-started/the-process-module.md @@ -0,0 +1,117 @@ +--- +title: How to use the global process module +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - globals +difficulty: 2 +layout: knowledge-post.hbs +--- + +Each Node.js process has a set of built-in functionality, accessible through the global `process` module. The `process` module doesn't need to be required - it is somewhat literally a wrapper around the currently executing process, and many of the methods it exposes are actually wrappers around calls into core C libraries. + +## Events + +There are two built-in events worth noting in the `process` module, `exit` and `uncaughtException`. + +The `exit` event fires whenever the process is about to exit. + +```javascript +process.on('exit', function () { + fs.writeFileSync('/tmp/myfile', 'This MUST be saved on exit.'); +}); +``` + +Code like the above can occasionally be useful for saving some kind of final report before you exit. Note the use of a synchronous file system call - this is to make sure the I/O finishes before the process actually exits. + +The other built-in event is called `uncaughtException`. It fires, as you might guess, whenever an exception has occurred that hasn't been caught or dealt with somewhere else in your program. It's not the ideal way to handle errors, but it can be very useful as a last line of defense if a program needs to stay running indefinitely. 
+ +```javascript +process.on('uncaughtException', function (err) { + console.error('An uncaught error occurred!'); + console.error(err.stack); +}); +``` + +The default behavior on `uncaughtException` is to print a stack trace and exit - using the above, your program will display the message provided and the stack trace, but will **not** exit. + +## Streams + +The `process` object also provides wrappings for the three `STDIO` streams, `stdin`, `stdout`, and `stderr`. Put briefly, `stdin` is a readable stream (where one would read input from the user), `stdout` is a non-blocking writeable stream (writes to `stdout` are asynchronous, in other words), and `stderr` is a blocking (synchronous) writeable stream. + +The simplest one to describe is `process.stdout`. Technically, most output in Node.js is accomplished by using `process.stdout.write()` - though most people would never know it. The following is from `console.js` in Node.js core: + +```javascript +exports.log = function() { + process.stdout.write(format.apply(this, arguments) + '\n'); +}; +``` + +Since most people are used to the `console.log` syntax from browser development, it was provided as a convenient wrapper. + +Next we have `process.stderr`, which is very similar to `process.stdout` with one key exception - it blocks. When you write to `stderr`, your process blocks until the write is completed. Node.js provides a number of alias functions for output, most of which either end up using `stdout` or `stderr` under the hood. Here's a quick reference list: + +STDOUT, or non-blocking functions: `console.log`, `console.info`, `util.puts`, `util.print` + +STDERR, or blocking functions: `console.warn`, `console.error`, `util.debug` + +Lastly, `process.stdin` is a readable stream for getting user input. See [more on cli input](/en/knowledge/command-line/how-to-prompt-for-command-line-input/). 
+ +## Other Properties + +The `process` object additionally contains a variety of properties that allow you to access information about the running process. Let's run through a few quick examples with the help of the REPL: + +``` +> process.pid +3290 +> process.version +'v0.4.9' +> process.platform +'linux' +> process.title +'node' +``` + +The `pid` is the OS Process ID, `platform` is something general like 'linux' or 'darwin', and `version` refers to your Node.js version. `process.title` is a little bit different - while set to `node` by default, it can be set to anything you want, and will be what gets displayed in lists of running processes. + +The `process` module also exposes `process.argv`, an array containing the command-line arguments to the current process, and `process.argc`, an integer representing the number of arguments passed in. Read more on [how to parse command line arguments](/en/knowledge/command-line/how-to-parse-command-line-arguments/) + +`process.execPath` will return the absolute path of the executable that started this process. + +`process.env` contains your environment variables. Try `process.env.HOME`, for example. + +## Methods + +There are also a variety of methods attached to the `process` object, many of which deal with quite advanced aspects of a program. We'll take a look at a few of the more commonly useful ones, while leaving the more advanced parts for another article. + +`process.exit` exits the process. If you call an asynchronous function and then call `process.exit()` immediately afterwards, you will be in a race condition - the asynchronous call may or may not complete before the process is exited. `process.exit` accepts one optional argument - an integer exit code. `0`, by convention, is an exit with no errors. + +`process.cwd` returns the 'current working directory' of the process - this is often the directory from which the command to start the process was issued. 
+ +`process.chdir` is used to change the current working directory. For example: + +``` +> process.cwd() +'/home/avian/dev' +> process.chdir('/home/avian') +> process.cwd() +'/home/avian' +``` + +Finally, on a more advanced note, we have `process.nextTick`. This method accepts one argument - a callback - and places it at the top of the next iteration of the event loop. Some people do something like this: + +```javascript +setTimeout(function () { + // code here +}, 0) +``` + +This, however, is not ideal. In Node.js, this should be used instead: + +```javascript +process.nextTick(function () { + console.log('Next trip around the event loop, wheeee!') +}); +``` + +It is much more efficient, and much more accurate. diff --git a/locale/fr/knowledge/getting-started/what-is-node-core-verus-userland.md b/locale/fr/knowledge/getting-started/what-is-node-core-verus-userland.md new file mode 100644 index 000000000000..327721c25042 --- /dev/null +++ b/locale/fr/knowledge/getting-started/what-is-node-core-verus-userland.md @@ -0,0 +1,39 @@ +--- +title: What is node core versus userland +date: '2011-08-26T10:08:50.000Z' +tags: + - npm + - core + - userland + - terminology +difficulty: 1 +layout: knowledge-post.hbs +--- + +Occasionally, in the discussions in the NodeJS mailing lists and IRC channels, you may hear things referred to as "node-core" and "userland". + +Of course, traditionally, "userland" or "userspace" refer to everything outside the operating system kernel. In that sense, Node.js itself is a "userland" program. + +However, in the context of NodeJS, "core" refers to the modules and bindings that are compiled into NodeJS. In general, they provide a hook into very well-understood low-level functionality which almost all networking programs are going to require: TCP, HTTP, DNS, the File System, child processes, and a few other things. If something is fancy enough to argue about, there's a good chance it won't be part of node-core. 
HTTP is about as big as it gets, and if it wasn't so popular, it'd certainly not be a part of node. + +There are also some things in node-core that are simply too painful to do without in a JavaScript environment, or which have been created to implement some BOM constructs which are not part of the JavaScript language, but may as well be (eg, setTimeout, setInterval, and console). + +Everything else is "userland". This includes: npm, express, request, coffee-script, mysql clients, redis clients, and so on. You can often install these programs using [npm](http://npmjs.org/). + +The question of what is properly "node-core" and what belongs in "userland" is a constant battleground. In general, node is based on the philosophy that it should *not* come with "batteries included". It is easier to move things out of node-core than it is to move them in, which means that core modules must continually "pay rent" in terms of providing necessary functionality that nearly everyone finds valuable. + +## This is a Good Thing. + +One goal of node's minimal core library is to encourage people to implement things in creative ways, without forcing their ideas onto everyone. With a tiny core and a vibrant user space, we can all flourish and experiment without the onerous burden of having to always agree all the time. + +## Userland isn't Less + +If anything, it's more. Building functionality in userland rather than in node-core means: + +* You have a lot more freedom to iterate on the idea. +* Everyone who wants your module can install it easily enough (if you publish it with npm). +* You have freedom to break node conventions if that makes sense for your use-case. + +If you believe that something *really* just *needs* to be part of node's core library set, you should *still* build it as a module! 
It's much more likely to be pulled into node-core if people have a chance to see your great ideas in action, and if its core principles are iterated and polished and tested with real-world use. + +Changing functionality that is included in node-core is very costly. We do it sometimes, but it's not easy, and carries a high risk of regressions. Better to experiment outside, and then pull it into node-core once it's stable. Once it's usable as a userland package, you may even find that it's less essential to node-core than you first thought. diff --git a/locale/fr/knowledge/getting-started/what-is-require.md b/locale/fr/knowledge/getting-started/what-is-require.md new file mode 100644 index 000000000000..79d74067ce6a --- /dev/null +++ b/locale/fr/knowledge/getting-started/what-is-require.md @@ -0,0 +1,66 @@ +--- +title: What is require? +date: '2011-08-26T10:08:50.000Z' +tags: + - npm + - core + - globals + - builtin +difficulty: 1 +layout: knowledge-post.hbs +--- + +Node.js follows the CommonJS module system, and the builtin `require` function is the easiest way to include modules that exist in separate files. The basic functionality of `require` is that it reads a JavaScript file, executes the file, and then proceeds to return the `exports` object. An example module: + +```javascript +console.log("evaluating example.js"); + +var invisible = function () { + console.log("invisible"); +} + +exports.message = "hi"; + +exports.say = function () { + console.log(exports.message); +} +``` + +So if you run `var example = require('./example.js')`, then `example.js` will get evaluated and then `example` be an object equal to: + +``` +{ + message: "hi", + say: [Function] +} +``` + +If you want to set the exports object to a function or a new object, you have to use the `module.exports` object. 
So for an example: + +```javascript +module.exports = function () { + console.log("hello world") +} + +require('./example2.js')() //require itself and run the exports object +``` + +It is worth noting that each time you subsequently require an already-required file, the `exports` object is cached and reused. To illustrate this point: + +``` +node> require('./example.js') +evaluating example.js +{ message: 'hi', say: [Function] } +node> require('./example.js') +{ message: 'hi', say: [Function] } +node> require('./example.js').message = "hey" //set the message to "hey" +'hey' +node> require('./example.js') //One might think that this "reloads" the file... +{ message: 'hey', say: [Function] } //...but the message is still "hey" because of the module cache. +``` + +As you can see from the above, `example.js` is evaluated the first time, but all subsequent calls to `require()` only invoke the module cache, rather than reading the file again. As seen above, this can occasionally produce side effects. + +The rules of where `require` finds the files can be a little complex, but a simple rule of thumb is that if the file doesn't start with "./" or "/", then it is either considered a core module (and the local Node.js path is checked), or a dependency in the local `node_modules` folder. If the file starts with "./" it is considered a relative file to the file that called `require`. If the file starts with "/", it is considered an absolute path. NOTE: you can omit ".js" and `require` will automatically append it if needed. For more detailed information, see [the official docs](https://nodejs.org/docs/v0.4.2/api/modules.html#all_Together...) + +An extra note: if the filename passed to `require` is actually a directory, it will first look for `package.json` in the directory and load the file referenced in the `main` property. Otherwise, it will look for an `index.js`. 
diff --git a/locale/fr/knowledge/index.md b/locale/fr/knowledge/index.md new file mode 100644 index 000000000000..e6ee74d2e977 --- /dev/null +++ b/locale/fr/knowledge/index.md @@ -0,0 +1,6 @@ +--- +title: Knowledge Base +layout: knowledge-base-index.hbs +--- + +# Knowledge Base diff --git a/locale/fr/knowledge/intermediate/how-to-log.md b/locale/fr/knowledge/intermediate/how-to-log.md new file mode 100644 index 000000000000..f0328e323da5 --- /dev/null +++ b/locale/fr/knowledge/intermediate/how-to-log.md @@ -0,0 +1,106 @@ +--- +title: How to log in Node.js +date: '2011-08-26T10:08:50.000Z' +tags: + - logging +difficulty: 2 +layout: knowledge-post.hbs +--- + +Many processes, including most servers, write logs in one form or another. Reasons for logging include debugging, keeping track of users and resource usage, and reporting application state. + +### Simple Logging + +The simplest form of logging involves simply using `console.log` or one of the other standard output methods. In this approach, any information is printed to `stdout` where it can either be read by the developer as it occurs, or, for example, redirected to a log file. + +```javascript +console.log('Web Server started, waiting for connections...'); +``` + +Because it's so simple, console.log is by far the most common way of logging data in Node.js. + +### Custom Logging + +Logging only with functions such as `console.log` is not ideal for every use case, however. Many applications have some sort of 'debugging mode', for example, that shows the user much more output than normal execution. To do something like this, a better idea is to write your own simple logger, and use it instead of `console.log`. + +Here is an example of a basic custom logging module with configurable debugging levels. 
+ +```javascript +var logger = exports; +logger.debugLevel = 'warn'; +logger.log = function(level, message) { + var levels = ['info', 'warn', 'error']; + if (levels.indexOf(level) <= levels.indexOf(logger.debugLevel) ) { + if (typeof message !== 'string') { + message = JSON.stringify(message); + }; + console.log(level+': '+message); + } +} +``` + +Usage would then look like the following: + +```javascript +var logger = require('./logger'); +logger.debugLevel = 'warn'; +logger.log('info', 'Everything started properly.'); +logger.log('warn', 'Running out of memory...'); +logger.log('error', { error: 'flagrant'}); +``` + +Because `logger.debugLevel` was set to `warn`, the warning message and the error would both be displayed, but the `info` message would not be. + +The advantage here is that the behavior of our logging mechanisms can now be modified and controlled from a central part of our code. In this case, logging levels were added, and messages are converted to JSON if they aren't already in string form. There is a lot more that could be done here - saving logs to a file, pushing them to a database, setting custom colors and formatting the output - but by the time you want that much functionality from your custom logging function, it might be time to use an already-existing library. + +### Winston - multi-transport logging made easy + +[Winston](https://github.com/indexzero/winston) is a multi-transport, asynchronous logging library for Node.js. It is conceptually similar to our custom logger, but comes with a wide variety of useful features and functionality baked in. In addition, `winston` is battle-hardened by internal use at Nodejitsu! + +Here is an example of setting up a `winston` logger. This example includes most of the transports one could ever possibly want - please note that most use cases will only warrant a few of these. 
+ +```javascript +var winston = require('winston'); + +require('winston-riak').Riak; +require('winston-mongo').Mongo; +require('winston-couchdb').Couchdb; + +var logger = new (winston.Logger)({ + transports: [ + new winston.transports.Console(), + new winston.transports.File({ filename: 'path/to/all-logs.log' }), + new winston.transports.Couchdb({ 'host': 'localhost', 'db': 'logs' }), + new winston.transports.Riak({ bucket: 'logs' }), + new winston.transports.MongoDB({ db: 'db', level: 'info'}) + ], + exceptionHandlers: [ + new winston.transports.File({ filename: 'path/to/exceptions.log' }) + ] +}); +``` + +Here, we have instantiated a new `winston` logger, and provided a number of logging transports. Winston has built-in support for configurable logging levels, and provides alias methods for each configured logging level. For example, `winston.warn(x)` is an alias for `winston.log('warn', x)`. Thus, the following: + +```javascript +logger.warn('Hull Breach Detected on Deck 7!'); +``` + +Would output to the screen: + +``` +warn: Hull Breach Detected on Deck 7! +``` + +Because of the file transport we set up, winston also logged the warning to 'somefile.log'. After the `logger.warn` call we just used, the log file, `somefile.log`, would contain the following output: + +``` +$ cat somefile.log +{'level':'warn','message':'Hull Breach Detected on Deck 7!'} +``` + +Note that winston's file logger formats the logs differently for file logging (JSON in this case) than it does for the console transport. + +Winston also supports logging to Riak, CouchDB, MongoDB and [many other transports](https://github.com/winstonjs/winston/blob/master/docs/transports.md). The `logger.warn` call we used before also put the same message into each database, according to the options we gave to each transport. + +For further information, please see the [thorough documentation for Winston.](https://github.com/indexzero/winston). 
diff --git a/locale/fr/knowledge/javascript-conventions/how-to-create-default-parameters-for-functions.md b/locale/fr/knowledge/javascript-conventions/how-to-create-default-parameters-for-functions.md new file mode 100644 index 000000000000..789a0a29082d --- /dev/null +++ b/locale/fr/knowledge/javascript-conventions/how-to-create-default-parameters-for-functions.md @@ -0,0 +1,83 @@ +--- +title: How To Create Default Parameters for Functions +date: '2011-08-26T10:08:50.000Z' +tags: + - javascript + - builtin +difficulty: 1 +layout: knowledge-post.hbs +--- + +Usually a function will take a set number of parameters, and require that all of them be present before it can be executed successfully. However, you will sometimes run into situations where you want to provide a default value for a parameter or take a variable number of parameters. Fortunately, from ES6/ES2015 **default parameters** is in the language specification: + +```js +const pow = (base, power = 2) => { + return Math.pow(base, power); +} + +console.log(pow(2)); // 4 +console.log(pow(2,10)); // 1024 +``` + +In the above code The function `pow` return square of a number or any other power specified in the function call because the argument `power` is given a default value of 2 so whenever no second argument is provided or the provided value is `undefined` the function `pow` will use 2 as the value of argument `power`. But there is a small gotcha in it: + +```js +const pow = (base, power = 2) => { + return Math.pow(base, power); +} + +console.log(pow(2, undefined)); // 4 +console.log(pow(2, null)); // 1 +``` + +In JavaScript there are many ways of indicating ["falsy"(false values)](/en/knowledge/javascript-conventions/what-are-truthy-and-falsy-values/) but out of them, only `undefined` will trigger the default parameter. This is the reason why `pow(2, undefined)` returns 4 and `pow(2, null)` returns 1. 
+ +JS PRO TIP: In `Math.pow(base, power)` if `power` is 0 or any other "falsy" value (except NaN) the result will always be 1. + +Unfortunately, previous versions of JavaScript (ES5 and below) does not support **default parameters** out of the box but over time, however, people have developed idioms to compensate. + +The first idiom is giving a default value for the last parameter. This is done by checking if the last parameter is `undefined` and setting it to a default value if it is. Sometimes people use the idiom: `optionalParameter = optionalParameter || defaultValue`. This can have some undesirable behaviour when they pass values that are equal to false such as `false`, `0`, and `""`. So a better way to do this is by explicitly checking that the optional parameter is `undefined`. Here is some code showing the two styles and the differing behaviour: + +```js +const example = function (optionalArg) { + optionalArg = optionalArg || "No parameter was passed"; + console.log(optionalArg); +} + +const betterExample = function (optionalArg) { + if (optionalArg === undefined) { + optionalArg = "No parameter was passed"; + } + console.log(optionalArg); +} + +console.log("Without parameter:"); +example(); +betterExample(); + +console.log("\nWith paramater:"); +example("parameter was passed"); +betterExample("parameter was passed"); + +console.log("\nEmpty String:"); +example(""); +betterExample(""); +``` + +The second idiom is when the optional value is in the middle it can cause some undesired effects since all the parameters are shifted over. The optional parameter is not the `undefined` value in this case - the last parameter is the `undefined` one. So you have to check if the last parameter is `undefined` and then manually fix all the other parameters before continuing in the code. This case is also valid for modern JavaScript(ES6/ES2015). 
The example shows you how to do that: + +```js +const example = function (param1, optParam, callback) { + if (callback === undefined) { + // only two parameters were passed, so the callback is actually in `optParam` + callback = optParam; + + //give `optParam` a default value + optParam = "and a default parameter"; + } + callback(param1, optParam); +} + +example("This is a necessary parameter", console.log); +example("This is a necessary parameter", "and an optional parameter", console.log); +``` diff --git a/locale/fr/knowledge/javascript-conventions/using-ECMA5-in-nodejs.md b/locale/fr/knowledge/javascript-conventions/using-ECMA5-in-nodejs.md new file mode 100644 index 000000000000..fc87a5ac645b --- /dev/null +++ b/locale/fr/knowledge/javascript-conventions/using-ECMA5-in-nodejs.md @@ -0,0 +1,97 @@ +--- +title: Using ECMA5 in Node.js +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - builtin + - globals +difficulty: 2 +layout: knowledge-post.hbs +--- + +When developing in the browser there are many wonderful built in JavaScript functions that we can't use because certain browsers don't implement them. As a result, most developers never use them. In Node, however we can assume that everyone has the same JavaScript implementation and as such can use these wonderful functions and not implement them over and over in our own libraries. + +The following is a list of some interesting api bits that aren't considered safe to use in a web setting but are built in to node's V8 engine. + +Note that V8 implements all of ECMA 3rd edition and parts of the new stuff in the [ECMA 5th edition](http://www.ecma-international.org/publications/standards/Ecma-262.htm) + +## Syntax extensions + +* `var obj = { get a() { return "something" }, set a() { "do nothing" } }` getter/setter syntax + +## Array + +* `Array.isArray(array)` - Returns true if the passed argument is an array. 
+ +## Array.prototype + +* `indexOf(value)` - Returns the first (least) index of an element within the array equal to the specified value, or -1 if none is found. +* `lastIndexOf(value)` - Returns the last (greatest) index of an element within the array equal to the specified value, or -1 if none is found. +* `filter(callback)` - Creates a new array with all of the elements of this array for which the provided filtering function returns true. +* `forEach(callback)` - Calls a function for each element in the array. +* `every(callback)` - Returns true if every element in this array satisfies the provided testing function. +* `map(callback)` - Creates a new array with the results of calling a provided function on every element in this array. +* `some(callback)` - Returns true if at least one element in this array satisfies the provided testing function. +* `reduce(callback[, initialValue])` - Apply a function simultaneously against two values of the array (from left-to-right) as to reduce it to a single value. +* `reduceRight(callback[, initialValue])` - Apply a function simultaneously against two values of the array (from right-to-left) as to reduce it to a single value. + +## Date + +* `Date.now()` - Returns the numeric value corresponding to the current time. + +## Date.prototype + +* `toISOString()` - + +## Object + +* `Object.create(proto, props)` - Creates a new object whose prototype is the passed in parent object and whose properties are those specified by props. +* `Object.keys(obj)` - Returns a list of the ownProperties of an object that are enumerable. +* `Object.defineProperty(obj, prop, desc)` - Defines a property on an object with the given descriptor +* `Object.defineProperties(obj, props)` - Adds own properties and/or updates the attributes of existing own properties of an object +* `Object.getOwnPropertyNames(obj)` - Returns a list of the ownProperties of an object including ones that are not enumerable. 
+* `Object.getPrototypeOf(obj)` - Returns the prototype of an object. +* `Object.getOwnPropertyDescriptor(obj, property)` - Returns an object with keys describing the description of a property (value, writable, enumerable, configurable) +* `Object.preventExtensions(obj)` - Prevents any new properties from being added to the given object. +* `Object.isExtensible(obj)` - Checks if Object.preventExtensions() has been called on this object. +* `Object.seal(obj)` - Prevents code from adding or deleting properties, or changing the descriptors of any property on an object. Property values can be changed however. +* `Object.isSealed(obj)` - Checks if Object.seal() has been called on this object. +* `Object.freeze(obj)` - Same as Object.seal, except property values cannot be changed. +* `Object.isFrozen(obj)` - Checks if Object.freeze() has been called on this object. + +## Object.prototype + +* `__defineGetter__(name, callback)` - (Mozilla extension, not ECMAScript 5) Associates a function with a property that, when accessed, executes that function and returns its return value. +* `__defineSetter__(name, callback)` - (Mozilla extension, not ECMAScript 5) Associates a function with a property that, when set, executes that function which modifies the property. +* `__lookupGetter__(name)` - (Mozilla extension, not ECMAScript 5) Returns the function associated with the specified property by the \_\_defineGetter\_\_ method. +* `__lookupSetter__(name)` - (Mozilla extension, not ECMAScript 5) Returns the function associated with the specified property by the \_\_defineSetter\_\_ method. +* `isPrototypeOf(obj)` - (EcmaScript 3 and 5) Returns true if `this` is a prototype of the passed in object. + +## Function.prototype + +* `bind(thisArg[, arg1[, argN]])` - Sets the value of 'this' inside the function to always be the value of thisArg when the function is called. 
Optionally, function arguments can be specified (arg1, arg2, etc) that will automatically be prepended to the argument list whenever this function is called. + +## JSON + +* `JSON.stringify(obj [, replacer [, space]])` - Takes any serializable object and returns the JSON representation as a string [More info](https://developer.mozilla.org/En/Using_JSON_in_Firefox) +* `JSON.parse(string)` - Takes a well formed JSON string and returns the corresponding JavaScript object. + +## String.prototype + +* `trim()` - Trims whitespace from both ends of the string +* `trimRight()` - Trims whitespace from the right side of the string +* `trimLeft()` - Trims whitespace from the left side of the string + +## Property Descriptor Defaults + +* `value` - undefined +* `get` - undefined +* `set` - undefined +* `writable` - false +* `enumerable` - false +* `configurable` - false + +# Missing features + +* `Object.__noSuchMethod__` (Mozilla extension, not ECMAScript 5) +* `"use strict";` syntax extension ([V8 issue](http://code.google.com/p/v8/issues/detail?id=919)) diff --git a/locale/fr/knowledge/javascript-conventions/what-are-the-built-in-timer-functions.md b/locale/fr/knowledge/javascript-conventions/what-are-the-built-in-timer-functions.md new file mode 100644 index 000000000000..52e993be2293 --- /dev/null +++ b/locale/fr/knowledge/javascript-conventions/what-are-the-built-in-timer-functions.md @@ -0,0 +1,103 @@ +--- +title: What are the built-in timer functions? +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - builtin + - globals +difficulty: 1 +layout: knowledge-post.hbs +--- + +There are two most common built-in timer functions, `setTimeout` and `setInterval`, which can be used to call a function at a later time. 
For an example usage: + +```js +setTimeout(function() { console.log("setTimeout: It's been one second!"); }, 1000); +setInterval(function() { console.log("setInterval: It's been one second!"); }, 1000); +``` + +An example output is: + +```bash +setTimeout: It's been one second! +setInterval: It's been one second! +setInterval: It's been one second! +setInterval: It's been one second! +setInterval: It's been one second! +... +``` + +As you can see the parameters to both are the same. The number second parameter says how long in milliseconds to wait before calling the function passed into the first parameter. The difference between the two functions is that `setTimeout` calls the callback only once while `setInterval` will call it over and over again. + +Typically you want to be careful with `setInterval` because it can cause some undesirable effects. If, for example, you wanted to make sure your server was up by pinging it every second, you might think to try something like this: + +```js +setInterval(ping, 1000); +``` + +This can cause problems, however, if your server is slow and it takes, for example, 3 seconds to respond to the first request. In the time it takes to get back the response, you would have sent off 3 more requests - not exactly desirable! Overall, this doesn't have a large impact when serving small static files. But if you're doing an expensive operation, such as a database query or any complex computation, this can have undesirable results. A common solution looks like this: + +```js +const recursive = function () { + console.log("It has been one second!"); + setTimeout(recursive,1000); +} +recursive(); +``` + +As you can see, it makes a call to the `recursive` function which, as it completes, makes a call to `setTimeout(recursive, 1000)` which makes it call `recursive` again in 1 second - thus having near the same effect as setInterval while being resilient to the unintended errors that can pile up. 
+ +You can clear the timers you set with `clearTimeout` and `clearInterval`. Their usages are very simple: + +```js +function never_call () { + console.log("You should never call this function"); +} + +const id1 = setTimeout(never_call,1000); +const id2 = setInterval(never_call,1000); + +clearTimeout(id1); +clearInterval(id2); +``` + +So if you keep track of the return values of the timers, you can easily unhook the timers. + +The final trick for the timer objects is you can pass parameters to the callback by passing more parameters to setTimeout and setInterval: + +```js +setTimeout(console.log, 1000, "This", "has", 4, "parameters"); +setInterval(console.log, 1000, "This only has one"); +``` + +The output is: + +```bash +This has 4 parameters +This only has one +This only has one +This only has one +This only has one +This only has one +... +``` + +#### setImmediate() + +`setImmediate()` is another built-in timer function which as the name suggest, runs immediately after the first iteration of the event loop is completed. In other words, `setImmediate()` is similar to a `setTimeout()` function with a `0ms` delay. The `setImmediate()` function can also take extra parameters that are passed when the callback is called: + +```js +console.log("This will be printed first"); +setImmediate(console.log, "This is an extra parameter"); +console.log("This will be printed second"); +``` + +The output is: + +```bash +This will be printed first +This will be printed second +This is an extra parameter +``` + +Remember that though `setImmediate()` has no delay (i.e, 0ms) this doesn't mean that the code will run synchronously. It simply means that there will be no delay (i.e, 0ms) after the first iteration of the event loop is completed i.e, all synchronous commands have been executed. 
diff --git a/locale/fr/knowledge/javascript-conventions/what-is-json.md b/locale/fr/knowledge/javascript-conventions/what-is-json.md new file mode 100644 index 000000000000..998cf3ec042a --- /dev/null +++ b/locale/fr/knowledge/javascript-conventions/what-is-json.md @@ -0,0 +1,128 @@ +--- +date: '2011-08-26T10:08:50.000Z' +tags: + - json + - stringify + - parse +title: What is JSON? +difficulty: 5 +layout: knowledge-post.hbs +--- + +JavaScript Object Notation, or JSON, is a lightweight data format that has become the defacto standard for the web. JSON can be represented as either a list of values, e.g. an Array, or a hash of properties and values, e.g. an Object. + +```json +// a JSON array +["one", "two", "three"] + +// a JSON object +{ "one": 1, "two": 2, "three": 3 } +``` + +## Encoding and Decoding + +JavaScript provides 2 methods for encoding data structures to json and encoding json back to JavaScript objects and arrays. They are both available on the `JSON` object that is available in the global scope. + +`JSON.stringify` takes a JavaScript object or array and returns a serialized string in the JSON format. + +```js +const data = { + name: "John Doe", + age: 32, + title: "Vice President of JavaScript" +} + +const jsonStr = JSON.stringify(data); + +console.log(jsonStr); + +// prints '{"name":"John Doe","age":32,"title":"Vice President of JavaScript"}' +``` + +`JSON.parse` takes a JSON string and decodes it to a JavaScript data structure. + +```js +const jsonStr = '{"name":"John Doe","age":32,"title":"Vice President of JavaScript"}'; + +const data = JSON.parse(jsonStr); + +console.log(data.title); + +// prints 'Vice President of JavaScript' +``` + +## What is valid JSON? + +There are a few rules to remember when dealing with data in JSON format. There are several gotchas that can produce invalid JSON as well. 
+ +* Empty objects and arrays are okay +* Strings can contain any unicode character, this includes object properties +* `null` is a valid JSON value on it's own +* All object properties should always be double quoted +* Object property values must be one of the following: String, Number, Boolean, Object, Array, null +* Number values must be in decimal format, no octal or hex representations +* Trailing commas on arrays are not allowed + +These are all examples of valid JSON. + +```json +{"name":"John Doe","age":32,"title":"Vice President of JavaScript"} + +["one", "two", "three"] + +// nesting valid values is okay +{"names": ["John Doe", "Jane Doe"] } + +[ { "name": "John Doe"}, {"name": "Jane Doe"} ] + +{} // empty hash + +[] // empty list + +null + +{ "key": "\uFDD0" } // unicode escape codes +``` + +These are all examples of bad JSON formatting. + +```json +{ name: "John Doe", 'age': 32 } // name and age should be in double quotes + +[32, 64, 128, 0xFFF] // hex numbers are not allowed + +{ "name": "John Doe", "age": undefined } // undefined is an invalid value + +// functions and dates are not allowed +{ "name": "John Doe", + "birthday": new Date('Fri, 26 Jan 2019 07:13:10 GMT'), + "getName": function() { + return this.name; + } +} +``` + +Calling `JSON.parse` with an invalid JSON string will result in a SyntaxError being thrown. If you are not sure of the validity of your JSON data, you can anticipate errors by wrapping the call in a try/catch block. + +Notice that the only complex values allowed in JSON are objects and arrays. Functions, dates and other types are excluded. This may not seem to make sense at first. But remember that JSON is a data format, not a format for transferring complex JavaScript objects along with their functionality. 
+ +## JSON Validators + +As JSON has become the most widely used data formate with well-defined rules to abide by, there are many validators available to assist your workflow: + +* **Online Validators**: If you are just playing around with JSON or checking someone's JSON (without IDEs/editors) then online validators could be of great help. For instance: [jsonlint.com](https://jsonlint.com) is a good online JSON validator and reformatter. +* **npm Packages**: If you are working with a team and want JSON Validation baked into your project or simply like to automate validation in your workflow then the large collection of npm packages are at your disposal. For instance: [jsonlint](https://www.npmjs.com/package/jsonlint) is a pure JavaScript version of the service provided at `jsonlint.com`. +* **Plugins for IDEs/editors**: There are many plugins/extensions available for most of the IDEs/editors which validate JSON for you. Some editors like `VS Code` come with JSON IntelliSense & Validation out of the box. + +## JSON in other languages + +Although JSON was inspired by the simplicity of JavaScript data structures, it's use is not limited to the JavaScript language. Many other languages have methods of transferring native hashes and lists into stringified JSON objects. Here's a quick example in ruby. + +```ruby +require 'json' + +data = { :one => 1 } +puts data.to_json + +# prints "{ \"one\": 1 }" +``` diff --git a/locale/fr/knowledge/javascript-conventions/what-is-the-arguments-object.md b/locale/fr/knowledge/javascript-conventions/what-is-the-arguments-object.md new file mode 100644 index 000000000000..dc85f2e98bed --- /dev/null +++ b/locale/fr/knowledge/javascript-conventions/what-is-the-arguments-object.md @@ -0,0 +1,65 @@ +--- +date: '2011-08-26T10:08:50.000Z' +tags: + - truthy + - falsy + - types + - coercion +title: What is the arguments object? 
+difficulty: 4 +layout: knowledge-post.hbs +--- + +The `arguments` object is a special construct available inside all function calls. It represents the list of arguments that were passed in when invoking the function. Since JavaScript allows functions to be called with any number args, we need a way to dynamically discover and access them. + +The `arguments` object is an array-like object. It has a length property that corresponds to the number of arguments passed into the function. You can access these values by indexing into the array, e.g. `arguments[0]` is the first argument. The only other property of `arguments` is callee, which ES5 forbids to use in `strict mode` more about it could be found [here](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Functions/arguments/callee). Here's an example that illustrates the properties of `arguments`. + +```js +const myfunc = function(one) { + arguments[0] === one; + arguments[1] === 2; + arguments.length === 3; +} + +myfunc(1, 2, 3); +``` + +This construct is very useful and gives JavaScript functions a lot of flexibility. But there is an important gotcha. The `arguments` object behaves like an array, but it is not an actual array. It does not have Array in its prototype chain and it does not respond to any array methods, e.g. `arguments.sort()` raises a TypeError. Instead, you need to copy the values into a true array first. With the advent of ES6 `Array.from()` method this is quite straightforward. + +```js +const myfunc = function(a, b, c) { + const args = Array.from(arguments); + console.log(args) // [1, 2, 3] +} + +myfunc(1, 2, 3); +``` + +NOTE: For ES5 and below, a normal `for` loop can do the trick. + +In certain cases you can still treat `arguments` as an array. You can use `arguments` in dynamic function invocations using apply. And most native Array methods will also accept `arguments` when dynamically invoked using call or apply. 
This technique also suggests another way to convert `arguments` into a true array using the `Array.slice` method. + +```js +myfunc.apply(obj, arguments). + +// concat arguments onto the +Array.prototype.concat.apply([1,2,3], arguments); + +// turn arguments into a true array +const args = Array.prototype.slice.call(arguments); + +// cut out first argument +args = Array.prototype.slice.call(arguments, 1); +``` + +### Arguments object in arrow function + +The `arrow functions` were added in the ECMAScript 2015 (ES6) specification as a syntactically compact alternative to a regular function expression. A drawback to this new alternative is the lack of `arguments object` (and `this`, `super`, and `new.target` keywords). A workaround for such cases is the use of `rest parameter`. The `rest parameter` allows you to represent an indefinite number of arguments as an array. For more details read [here](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Functions/rest_parameters). + +```js +const myfunc = (...args) => { + console.log('first parameter is ', args[0]); +} + +myfunc(1, 2, 3); +``` diff --git a/locale/fr/security.md b/locale/fr/security.md index 3b065601e208..f44ed8755d52 100644 --- a/locale/fr/security.md +++ b/locale/fr/security.md @@ -11,27 +11,23 @@ Signalez une faille de sécurité Node.js via [HackerOne](https://hackerone.com/ Votre signalement sera confirmé sous 24 heures, et vous recevrez une réponse détaillée à ce signalement dans un délai de 48 heures avec des indications sur les détails de la marche à suivre. -Après sa réponse initiale à votre signalement, l'équipe de sécurité s'efforcera de vous tenir informé·e de ses avancées en direction d'un correctif et d'une annonce publique ; elle pourrait vous recontacter pour demander des informations complémentaires ou des conseils à propos de la faille. -Les progrès seront notifiés au moins tous les cinq jours ; en pratique, ce délai est plutôt toutes les 24 à 48 heures. 
+Après sa réponse initiale à votre signalement, l'équipe de sécurité s'efforcera de vous tenir informé·e de ses avancées en direction d'un correctif et d'une annonce publique ; elle pourrait vous recontacter pour demander des informations complémentaires ou des conseils à propos de la faille. Les progrès seront notifiés au moins tous les cinq jours ; en pratique, ce délai est plutôt toutes les 24 à 48 heures. ### Le programme « Bug Bounty » de Node.js Le projet Node.js s'engage dans un programme officiel de récompense de signalement de failles (« _Bug Bounty_ ») pour des chercheurs en sécurité et pour une approche responsable des signalements. -Ce programme est géré au travers de la plate-forme HackerOne, où vous trouverez des informations complémentaires : [https://hackerone.com/nodejs](https://hackerone.com/nodejs). - ## Signaler une faille dans un module tiers -Les failles de sécurité de modules tiers sont à signaler auprès des personnes qui en assurent la maintenance, ainsi qu'à l'[équipe _Node Ecosystem Security_](https://hackerone.com/nodejs-ecosystem) ou en écrivant un e-mail à -[security-ecosystem@nodejs.org](mailto:security-ecosystem@nodejs.org). +Ce programme est géré au travers de la plate-forme HackerOne, où vous trouverez des informations complémentaires : [https://hackerone.com/nodejs](https://hackerone.com/nodejs). + +Les failles de sécurité de modules tiers sont à signaler auprès des personnes qui en assurent la maintenance, ainsi qu'à l'[équipe _Node Ecosystem Security_](https://hackerone.com/nodejs-ecosystem) ou en écrivant un e-mail à [security-ecosystem@nodejs.org](mailto:security-ecosystem@nodejs.org). Les détails de fonctionnement de ce processus se trouvent dans le [dépôt du _Security Working Group_](https://github.com/nodejs/security-wg/blob/master/processes/third_party_vuln_process.md). -Merci de participer à l'amélioration de la sécurité de Node.js et de son écosystème. 
Vos efforts et un signalement responsable sont fortement appréciés et seront reconnus. - ## Politique de divulgation -Voici la politique de divulgation des failles de sécurité pour Node.js : +Merci de participer à l'amélioration de la sécurité de Node.js et de son écosystème. Vos efforts et un signalement responsable sont fortement appréciés et seront reconnus. * Le signalement est reçu et assigné à un·e responsable. Cette personne va coordonner la production et la livraison du correctif. Le problème est confirmé et une liste des versions affectées est déterminée. Le code est audité afin de repérer d'éventuels problèmes similaires. Les correctifs sont préparés pour chaque branche de Node.js encore en maintenance. Ces correctifs ne sont pas directement inclus dans le dépôt public et sont mis de côté en attendant une annonce publique. @@ -45,12 +41,11 @@ Voici la politique de divulgation des failles de sécurité pour Node.js : ## Recevoir les alertes de sécurité -Les alertes de sécurité sont relayées sur les canaux suivants. +Voici la politique de divulgation des failles de sécurité pour Node.js : * -* [https://nodejs.org/fr/blog](https://nodejs.org/fr/blog) +* [https://nodejs.org/fr/blog](https://nodejs.org/en/blog/) ## Commentaires à propos de cette politique de sécurité -Si vous avez des suggestions sur la manière d'améliorer ce processus, merci de nous en faire part avec une [_pull request_](https://github.com/nodejs/nodejs.org) -ou [en ouvrant une _issue_](https://github.com/nodejs/security-wg/issues/new) pour en discuter. +Les alertes de sécurité sont relayées sur les canaux suivants. 
diff --git a/locale/fr/site.json b/locale/fr/site.json index 5f9fc2beaf2e..c16dbf6586f6 100644 --- a/locale/fr/site.json +++ b/locale/fr/site.json @@ -13,6 +13,9 @@ "all-downloads": "Toutes les options de téléchargement", "nightly": "Versions quotidiennes", "chakracore-nightly": "Versions quotidiennes de Node-ChakraCore", + "unofficial-builds": "Unofficial builds", + "previous": "Previous", + "next": "Next", "feeds": [ { "link": "feed/blog.xml", @@ -37,6 +40,10 @@ "link": "about/governance", "text": "Gouvernance" }, + "community": { + "link": "about/community", + "text": "Community" + }, "workinggroups": { "link": "about/working-groups", "text": "Groupes de travail" @@ -71,7 +78,9 @@ }, "shasums": { "link": "SHASUMS256.txt.asc", - "text": "SHASUMS signés pour les fichiers des versions" + "text": "SHASUMS signés pour les fichiers des versions", + "verify-link": "https://github.com/nodejs/node#verifying-binaries", + "verify-text": "How to verify" }, "install-on-linux": { "text": "Installing Node.js via binary archive" @@ -96,6 +105,10 @@ "guides": { "link": "docs/guides", "text": "Guides" + }, + "dependencies": { + "link": "docs/meta/topics/dependencies", + "text": "Dependencies" } }, "getinvolved": { @@ -105,6 +118,10 @@ "link": "get-involved/code-and-learn", "text": "Coder + Apprendre" }, + "collab-summit": { + "link": "get-involved/collab-summit", + "text": "Collab Summit" + }, "contribute": { "link": "get-involved/contribute", "text": "Contribuer" @@ -112,6 +129,10 @@ "conduct": { "link": "https://github.com/nodejs/node/blob/master/doc/guides/contributing/coc.md#code-of-conduct", "text": "Code de conduite" + }, + "node-meetups": { + "link": "get-involved/node-meetups", + "text": "Node.js Meetups" } }, "security": { diff --git a/locale/ro/404.md b/locale/ro/404.md new file mode 100644 index 000000000000..f3e5a0fc4d15 --- /dev/null +++ b/locale/ro/404.md @@ -0,0 +1,9 @@ +--- +layout: page.hbs +permalink: false +title: 404 +--- + +## 404: Page could not be 
found + +### ENOENT: no such file or directory diff --git a/locale/ro/about/community.md b/locale/ro/about/community.md new file mode 100644 index 000000000000..64bf747374e3 --- /dev/null +++ b/locale/ro/about/community.md @@ -0,0 +1,57 @@ +--- +title: Community Committee +layout: about.hbs +--- + +# Community Committee + +The Community Committee (CommComm) is a top-level committee in the Node.js Foundation. The CommComm has authority over outward-facing community outreach efforts, including: + +* Community [Evangelism](https://github.com/nodejs/evangelism) +* Education Initiatives +* Cultural Direction of Node.js Foundation +* Community Organization Outreach +* Translation and Internationalization +* Project Moderation/Mediation +* Public Outreach and [Publications](https://medium.com/the-node-js-collection) + +There are four types of involvement with the Community Committee: + +* A **Contributor** is any individual creating or commenting on an issue or pull request. +* A **Collaborator** is a contributor who has been given write access to the repository +* An **Observer** is any individual who has requested or been requested to attend a CommComm meeting. It is also the first step to becoming a Member. +* A **Member** is a collaborator with voting rights who has met the requirements of participation and voted in by the CommComm voting process. + +For the current list of Community Committee members, see the project's [README.md](https://github.com/nodejs/community-committee). + +## Contributors and Collaborators + +It is the mission of CommComm to further build out the Node.js Community. If you're reading this, you're already a part of that community – and as a part of the Node.js Community, we'd love to have your help! + +The [nodejs/community-committee](https://github.com/nodejs/community-committee) GitHub repository is a great place to start. 
Check out the [issues labeled "Good first issue"](https://github.com/nodejs/community-committee/labels/good%20first%20issue) to see where we're looking for help. If you have your own ideas on how we can engage and build the community, feel free to open your own issues, create pull requests with improvements to our existing work, or help us by sharing your thoughts and ideas in the ongoing discussions we're having in GitHub. + +You can further participate in our ongoing efforts around community building - like localization, evangelism, the Node.js Collection, and others - by digging into their respective repositories and getting involved! + +Before diving in, please be sure to read the [Collaborator Guide](https://github.com/nodejs/community-committee/blob/master/governance/COLLABORATOR_GUIDE.md). + +If you're interested in participating in the Community Committee as a committee member, you should read the section below on **Observers and Membership**, and create an issue asking to be an Observer in our next Community Committee meeting. You can find a great example of such an issue [here](https://github.com/nodejs/community-committee/issues/142). + +## Observers and Membership + +If you're interested in becoming more deeply involved with the Community Committee and its projects, we encourage you to become an active observer, and work toward achieving member status. To become a member you must: + +1. Attend the bi-weekly meetings, investigate issues tagged as good first issue, file issues and pull requests, and provide insight via GitHub as a contributor or collaborator. +2. Request to become an Observer by filing an issue. Once added as an Observer to meetings, we will track attendance and participation for 3 months, in accordance with our governance guidelines. You can find a great example of such an issue [here](https://github.com/nodejs/community-committee/issues/142). +3. 
When you meet the 3 month minimum attendance, and participation expectations, the CommComm will vote to add you as a member. + +Membership is for 6 months. The group will ask on a regular basis if the expiring members would like to stay on. A member just needs to reply to renew. There is no fixed size of the CommComm. However, the expected target is between 9 and 12. You can read more about membership, and other administrative details, in our [Governance Guide](https://github.com/nodejs/community-committee/blob/master/GOVERNANCE.md). + +Regular CommComm meetings are held bi-monthly in a Zoom video conference, and broadcast live to the public on YouTube. Any community member or contributor can ask that something be added to the next meeting's agenda by logging a GitHub Issue. + +Meeting announcements and agendas are posted before the meeting begins in the organization's [GitHub issues](https://github.com/nodejs/community-committee/issues). You can also find the regularly scheduled meetings on the [Node.js Calendar](https://nodejs.org/calendar). To follow Node.js meeting livestreams on YouTube, subscribe to the Node.js Foundation [YouTube channel](https://www.youtube.com/channel/UCQPYJluYC_sn_Qz_XE-YbTQ). Be sure to click the bell to be notified of new videos! + +## Consensus Seeking Process + +The CommComm follows a [Consensus Seeking](https://en.wikipedia.org/wiki/Consensus-seeking_decision-making) decision making model. + +When an agenda item has appeared to reach a consensus, the moderator will ask "Does anyone object?" as a final call for dissent from the consensus. If a consensus cannot be reached that has no objections then a majority wins vote is called. It is expected that the majority of decisions made by the CommComm are via a consensus seeking process and that voting is only used as a last-resort. 
diff --git a/locale/ro/about/governance.md b/locale/ro/about/governance.md new file mode 100644 index 000000000000..d5e812d41a71 --- /dev/null +++ b/locale/ro/about/governance.md @@ -0,0 +1,24 @@ +--- +title: Project Governance +layout: about.hbs +--- + +# Project Governance + +## Consensus Seeking Process + +The Node.js project follows a [Consensus Seeking](https://en.wikipedia.org/wiki/Consensus-seeking_decision-making) decision making model. + +## Collaborators + +The [nodejs/node](https://github.com/nodejs/node) core GitHub repository is maintained by the Collaborators who are added by the Technical Steering Committee ([TSC](https://github.com/nodejs/TSC)) on an ongoing basis. + +Individuals making significant and valuable contributions are made Collaborators and given commit-access to the project. These individuals are identified by the TSC and their nomination is discussed with the existing Collaborators. + +For the current list of Collaborators, see the project's [README.md](https://github.com/nodejs/node/blob/master/README.md#current-project-team-members). + +A guide for Collaborators is maintained at [COLLABORATOR_GUIDE.md](https://github.com/nodejs/node/blob/master/COLLABORATOR_GUIDE.md). + +## Top Level Committees + +The project is governed jointly by the [Technical Steering Committee (TSC)](https://github.com/nodejs/TSC/blob/master/TSC-Charter.md) which is responsible for high-level guidance of the project, and the [Community Committee (CommComm)](https://github.com/nodejs/community-committee/blob/master/Community-Committee-Charter.md) which is responsible for guiding and extending the Node.js community. 
diff --git a/locale/ro/about/index.md b/locale/ro/about/index.md new file mode 100644 index 000000000000..e15184cf6741 --- /dev/null +++ b/locale/ro/about/index.md @@ -0,0 +1,38 @@ +--- +layout: about.hbs +title: About +trademark: Trademark +--- + +# About Node.js® + +As an asynchronous event-driven JavaScript runtime, Node.js is designed to build scalable network applications. In the following "hello world" example, many connections can be handled concurrently. Upon each connection, the callback is fired, but if there is no work to be done, Node.js will sleep. + +```javascript +const http = require('http'); + +const hostname = '127.0.0.1'; +const port = 3000; + +const server = http.createServer((req, res) => { + res.statusCode = 200; + res.setHeader('Content-Type', 'text/plain'); + res.end('Hello World'); +}); + +server.listen(port, hostname, () => { + console.log(`Server running at http://${hostname}:${port}/`); +}); +``` + +This is in contrast to today's more common concurrency model, in which OS threads are employed. Thread-based networking is relatively inefficient and very difficult to use. Furthermore, users of Node.js are free from worries of dead-locking the process, since there are no locks. Almost no function in Node.js directly performs I/O, so the process never blocks. Because nothing blocks, scalable systems are very reasonable to develop in Node.js. + +If some of this language is unfamiliar, there is a full article on [Blocking vs. Non-Blocking](/en/docs/guides/blocking-vs-non-blocking/). + +--- + +Node.js is similar in design to, and influenced by, systems like Ruby's [Event Machine](https://github.com/eventmachine/eventmachine) and Python's [Twisted](https://twistedmatrix.com/trac/). Node.js takes the event model a bit further. It presents an [event loop](/en/docs/guides/event-loop-timers-and-nexttick/) as a runtime construct instead of as a library. In other systems, there is always a blocking call to start the event-loop. 
Typically, behavior is defined through callbacks at the beginning of a script, and at the end a server is started through a blocking call like `EventMachine::run()`. In Node.js, there is no such start-the-event-loop call. Node.js simply enters the event loop after executing the input script. Node.js exits the event loop when there are no more callbacks to perform. This behavior is like browser JavaScript — the event loop is hidden from the user. + +HTTP is a first-class citizen in Node.js, designed with streaming and low latency in mind. This makes Node.js well suited for the foundation of a web library or framework. + +Node.js being designed without threads doesn't mean you can't take advantage of multiple cores in your environment. Child processes can be spawned by using our [`child_process.fork()`][] API, and are designed to be easy to communicate with. Built upon that same interface is the [`cluster`][] module, which allows you to share sockets between processes to enable load balancing over your cores. diff --git a/locale/ro/about/privacy.md b/locale/ro/about/privacy.md new file mode 100644 index 000000000000..678a0bf4127d --- /dev/null +++ b/locale/ro/about/privacy.md @@ -0,0 +1,94 @@ +--- +title: Privacy Policy +layout: about.hbs +--- + +# Privacy Policy + +NODE.JS FOUNDATION (the "Foundation”) is committed to protecting the privacy of its users. This Privacy Policy (or the “Policy”) applies to its websites (whether currently or in the future supported, hosted or maintained, including without limitation nodejs.org, the “Sites”) and describes the information the Foundation collects about users of the Sites (“users”) and how that information may be used. + +Read the Privacy Policy carefully. By using any Site, you will be deemed to have accepted the terms of the Policy. If you do not agree to accept the terms of the Privacy Policy, you are directed to discontinue accessing or otherwise using the Sites or any materials obtained from the Sites. 
+ +## Changes to the Privacy Policy +The Foundation reserves the right to update and change this Privacy Policy from time to time. Each time a user uses the Sites, the current version of the Privacy Policy applies. Accordingly, a user should check the date of this Privacy Policy (which appears at the top) and review for any changes since the last version. If a user does not agree to the Privacy Policy, the user should not use any of the Sites. Continued use of any of the Sites following any revision of this Privacy Policy constitutes an acceptance of any change. + +## What Does this Privacy Policy Cover? +This Privacy Policy covers the Foundation’s treatment of aggregate information collected by the Sites and personal information that you provide in connection with your use of the Sites. This Policy does not apply to the practices of third parties that the Foundation does not own or control, including but not limited to third party services you access through the Foundation, or to individuals that the Foundation does not employ or manage. + +## Children Under 13 Years of Age +Unless specifically indicated within a Site, the Sites are not intended for minor children not of age (including without limitation those under 13), and they should not use the Sites. If you are under 18, you may use the Site only with involvement of a parent or guardian or if you are an emancipated minor. Except as specifically indicated within a Site, we do not knowingly collect or solicit information from, market to or accept services from children. If we become aware that a child under 13 has provided us with personal information without parental consent, we will take reasonable steps to remove such information and terminate the child’s account. If you become aware that a child has provided us with personally identifiable information without parental consent, please contact us at privacy@nodejs.org so we may remove the information. 
+ +## Information About Users that the Foundation Collects +On the Sites, users may order products or services, and register to receive materials. Information collected on the Sites includes community forum content, diaries, profiles, photographs, names, unique identifiers (e.g., social media handles or usernames), contact and billing information (e.g., email address, postal address, telephone, fax), and transaction information. In order to access certain personalized services on the Sites, you may be asked to also create and store a username and password for an account from the Foundation. + +In order to tailor the Foundation’s subsequent communications to users and continuously improve the Sites’ products and services, the Foundation may also ask users to provide information regarding their interests, demographics, experience and detailed contact preferences. The Foundation and third party advertising companies may track information concerning a user’s use of the Sites, such as a user’s IP address. 
+ +## How the Foundation Uses the Information Collected +The Foundation may use collected information for any lawful purpose related to the Foundation’s business, including, but not limited to: + +* To understand a user’s needs and create content that is relevant to the user; +* To generate statistical studies; +* To conduct market research and planning by sending user surveys; +* To notify user referrals of services, information, or products when a user requests that the Foundation send such information to referrals; +* To improve services, information, and products; +* To help a user complete a transaction, or provide services or customer support; +* To communicate back to the user; +* To update the user on services, information, and products; +* To personalize a Site for the user; +* To notify the user of any changes with a Site that may affect the user; +* To enforce terms of use on a Site; and +* To allow the user to purchase products, access services, or otherwise engage in activities the user selects. + +User names, identifications ("IDs"), and email addresses (as well as any additional information that a user may choose to post) may be publicly available on a Site when users voluntarily and publicly disclose personal information, such as when a user posts information in conjunction with content subject to an Open Source license, or as part of a message posted to a public forum or a publicly released software application. The personal information you may provide to the Foundation may reveal or allow others to discern aspects of your life that are not expressly stated in your profile (for example, your picture or your name may reveal your hair color, race or approximate age). 
By providing personal information to us when you create or update your account and profile or post a photograph, you are expressly and voluntarily accepting our Terms of Use and freely accepting and agreeing to our processing of your personal information in ways set out by this Privacy Policy. Supplying information to us, including any information deemed “sensitive” by applicable law, is entirely voluntary on your part. You may withdraw your consent to the Foundation’s collection and processing of your information by closing your account. You should be aware that your information may continue to be viewable to others after you close your account, such as on cached pages on Internet search engines. Users may not be able to change or remove public postings once posted. Such information may be used by visitors of these pages to send unsolicited messages. The Foundation is not responsible for any consequences which may occur from the third-party use of information that a user chooses to submit to public pages. + +## Opt Out +A user will always be able to make the decision whether to proceed with any activity that requests personal information including personally identifiable information. If a user does not provide requested information, the user may not be able to complete certain transactions. + +Users are not licensed to add other users to a Site (even users who entered into transactions with them) or to their mailing lists without written consent. The Foundation encourages users to evaluate privacy and security policies of any of the Sites’ transaction partners before entering into transactions or choosing to disclose information. + +## Email +The Foundation may use (or provide to The Linux Foundation or other third party contractors to use) contact information received by the Foundation to email any user with respect to any Foundation or project of The Linux Foundation (a “Project”) opportunity, event or other matter. 
+ +If a user no longer wishes to receive emails from the Foundation or any Project or any Site, the Foundation will (or, if applicable, have The Linux Foundation) provide instructions in each of its emails on how to be removed from any lists. The Foundation will make commercially reasonable efforts to honor such requests. + +## Photographs +Users may have the opportunity to submit photographs to the Sites for product promotions, contests, and other purposes to be disclosed at the time of request. In these circumstances, the Sites are designed to allow the public to view, download, save, and otherwise access the photographs posted. By submitting a photograph, users waive any privacy expectations users have with respect to the security of such photographs, and the Foundation’s use or exploitation of users’ likeness. You may submit a photograph only if you are the copyright holder or if you are authorized to do so under license by the copyright holder, and by submitting a photograph you agree to indemnify and hold the Foundation, its directors, officers, employees and agents harmless from any claims arising out of your submission. By submitting a photograph, you grant the Foundation a perpetual, worldwide, royalty-free license to use the photograph in any media now known or hereinafter invented for any business purpose that the Foundation, at its sole discretion, may decide. + +## Links to Third-Party Sites and Services +The Sites may permit you to access or link to third party websites and information on the Internet, and other websites may contain links to the Sites. When a user uses these links, the user leaves the Sites. The Foundation has not reviewed these third party sites, does not control, and is not responsible for, any of the third party sites, their content or privacy practices. 
The privacy and security practices of websites accessed from the Sites are not covered by this Privacy Policy, and the Foundation is not responsible for the privacy or security practices or the content of such websites, including but not limited to the third party services you access through the Foundation. If a user decides to access any of the linked sites, the Foundation encourages the user to read the privacy statements of those sites. The user accesses such sites at user’s own risk. + +We may receive information when you use your account to log into a third-party site or application in order to recommend tailored content or advertising to you and to improve your user experience on our site. We may provide reports containing aggregated impression information to third parties to measure Internet traffic and usage patterns. + +## Service Orders +To purchase services, users may be asked to be directed to a third party site, such as PayPal, to pay for their purchases. If applicable, the third party site may collect payment information directly to facilitate a transaction. The Foundation will only record the result of the transaction and any references to the transaction record provided by the third party site. The Foundation is not responsible for the services provided or information collected on such third party sites. 
+ +## Sharing of Information +The Foundation may disclose personal or aggregate information that is associated with your profile as described in this Privacy Policy, as permitted by law or as reasonably necessary to: (1) comply with a legal requirement or process, including, but not limited to, civil and criminal subpoenas, court orders or other compulsory disclosures; (2) investigate and enforce this Privacy Policy or our then-current Terms of Use, if any; (3) respond to claims of a violation of the rights of third parties; (4) respond to customer service inquiries; (5) protect the rights, property, or safety of the Foundation, our users, or the public; or (6) as part of the sale of all or a portion of the assets of the Foundation or as a change in control of the organization or one of its affiliates or in preparation for any of these events. The Foundation reserves the right to supply any such information to any organization into which the Foundation may merge in the future or to which it may make any transfer. Any third party to which the Foundation transfers or sells all or any of its assets will have the right to use the personal and other information that you provide in the manner set out in this Privacy Policy. + +## Is Information About Me Secure? +To keep your information safe, prevent unauthorized access or disclosure, maintain data accuracy, and ensure the appropriate use of information, the Foundation implements industry-standard physical, electronic, and managerial procedures to safeguard and secure the information the Foundation collects. However, the Foundation does not guarantee that unauthorized third parties will never defeat measures taken to prevent improper use of personally identifiable information. + +Access to users’ nonpublic personally identifiable information is restricted to the Foundation and Linux Foundation personnel, including contractors for each such organization on a need-to-know basis. + +User passwords are keys to accounts. 
Use unique numbers, letters, and special characters for passwords and do not disclose passwords to other people in order to prevent loss of account control. Users are responsible for all actions taken in their accounts. Notify the Foundation of any password compromises, and change passwords periodically to maintain account protection. + +In the event the Foundation becomes aware that the security of a Site has been compromised or user’s personally identifiable information has been disclosed to unrelated third parties as a result of external activity, including but not limited to security attacks or fraud, the Foundation reserves the right to take reasonable appropriate measures, including but not limited to, investigation and reporting, and notification to and cooperation with law enforcement authorities. + +While our aim is to keep data from unauthorized or unsafe access, modification or destruction, no method of transmission on the Internet, or method of electronic storage, is 100% secure and we cannot guarantee its absolute security. + +## Data Protection +Given the international scope of the Foundation, personal information may be visible to persons outside your country of residence, including to persons in countries that your own country’s privacy laws and regulations deem deficient in ensuring an adequate level of protection for such information. If you are unsure whether this privacy statement is in conflict with applicable local rules, you should not submit your information. If you are located within the European Union, you should note that your information will be transferred to the United States, which is deemed by the European Union to have inadequate data protection. 
Nevertheless, in accordance with local laws implementing the European Union Privacy Directive on the protection of individuals with regard to the processing of personal data and on the free movement of such data, individuals located in countries outside of the United States of America who submit personal information do thereby consent to the general use of such information as provided in this Privacy Policy and to its transfer to and/or storage in the United States of America. By utilizing any Site and/or directly providing personal information to us, you hereby agree to and acknowledge your understanding of the terms of this Privacy Policy, and consent to have your personal data transferred to and processed in the United States and/or in other jurisdictions as determined by the Foundation, notwithstanding your country of origin, or country, state and/or province of residence. If you do not want your personal information collected and used by the Foundation, please do not visit or use the Sites. + +## Governing Law +This Privacy Policy is governed by the laws of the State of California, United States of America without giving any effect to the principles of conflicts of law. + +## California Privacy Rights +The California Online Privacy Protection Act (“CalOPPA”) permits customers who are California residents and who have provided the Foundation with “personal information” as defined in CalOPPA to request certain information about the disclosure of information to third parties for their direct marketing purposes. If you are a California resident with a question regarding this provision, please contact privacy@nodejs.org. + +Please note that the Foundation does not respond to “do not track” signals or other similar mechanisms intended to allow California residents to opt-out of Internet tracking under CalOPPA. The Foundation may track and/or disclose your online activities over time and across different websites to third parties when you use our services. 
+ +## What to Do in the Event of Lost or Stolen Information +You must promptly notify us if you become aware that any information provided by or submitted to our Site or through our Product is lost, stolen, or used without permission at privacy@nodejs.org. + +## Questions or Concerns +If you have any questions or concerns regarding privacy at the Foundation, please send us a detailed message to [privacy@nodejs.org](mailto:privacy@nodejs.org). diff --git a/locale/ro/about/releases.md b/locale/ro/about/releases.md new file mode 100644 index 000000000000..ceada1ef7a7e --- /dev/null +++ b/locale/ro/about/releases.md @@ -0,0 +1,22 @@ +--- +layout: about-release-schedule.hbs +title: Releases +statuses: + maintenance: 'Maintenance LTS' + active: 'Active LTS' + current: 'Current' + pending: 'Pending' +columns: + - 'Release' + - 'Status' + - 'Codename' + - 'Initial Release' + - 'Active LTS Start' + - 'Maintenance LTS Start' + - 'End-of-life' +schedule-footer: Dates are subject to change. +--- + +# Releases + +Major Node.js versions enter _Current_ release status for six months, which gives library authors time to add support for them. After six months, odd-numbered releases (9, 11, etc.) become unsupported, and even-numbered releases (10, 12, etc.) move to _Active LTS_ status and are ready for general use. _LTS_ release status is "long-term support", which typically guarantees that critical bugs will be fixed for a total of 30 months. Production applications should only use _Active LTS_ or _Maintenance LTS_ releases. diff --git a/locale/ro/about/resources.md b/locale/ro/about/resources.md new file mode 100644 index 000000000000..beb4fd4aaa78 --- /dev/null +++ b/locale/ro/about/resources.md @@ -0,0 +1,31 @@ +--- +layout: about.hbs +title: Logos and Graphics +--- + +# Resources + +## Logo Downloads + +Please review the [trademark policy](/en/about/trademark/) for information about permissible use of Node.js® logos and marks. 
+ +Guidelines for the visual display of the Node.js mark are described in the [Visual Guidelines](/static/documents/foundation-visual-guidelines.pdf). + + + + + + + + + + + + + + + + + + +
Node.js on light backgroundNode.js on dark background
Node.js standard AINode.js reversed AI
Node.js on light backgroundNode.js on dark background
Node.js standard with less color AINode.js reversed with less color AI
diff --git a/locale/ro/about/trademark.md b/locale/ro/about/trademark.md new file mode 100644 index 000000000000..b9d1308145d1 --- /dev/null +++ b/locale/ro/about/trademark.md @@ -0,0 +1,14 @@ +--- +layout: about.hbs +title: Trademark Policy +--- + +# Trademark Policy + +The Node.js trademarks, service marks, and graphics marks are symbols of the quality, performance, and ease of use that people have come to associate with the Node.js software and project. To ensure that the Node.js marks continue to symbolize these qualities, we must ensure that the marks are only used in ways that do not mislead people or cause them to confuse Node.js with other software of lower quality. If we don’t ensure the marks are used in this way, it cannot only confuse users, it can make it impossible to use the mark to protect against people who maliciously exploit the mark in the future. The primary goal of this policy is to make sure that this doesn’t happen to the Node.js mark, so that the community and users of Node.js are always protected in the future. + +At the same time, we’d like community members to feel comfortable spreading the word about Node.js and participating in the Node.js community. Keeping that goal in mind, we’ve tried to make the policy as flexible and easy to understand as legally possible. + +Please read the [full policy](/static/documents/trademark-policy.pdf). If you have any questions don't hesitate to [email us](mailto:trademark@nodejs.org). + +Guidelines for the visual display of the Node.js mark are described in the [Visual Guidelines](/static/documents/foundation-visual-guidelines.pdf). 
diff --git a/locale/ro/about/working-groups.md b/locale/ro/about/working-groups.md new file mode 100644 index 000000000000..06c1f11a5fea --- /dev/null +++ b/locale/ro/about/working-groups.md @@ -0,0 +1,199 @@ +--- +layout: about.hbs +title: Working Groups +--- + +# Core Working Groups + + +Core Working Groups are created by the [Technical Steering Committee (TSC)](https://github.com/nodejs/TSC/blob/master/TSC-Charter.md). + +## Current Working Groups + +* [Addon API](#addon-api) +* [Benchmarking](#benchmarking) +* [Build](#build) +* [Diagnostics](#diagnostics) +* [Docker](#docker) +* [Evangelism](#evangelism) +* [i18n](#i18n) +* [Release](#release) +* [Security](#security) +* [Streams](#streams) + +### [Addon API](https://github.com/nodejs/nan) + +The Addon API Working Group is responsible for maintaining the NAN project and corresponding _nan_ package in npm. The NAN project makes available an abstraction layer for native add-on authors for Node.js, assisting in the writing of code that is compatible with many actively used versions of Node.js, V8 and libuv. + +Responsibilities include: + +* Maintaining the [NAN](https://github.com/nodejs/nan) GitHub repository, including code, issues and documentation. +* Maintaining the [addon-examples](https://github.com/nodejs/node-addon-examples) GitHub repository, including code, issues and documentation. +* Maintaining the C++ Addon API within the Node.js project, in subordination to the Node.js TSC. +* Maintaining the Addon documentation within the Node.js project, in subordination to the Node.js TSC. +* Maintaining the _nan_ package in npm, releasing new versions as appropriate. +* Messaging about the future of the Node.js and NAN interface to give the community advance notice of changes. + +The current members can be found in their [README](https://github.com/nodejs/nan#collaborators). 
+ +### [Benchmarking](https://github.com/nodejs/benchmarking) + +The purpose of the Benchmark Working Group is to gain consensus on an agreed set of benchmarks that can be used to: + +* track and evangelize performance gains made between Node.js releases +* avoid performance regressions between releases + +Responsibilities include: + +* Identifying 1 or more benchmarks that reflect customer usage. Likely will need more than one to cover typical Node.js use cases including low-latency and high concurrency +* Working to get community consensus on the list chosen +* Adding regular execution of chosen benchmarks to Node.js builds +* Tracking/publicizing performance between builds/releases + +### [Build](https://github.com/nodejs/build) + +The Build Working Group's purpose is to create and maintain a distributed automation infrastructure. + +Responsibilities include: + +* Producing packages for all target platforms. +* Running tests. +* Running performance testing and comparisons. +* Creating and managing build-containers. + +### [Diagnostics](https://github.com/nodejs/diagnostics) + +The Diagnostics Working Group's purpose is to surface a set of comprehensive, documented, and extensible diagnostic interfaces for use by Node.js tools and JavaScript VMs. + +Responsibilities include: + +* Collaborating with V8 to integrate `v8_inspector` into Node.js. +* Collaborating with V8 to integrate `trace_event` into Node.js. +* Collaborating with Core to refine `async_wrap` and `async_hooks`. +* Maintaining and improving OS trace system integration (e.g. ETW, LTTNG, dtrace). +* Documenting diagnostic capabilities and APIs in Node.js and its components. +* Exploring opportunities and gaps, discussing feature requests, and addressing conflicts in Node.js diagnostics. +* Fostering an ecosystem of diagnostics tools for Node.js. +* Defining and adding interfaces/APIs in order to allow dumps to be generated when needed. 
+* Defining and adding common structures to the dumps generated in order to support tools that want to introspect those dumps. + +### [Docker](https://github.com/nodejs/docker-node) + +The Docker Working Group's purpose is to build, maintain, and improve official Docker images for the Node.js project. + +Responsibilities include: + +* Keeping the official Docker images updated in line with new Node.js releases. +* Decide and implement image improvements and/or fixes. +* Maintain and improve the images' documentation. + +### [Evangelism](https://github.com/nodejs/evangelism) + +The Evangelism Working Group promotes the accomplishments of Node.js and lets the community know how they can get involved. + +Responsibilities include: + +* Facilitating project messaging. +* Managing official project social media. +* Handling the promotion of speakers for meetups and conferences. +* Handling the promotion of community events. +* Publishing regular update summaries and other promotional content. + +### [i18n](https://github.com/nodejs/i18n) + +The i18n Working Groups handle more than just translations. They are endpoints for community members to collaborate with each other in their language of choice. + +Each team is organized around a common spoken language. Each language community might then produce multiple localizations for various project resources. + +Responsibilities include: + +* Translating any Node.js materials they believe are relevant to their community. +* Reviewing processes for keeping translations up to date and of high quality. +* Managing and monitoring social media channels in their language. +* Promoting Node.js speakers for meetups and conferences in their language. + +Each language community maintains its own membership. 
+ +* [nodejs-ar - Arabic (العَرَبِيَّة)](https://github.com/nodejs/nodejs-ar) +* [nodejs-bg - Bulgarian (български)](https://github.com/nodejs/nodejs-bg) +* [nodejs-bn - Bengali (বাংলা)](https://github.com/nodejs/nodejs-bn) +* [nodejs-zh-CN - Chinese (简体中文)](https://github.com/nodejs/nodejs-zh-CN) +* [nodejs-cs - Czech (Čeština)](https://github.com/nodejs/nodejs-cs) +* [nodejs-da - Danish (Dansk)](https://github.com/nodejs/nodejs-da) +* [nodejs-de - German (Deutsch)](https://github.com/nodejs/nodejs-de) +* [nodejs-el - Greek (Ελληνικά)](https://github.com/nodejs/nodejs-el) +* [nodejs-es - Spanish (Español)](https://github.com/nodejs/nodejs-es) +* [nodejs-fa - Persian (فارسی)](https://github.com/nodejs/nodejs-fa) +* [nodejs-fi - Finnish (Suomi)](https://github.com/nodejs/nodejs-fi) +* [nodejs-fr - French (Français)](https://github.com/nodejs/nodejs-fr) +* [nodejs-he - Hebrew (עברית)](https://github.com/nodejs/nodejs-he) +* [nodejs-hi - Hindi (हिन्दी)](https://github.com/nodejs/nodejs-hi) +* [nodejs-hu - Hungarian (Magyar)](https://github.com/nodejs/nodejs-hu) +* [nodejs-id - Indonesian (Bahasa Indonesia)](https://github.com/nodejs/nodejs-id) +* [nodejs-it - Italian (Italiano)](https://github.com/nodejs/nodejs-it) +* [nodejs-ja - Japanese (日本語)](https://github.com/nodejs/nodejs-ja) +* [nodejs-ka - Georgian (ქართული)](https://github.com/nodejs/nodejs-ka) +* [nodejs-ko - Korean (한국어)](https://github.com/nodejs/nodejs-ko) +* [nodejs-mk - Macedonian (Македонски)](https://github.com/nodejs/nodejs-mk) +* [nodejs-ms - Malay (بهاس ملايو‎)](https://github.com/nodejs/nodejs-ms) +* [nodejs-nl - Dutch (Nederlands)](https://github.com/nodejs/nodejs-nl) +* [nodejs-no - Norwegian (Norsk)](https://github.com/nodejs/nodejs-no) +* [nodejs-pl - Polish (Język Polski)](https://github.com/nodejs/nodejs-pl) +* [nodejs-pt - Portuguese (Português)](https://github.com/nodejs/nodejs-pt) +* [nodejs-ro - Romanian (Română)](https://github.com/nodejs/nodejs-ro)
+* [nodejs-ru - Russian (Русский)](https://github.com/nodejs/nodejs-ru) +* [nodejs-sv - Swedish (Svenska)](https://github.com/nodejs/nodejs-sv) +* [nodejs-ta - Tamil (தமிழ்)](https://github.com/nodejs/nodejs-ta) +* [nodejs-tr - Turkish (Türkçe)](https://github.com/nodejs/nodejs-tr) +* [nodejs-zh-TW - Taiwanese (繁體中文(台灣))](https://github.com/nodejs/nodejs-zh-TW) +* [nodejs-uk - Ukrainian (Українська)](https://github.com/nodejs/nodejs-uk) +* [nodejs-vi - Vietnamese (Tiếng Việt)](https://github.com/nodejs/nodejs-vi) + +### [Release](https://github.com/nodejs/Release) + +The Release Working Group manages the release process for Node.js. + +Responsibilities include: + +* Define the release process. +* Define the content of releases. +* Generate and create releases. +* Test Releases. +* Manage the Long Term Support and Current branches including backporting changes to these branches. +* Define the policy for what gets backported to release streams + +### [Security](https://github.com/nodejs/security-wg) + +The Security Working Group manages all aspects and processes linked to Node.js security. + +Responsibilities include: + +* Define and maintain security policies and procedures for: + * the core Node.js project + * other projects maintained by the Node.js Technical Steering Committee (TSC). +* Work with the Node Security Platform to bring community vulnerability data into the foundation as a shared asset. +* Ensure the vulnerability data is updated in an efficient and timely manner. For example, ensuring there are well-documented processes for reporting vulnerabilities in community modules. +* Review and recommend processes for handling of security reports (but not the actual administration of security reports, which are reviewed by a group of people directly delegated to by the TSC). 
+* Define and maintain policies and procedures for the coordination of security concerns within the external Node.js open source ecosystem. +* Offer help to npm package maintainers to fix high-impact security bugs. +* Maintain and make available data on disclosed security vulnerabilities in: + * the core Node.js project + * other projects maintained by the Node.js Foundation technical group + * the external Node.js open source ecosystem +* Promote the improvement of security practices within the Node.js ecosystem. +* Recommend security improvements for the core Node.js project. +* Facilitate and promote the expansion of a healthy security service and product provider ecosystem. + +### [Streams](https://github.com/nodejs/readable-stream) + +The Streams Working Group is dedicated to the support and improvement of the Streams API as used in Node.js and the npm ecosystem. We seek to create a composable API that solves the problem of representing multiple occurrences of an event over time in a humane, low-overhead fashion. Improvements to the API will be driven by the needs of the ecosystem; interoperability and backwards compatibility with other solutions and prior versions are paramount in importance. + +Responsibilities include: + +* Addressing stream issues on the Node.js issue tracker. +* Authoring and editing stream documentation within the Node.js project. +* Reviewing changes to stream subclasses within the Node.js project. +* Redirecting changes to streams from the Node.js project to this project. +* Assisting in the implementation of stream providers within Node.js. +* Recommending versions of `readable-stream` to be included in Node.js. +* Messaging about the future of streams to give the community advance notice of changes. 
diff --git a/locale/ro/docs/es6.md b/locale/ro/docs/es6.md new file mode 100644 index 000000000000..e73cd7bb6604 --- /dev/null +++ b/locale/ro/docs/es6.md @@ -0,0 +1,46 @@ +--- +title: ECMAScript 2015 (ES6) and beyond +layout: docs.hbs +--- + +# ECMAScript 2015 (ES6) and beyond + +Node.js is built against modern versions of [V8](https://v8.dev/). By keeping up-to-date with the latest releases of this engine, we ensure new features from the [JavaScript ECMA-262 specification](http://www.ecma-international.org/publications/standards/Ecma-262.htm) are brought to Node.js developers in a timely manner, as well as continued performance and stability improvements. + +All ECMAScript 2015 (ES6) features are split into three groups for **shipping**, **staged**, and **in progress** features: + +* All **shipping** features, which V8 considers stable, are turned **on by default on Node.js** and do **NOT** require any kind of runtime flag. +* **Staged** features, which are almost-completed features that are not considered stable by the V8 team, require a runtime flag: `--harmony`. +* **In progress** features can be activated individually by their respective harmony flag, although this is highly discouraged unless for testing purposes. Note: these flags are exposed by V8 and will potentially change without any deprecation notice. + +## Which features ship with which Node.js version by default? + +The website [node.green](https://node.green/) provides an excellent overview over supported ECMAScript features in various versions of Node.js, based on kangax's compat-table. + +## Which features are in progress? + +New features are constantly being added to the V8 engine. Generally speaking, expect them to land on a future Node.js release, although timing is unknown. + +You may list all the *in progress* features available on each Node.js release by grepping through the `--v8-options` argument. 
Please note that these are incomplete and possibly broken features of V8, so use them at your own risk: + +```bash +node --v8-options | grep "in progress" +``` + +## What about the performance of a particular feature? + +The V8 team is constantly working to improve the performance of new language features to eventually reach parity with their transpiled or native counterparts in EcmaScript 5 and earlier. The current progress there is tracked on the website [six-speed](https://fhinkel.github.io/six-speed), which shows the performance of ES2015 and ESNext features compared to their native ES5 counterparts. + +The work on optimizing features introduced with ES2015 and beyond is coordinated via a [performance plan](https://docs.google.com/document/d/1EA9EbfnydAmmU_lM8R_uEMQ-U_v4l9zulePSBkeYWmY), where the V8 team gathers and coordinates areas that need improvement, and design documents to tackle those problems. + +## I have my infrastructure set up to leverage the --harmony flag. Should I remove it? + +The current behaviour of the `--harmony` flag on Node.js is to enable **staged** features only. After all, it is now a synonym of `--es_staging`. As mentioned above, these are completed features that have not been considered stable yet. If you want to play safe, especially on production environments, consider removing this runtime flag until it ships by default on V8 and, consequently, on Node.js. If you keep this enabled, you should be prepared for further Node.js upgrades to break your code if V8 changes their semantics to more closely follow the standard. + +## How do I find which version of V8 ships with a particular version of Node.js? + +Node.js provides a simple way to list all dependencies and respective versions that ship with a specific binary through the `process` global object. 
In case of the V8 engine, type the following in your terminal to retrieve its version: + +```bash +node -p process.versions.v8 +``` diff --git a/locale/ro/docs/guides/abi-stability.md b/locale/ro/docs/guides/abi-stability.md new file mode 100644 index 000000000000..25c716db98bc --- /dev/null +++ b/locale/ro/docs/guides/abi-stability.md @@ -0,0 +1,35 @@ +--- +title: ABI Stability +layout: docs.hbs +--- + +# ABI Stability + +## Introduction +An Application Binary Interface (ABI) is a way for programs to call functions and use data structures from other compiled programs. It is the compiled version of an Application Programming Interface (API). In other words, the headers files describing the classes, functions, data structures, enumerations, and constants which enable an application to perform a desired task correspond by way of compilation to a set of addresses and expected parameter values and memory structure sizes and layouts with which the provider of the ABI was compiled. + +The application using the ABI must be compiled such that the available addresses, expected parameter values, and memory structure sizes and layouts agree with those with which the ABI provider was compiled. This is usually accomplished by compiling against the headers provided by the ABI provider. + +Since the provider of the ABI and the user of the ABI may be compiled at different times with different versions of the compiler, a portion of the responsibility for ensuring ABI compatibility lies with the compiler. Different versions of the compiler, perhaps provided by different vendors, must all produce the same ABI from a header file with a certain content, and must produce code for the application using the ABI that accesses the API described in a given header according to the conventions of the ABI resulting from the description in the header. Modern compilers have a fairly good track record of not breaking the ABI compatibility of the applications they compile. 
+ +The remaining responsibility for ensuring ABI compatibility lies with the team maintaining the header files which provide the API that results, upon compilation, in the ABI that is to remain stable. Changes to the header files can be made, but the nature of the changes has to be closely tracked to ensure that, upon compilation, the ABI does not change in a way that will render existing users of the ABI incompatible with the new version. + +## ABI Stability in Node.js +Node.js provides header files maintained by several independent teams. For example, header files such as `node.h` and `node_buffer.h` are maintained by the Node.js team. `v8.h` is maintained by the V8 team, which, although in close co-operation with the Node.js team, is independent, and with its own schedule and priorities. Thus, the Node.js team has only partial control over the changes that are introduced in the headers the project provides. As a result, the Node.js project has adopted [semantic versioning](https://semver.org/). This ensures that the APIs provided by the project will result in a stable ABI for all minor and patch versions of Node.js released within one major version. In practice, this means that the Node.js project has committed itself to ensuring that a Node.js native addon compiled against a given major version of Node.js will load successfully when loaded by any Node.js minor or patch version within the major version against which it was compiled. + +## N-API +Demand has arisen for equipping Node.js with an API that results in an ABI that remains stable across multiple Node.js major versions. The motivation for creating such an API is as follows: + +* The JavaScript language has remained compatible with itself since its very early days, whereas the ABI of the engine executing the JavaScript code changes with every major version of Node.js. 
This means that applications consisting of Node.js packages written entirely in JavaScript need not be recompiled, reinstalled, or redeployed as a new major version of Node.js is dropped into the production environment in which such applications run. In contrast, if an application depends on a package that contains a native addon, the application has to be recompiled, reinstalled, and redeployed whenever a new major version of Node.js is introduced into the production environment. This disparity between Node.js packages containing native addons and those that are written entirely in JavaScript has added to the maintenance burden of production systems which rely on native addons. + +* Other projects have started to produce JavaScript interfaces that are essentially alternative implementations of Node.js. Since these projects are usually built on a different JavaScript engine than V8, their native addons necessarily take on a different structure and use a different API. Nevertheless, using a single API for a native addon across different implementations of the Node.js JavaScript API would allow these projects to take advantage of the ecosystem of JavaScript packages that has accrued around Node.js. + +* Node.js may contain a different JavaScript engine in the future. This means that, externally, all Node.js interfaces would remain the same, but the V8 header file would be absent. Such a step would cause the disruption of the Node.js ecosystem in general, and that of the native addons in particular, if an API that is JavaScript engine agnostic is not first provided by Node.js and adopted by native addons. + +To these ends Node.js has introduced N-API in version 8.6.0 and marked it as a stable component of the project as of Node.js 8.12.0. The API is defined in the headers [`node_api.h`][] and [`node_api_types.h`][], and provides a forward-compatibility guarantee that crosses the Node.js major version boundary. 
The guarantee can be stated as follows: + +**A given version *n* of N-API will be available in the major version of Node.js in which it was published, and in all subsequent versions of Node.js, including subsequent major versions.** + +A native addon author can take advantage of the N-API forward compatibility guarantee by ensuring that the addon makes use only of APIs defined in `node_api.h` and data structures and constants defined in `node_api_types.h`. By doing so, the author facilitates adoption of their addon by indicating to production users that the maintenance burden for their application will increase no more by the addition of the native addon to their project than it would by the addition of a package written purely in JavaScript. + +N-API is versioned because new APIs are added from time to time. Unlike semantic versioning, N-API versioning is cumulative. That is, each version of N-API conveys the same meaning as a minor version in the semver system, meaning that all changes made to N-API will be backwards compatible. Additionally, new N-APIs are added under an experimental flag to give the community an opportunity to vet them in a production environment. Experimental status means that, although care has been taken to ensure that the new API will not have to be modified in an ABI-incompatible way in the future, it has not yet been sufficiently proven in production to be correct and useful as designed and, as such, may undergo ABI-incompatible changes before it is finally incorporated into a forthcoming version of N-API. That is, an experimental N-API is not yet covered by the forward compatibility guarantee. 
diff --git a/locale/ro/docs/guides/anatomy-of-an-http-transaction.md b/locale/ro/docs/guides/anatomy-of-an-http-transaction.md new file mode 100644 index 000000000000..da0a84d34679 --- /dev/null +++ b/locale/ro/docs/guides/anatomy-of-an-http-transaction.md @@ -0,0 +1,316 @@ +--- +title: Anatomy of an HTTP Transaction +layout: docs.hbs +--- + +# Anatomy of an HTTP Transaction + +The purpose of this guide is to impart a solid understanding of the process of Node.js HTTP handling. We'll assume that you know, in a general sense, how HTTP requests work, regardless of language or programming environment. We'll also assume a bit of familiarity with Node.js [`EventEmitters`][] and [`Streams`][]. If you're not quite familiar with them, it's worth taking a quick read through the API docs for each of those. + +## Create the Server + +Any node web server application will at some point have to create a web server object. This is done by using [`createServer`][]. + +```javascript +const http = require('http'); + +const server = http.createServer((request, response) => { + // magic happens here! +}); +``` + +The function that's passed in to [`createServer`][] is called once for every HTTP request that's made against that server, so it's called the request handler. In fact, the [`Server`][] object returned by [`createServer`][] is an [`EventEmitter`][], and what we have here is just shorthand for creating a `server` object and then adding the listener later. + +```javascript +const server = http.createServer(); +server.on('request', (request, response) => { + // the same kind of magic happens here! +}); +``` + +When an HTTP request hits the server, node calls the request handler function with a few handy objects for dealing with the transaction, `request` and `response`. We'll get to those shortly. + +In order to actually serve requests, the [`listen`][] method needs to be called on the `server` object. 
In most cases, all you'll need to pass to `listen` is the port number you want the server to listen on. There are some other options too, so consult the [API reference](https://nodejs.org/api/http.html). + +## Method, URL and Headers + +When handling a request, the first thing you'll probably want to do is look at the method and URL, so that appropriate actions can be taken. Node.js makes this relatively painless by putting handy properties onto the `request` object. + +```javascript +const { method, url } = request; +``` + +> **Note:** The `request` object is an instance of [`IncomingMessage`][]. + +The `method` here will always be a normal HTTP method/verb. The `url` is the full URL without the server, protocol or port. For a typical URL, this means everything after and including the third forward slash. + +Headers are also not far away. They're in their own object on `request` called `headers`. + +```javascript +const { headers } = request; +const userAgent = headers['user-agent']; +``` + +It's important to note here that all headers are represented in lower-case only, regardless of how the client actually sent them. This simplifies the task of parsing headers for whatever purpose. + +If some headers are repeated, then their values are overwritten or joined together as comma-separated strings, depending on the header. In some cases, this can be problematic, so [`rawHeaders`][] is also available. + +## Request Body + +When receiving a `POST` or `PUT` request, the request body might be important to your application. Getting at the body data is a little more involved than accessing request headers. The `request` object that's passed in to a handler implements the [`ReadableStream`][] interface. This stream can be listened to or piped elsewhere just like any other stream. We can grab the data right out of the stream by listening to the stream's `'data'` and `'end'` events. + +The chunk emitted in each `'data'` event is a [`Buffer`][]. 
If you know it's going to be string data, the best thing to do is collect the data in an array, then at the `'end'`, concatenate and stringify it. + +```javascript +let body = []; +request.on('data', (chunk) => { + body.push(chunk); +}).on('end', () => { + body = Buffer.concat(body).toString(); + // at this point, `body` has the entire request body stored in it as a string +}); +``` + +> **Note:** This may seem a tad tedious, and in many cases, it is. Luckily, there are modules like [`concat-stream`][] and [`body`][] on [`npm`][] which can help hide away some of this logic. It's important to have a good understanding of what's going on before going down that road, and that's why you're here! + +## A Quick Thing About Errors + +Since the `request` object is a [`ReadableStream`][], it's also an [`EventEmitter`][] and behaves like one when an error happens. + +An error in the `request` stream presents itself by emitting an `'error'` event on the stream. **If you don't have a listener for that event, the error will be *thrown*, which could crash your Node.js program.** You should therefore add an `'error'` listener on your request streams, even if you just log it and continue on your way. (Though it's probably best to send some kind of HTTP error response. More on that later.) + +```javascript +request.on('error', (err) => { + // This prints the error message and stack trace to `stderr`. + console.error(err.stack); +}); +``` + +There are other ways of [handling these errors](https://nodejs.org/api/errors.html) such as other abstractions and tools, but always be aware that errors can and do happen, and you're going to have to deal with them. + +## What We've Got so Far + +At this point, we've covered creating a server, and grabbing the method, URL, headers and body out of requests. 
When we put that all together, it might look something like this: + +```javascript +const http = require('http'); + +http.createServer((request, response) => { + const { headers, method, url } = request; + let body = []; + request.on('error', (err) => { + console.error(err); + }).on('data', (chunk) => { + body.push(chunk); + }).on('end', () => { + body = Buffer.concat(body).toString(); + // At this point, we have the headers, method, url and body, and can now + // do whatever we need to in order to respond to this request. + }); +}).listen(8080); // Activates this server, listening on port 8080. +``` + +If we run this example, we'll be able to *receive* requests, but not *respond* to them. In fact, if you hit this example in a web browser, your request would time out, as nothing is being sent back to the client. + +So far we haven't touched on the `response` object at all, which is an instance of [`ServerResponse`][], which is a [`WritableStream`][]. It contains many useful methods for sending data back to the client. We'll cover that next. + +## HTTP Status Code + +If you don't bother setting it, the HTTP status code on a response will always be 200. Of course, not every HTTP response warrants this, and at some point you'll definitely want to send a different status code. To do that, you can set the `statusCode` property. + +```javascript +response.statusCode = 404; // Tell the client that the resource wasn't found. +``` + +There are some other shortcuts to this, as we'll see soon. + +## Setting Response Headers + +Headers are set through a convenient method called [`setHeader`][]. + +```javascript +response.setHeader('Content-Type', 'application/json'); +response.setHeader('X-Powered-By', 'bacon'); +``` + +When setting the headers on a response, the case is insensitive on their names. If you set a header repeatedly, the last value you set is the value that gets sent. 
+ +## Explicitly Sending Header Data + +The methods of setting the headers and status code that we've already discussed assume that you're using "implicit headers". This means you're counting on node to send the headers for you at the correct time before you start sending body data. + +If you want, you can *explicitly* write the headers to the response stream. To do this, there's a method called [`writeHead`][], which writes the status code and the headers to the stream. + +```javascript +response.writeHead(200, { + 'Content-Type': 'application/json', + 'X-Powered-By': 'bacon' +}); +``` + +Once you've set the headers (either implicitly or explicitly), you're ready to start sending response data. + +## Sending Response Body + +Since the `response` object is a [`WritableStream`][], writing a response body out to the client is just a matter of using the usual stream methods. + +```javascript +response.write('<html>'); +response.write('<body>'); +response.write('<h1>Hello, World!</h1>'); +response.write('</body>'); +response.write('</html>'); +response.end(); +```
+ +The `end` function on streams can also take in some optional data to send as the last bit of data on the stream, so we can simplify the example above as follows. + +```javascript +response.end('<html><body><h1>Hello, World!</h1></body></html>'); +```
+ +> **Note:** It's important to set the status and headers *before* you start writing chunks of data to the body. This makes sense, since headers come before the body in HTTP responses. + +## Another Quick Thing About Errors + +The `response` stream can also emit `'error'` events, and at some point you're going to have to deal with that as well. All of the advice for `request` stream errors still applies here. + +## Put It All Together + +Now that we've learned about making HTTP responses, let's put it all together. Building on the earlier example, we're going to make a server that sends back all of the data that was sent to us by the user. We'll format that data as JSON using `JSON.stringify`. + +```javascript +const http = require('http'); + +http.createServer((request, response) => { + const { headers, method, url } = request; + let body = []; + request.on('error', (err) => { + console.error(err); + }).on('data', (chunk) => { + body.push(chunk); + }).on('end', () => { + body = Buffer.concat(body).toString(); + // BEGINNING OF NEW STUFF + + response.on('error', (err) => { + console.error(err); + }); + + response.statusCode = 200; + response.setHeader('Content-Type', 'application/json'); + // Note: the 2 lines above could be replaced with this next one: + // response.writeHead(200, {'Content-Type': 'application/json'}) + + const responseBody = { headers, method, url, body }; + + response.write(JSON.stringify(responseBody)); + response.end(); + // Note: the 2 lines above could be replaced with this next one: + // response.end(JSON.stringify(responseBody)) + + // END OF NEW STUFF + }); +}).listen(8080); +``` + +## Echo Server Example + +Let's simplify the previous example to make a simple echo server, which just sends whatever data is received in the request right back in the response. All we need to do is grab the data from the request stream and write that data to the response stream, similar to what we did previously. 
+ +```javascript +const http = require('http'); + +http.createServer((request, response) => { + let body = []; + request.on('data', (chunk) => { + body.push(chunk); + }).on('end', () => { + body = Buffer.concat(body).toString(); + response.end(body); + }); +}).listen(8080); +``` + +Now let's tweak this. We want to only send an echo under the following conditions: + +* The request method is POST. +* The URL is `/echo`. + +In any other case, we want to simply respond with a 404. + +```javascript +const http = require('http'); + +http.createServer((request, response) => { + if (request.method === 'POST' && request.url === '/echo') { + let body = []; + request.on('data', (chunk) => { + body.push(chunk); + }).on('end', () => { + body = Buffer.concat(body).toString(); + response.end(body); + }); + } else { + response.statusCode = 404; + response.end(); + } +}).listen(8080); +``` + +> **Note:** By checking the URL in this way, we're doing a form of "routing". Other forms of routing can be as simple as `switch` statements or as complex as whole frameworks like [`express`][]. If you're looking for something that does routing and nothing else, try [`router`][]. + +Great! Now let's take a stab at simplifying this. Remember, the `request` object is a [`ReadableStream`][] and the `response` object is a [`WritableStream`][]. That means we can use [`pipe`][] to direct data from one to the other. That's exactly what we want for an echo server! + +```javascript +const http = require('http'); + +http.createServer((request, response) => { + if (request.method === 'POST' && request.url === '/echo') { + request.pipe(response); + } else { + response.statusCode = 404; + response.end(); + } +}).listen(8080); +``` + +Yay streams! + +We're not quite done yet though. As mentioned multiple times in this guide, errors can and do happen, and we need to deal with them. 
+ +To handle errors on the request stream, we'll log the error to `stderr` and send a 400 status code to indicate a `Bad Request`. In a real-world application, though, we'd want to inspect the error to figure out what the correct status code and message would be. As usual with errors, you should consult the [`Error` documentation][]. + +On the response, we'll just log the error to `stderr`. + +```javascript +const http = require('http'); + +http.createServer((request, response) => { + request.on('error', (err) => { + console.error(err); + response.statusCode = 400; + response.end(); + }); + response.on('error', (err) => { + console.error(err); + }); + if (request.method === 'POST' && request.url === '/echo') { + request.pipe(response); + } else { + response.statusCode = 404; + response.end(); + } +}).listen(8080); +``` + +We've now covered most of the basics of handling HTTP requests. At this point, you should be able to: + +* Instantiate an HTTP server with a request handler function, and have it listen on a port. +* Get headers, URL, method and body data from `request` objects. +* Make routing decisions based on URL and/or other data in `request` objects. +* Send headers, HTTP status codes and body data via `response` objects. +* Pipe data from `request` objects and to `response` objects. +* Handle stream errors in both the `request` and `response` streams. + +From these basics, Node.js HTTP servers for many typical use cases can be constructed. There are plenty of other things these APIs provide, so be sure to read through the API docs for [`EventEmitters`][], [`Streams`][], and [`HTTP`][]. 
diff --git a/locale/ro/docs/guides/backpressuring-in-streams.md b/locale/ro/docs/guides/backpressuring-in-streams.md new file mode 100644 index 000000000000..b3fc5c6d3375 --- /dev/null +++ b/locale/ro/docs/guides/backpressuring-in-streams.md @@ -0,0 +1,449 @@ +--- +title: Backpressuring in Streams +layout: docs.hbs +--- + +# Backpressuring in Streams + +There is a general problem that occurs during data handling called [`backpressure`][] and describes a buildup of data behind a buffer during data transfer. When the receiving end of the transfer has complex operations, or is slower for whatever reason, there is a tendency for data from the incoming source to accumulate, like a clog. + +To solve this problem, there must be a delegation system in place to ensure a smooth flow of data from one source to another. Different communities have resolved this issue uniquely to their programs, Unix pipes and TCP sockets are good examples of this, and is often times referred to as _flow control_. In Node.js, streams have been the adopted solution. + +The purpose of this guide is to further detail what backpressure is, and how exactly streams address this in Node.js' source code. The second part of the guide will introduce suggested best practices to ensure your application's code is safe and optimized when implementing streams. + +We assume a little familiarity with the general definition of [`backpressure`][], [`Buffer`][], and [`EventEmitters`][] in Node.js, as well as some experience with [`Stream`][]. If you haven't read through those docs, it's not a bad idea to take a look at the API documentation first, as it will help expand your understanding while reading this guide. + +## The Problem with Data Handling + +In a computer system, data is transferred from one process to another through pipes, sockets, and signals. In Node.js, we find a similar mechanism called [`Stream`][]. Streams are great! 
They do so much for Node.js and almost every part of the internal codebase utilizes that module. As a developer, you are more than encouraged to use them too! + +```javascript +const readline = require('readline'); + +// process.stdin and process.stdout are both instances of Streams. +const rl = readline.createInterface({ + input: process.stdin, + output: process.stdout +}); + +rl.question('Why should you use streams? ', (answer) => { + console.log(`Maybe it's ${answer}, maybe it's because they are awesome! :)`); + + rl.close(); +}); +``` + +A good example of why the backpressure mechanism implemented through streams is a great optimization can be demonstrated by comparing the internal system tools from Node.js' [`Stream`][] implementation. + +In one scenario, we will take a large file (approximately ~9gb) and compress it using the familiar [`zip(1)`][] tool. + +``` +zip The.Matrix.1080p.mkv +``` + +While that will take a few minutes to complete, in another shell we may run a script that takes Node.js' module [`zlib`][], that wraps around another compression tool, [`gzip(1)`][]. + +```javascript +const gzip = require('zlib').createGzip(); +const fs = require('fs'); + +const inp = fs.createReadStream('The.Matrix.1080p.mkv'); +const out = fs.createWriteStream('The.Matrix.1080p.mkv.gz'); + +inp.pipe(gzip).pipe(out); +``` + +To test the results, try opening each compressed file. The file compressed by the [`zip(1)`][] tool will notify you the file is corrupt, whereas the compression finished by [`Stream`][] will decompress without error. + +Note: In this example, we use `.pipe()` to get the data source from one end to the other. However, notice there are no proper error handlers attached. If a chunk of data were to fail to be properly received, the `Readable` source or `gzip` stream will not be destroyed. [`pump`][] is a utility tool that would properly destroy all the streams in a pipeline if one of them fails or closes, and is a must have in this case! 
+ +[`pump`][] is only necessary for Node.js 8.x or earlier, as for Node.js 10.x or later version, [`pipeline`][] is introduced to replace for [`pump`][]. This is a module method to pipe between streams forwarding errors and properly cleaning up and provide a callback when the pipeline is complete. + +Here is an example of using pipeline: + +```javascript +const { pipeline } = require('stream'); +const fs = require('fs'); +const zlib = require('zlib'); + +// Use the pipeline API to easily pipe a series of streams +// together and get notified when the pipeline is fully done. +// A pipeline to gzip a potentially huge video file efficiently: + +pipeline( + fs.createReadStream('The.Matrix.1080p.mkv'), + zlib.createGzip(), + fs.createWriteStream('The.Matrix.1080p.mkv.gz'), + (err) => { + if (err) { + console.error('Pipeline failed', err); + } else { + console.log('Pipeline succeeded'); + } + } +); +``` + +You can also call [`promisify`][] on pipeline to use it with `async` / `await`: + +```javascript +const stream = require('stream'); +const fs = require('fs'); +const zlib = require('zlib'); +const util = require('util'); + +const pipeline = util.promisify(stream.pipeline); + +async function run() { + try { + await pipeline( + fs.createReadStream('The.Matrix.1080p.mkv'), + zlib.createGzip(), + fs.createWriteStream('The.Matrix.1080p.mkv.gz'), + ); + console.log('Pipeline succeeded'); + } catch (err) { + console.error('Pipeline failed', err); + } +} +``` + +## Too Much Data, Too Quickly + +There are instances where a [`Readable`][] stream might give data to the [`Writable`][] much too quickly — much more than the consumer can handle! + +When that occurs, the consumer will begin to queue all the chunks of data for later consumption. The write queue will get longer and longer, and because of this more data must be kept in memory until the entire process has completed. 
+ +Writing to a disk is a lot slower than reading from a disk, thus, when we are trying to compress a file and write it to our hard disk, backpressure will occur because the write disk will not be able to keep up with the speed from the read. + +```javascript +// Secretly the stream is saying: "whoa, whoa! hang on, this is way too much!" +// Data will begin to build up on the read-side of the data buffer as +// `write` tries to keep up with the incoming data flow. +inp.pipe(gzip).pipe(outputFile); +``` + +This is why a backpressure mechanism is important. If a backpressure system was not present, the process would use up your system's memory, effectively slowing down other processes, and monopolizing a large part of your system until completion. + +This results in a few things: + +* Slowing down all other current processes +* A very overworked garbage collector +* Memory exhaustion + +In the following examples we will take out the [return value](https://github.com/nodejs/node/blob/55c42bc6e5602e5a47fb774009cfe9289cb88e71/lib/_stream_writable.js#L239) of the `.write()` function and change it to `true`, which effectively disables backpressure support in Node.js core. In any reference to 'modified' binary, we are talking about running the `node` binary without the `return ret;` line, and instead with the replaced `return true;`. + +## Excess Drag on Garbage Collection + +Let's take a look at a quick benchmark. Using the same example from above, we ran a few time trials to get a median time for both binaries. 
+ +``` + trial (#) | `node` binary (ms) | modified `node` binary (ms) +================================================================= + 1 | 56924 | 55011 + 2 | 52686 | 55869 + 3 | 59479 | 54043 + 4 | 54473 | 55229 + 5 | 52933 | 59723 +================================================================= +average time: | 55299 | 55975 +``` + +Both take around a minute to run, so there's not much of a difference at all, but let's take a closer look to confirm whether our suspicions are correct. We use the Linux tool [`dtrace`][] to evaluate what's happening with the V8 garbage collector. + +The GC (garbage collector) measured time indicates the intervals of a full cycle of a single sweep done by the garbage collector: + +``` +approx. time (ms) | GC (ms) | modified GC (ms) +================================================= + 0 | 0 | 0 + 1 | 0 | 0 + 40 | 0 | 2 + 170 | 3 | 1 + 300 | 3 | 1 + + * * * + * * * + * * * + + 39000 | 6 | 26 + 42000 | 6 | 21 + 47000 | 5 | 32 + 50000 | 8 | 28 + 54000 | 6 | 35 +``` + +While the two processes start off the same and seem to work the GC at the same rate, it becomes evident that after a few seconds with a properly working backpressure system in place, it spreads the GC load across consistent intervals of 4-8 milliseconds until the end of the data transfer. + +However, when a backpressure system is not in place, the V8 garbage collection starts to drag out. The normal binary called the GC approximately **75** times in a minute, whereas, the modified binary fires only **36** times. + +This is the slow and gradual debt accumulating from growing memory usage. As data gets transferred, without a backpressure system in place, more memory is being used for each chunk transfer. + +The more memory that is being allocated, the more the GC has to take care of in one sweep. The bigger the sweep, the more the GC needs to decide what can be freed up, and scanning for detached pointers in a larger memory space will consume more computing power. 
+ +## Memory Exhaustion + +To determine the memory consumption of each binary, we've clocked each process with `/usr/bin/time -lp sudo ./node ./backpressure-example/zlib.js` individually. + +This is the output on the normal binary: + +``` +Respecting the return value of .write() +============================================= +real 58.88 +user 56.79 +sys 8.79 + 87810048 maximum resident set size + 0 average shared memory size + 0 average unshared data size + 0 average unshared stack size + 19427 page reclaims + 3134 page faults + 0 swaps + 5 block input operations + 194 block output operations + 0 messages sent + 0 messages received + 1 signals received + 12 voluntary context switches + 666037 involuntary context switches +``` + +The maximum byte size occupied by virtual memory turns out to be approximately 87.81 mb. + +And now changing the [return value](https://github.com/nodejs/node/blob/55c42bc6e5602e5a47fb774009cfe9289cb88e71/lib/_stream_writable.js#L239) of the [`.write()`][] function, we get: + +``` +Without respecting the return value of .write(): +================================================== +real 54.48 +user 53.15 +sys 7.43 +1524965376 maximum resident set size + 0 average shared memory size + 0 average unshared data size + 0 average unshared stack size + 373617 page reclaims + 3139 page faults + 0 swaps + 18 block input operations + 199 block output operations + 0 messages sent + 0 messages received + 1 signals received + 25 voluntary context switches + 629566 involuntary context switches +``` + +The maximum byte size occupied by virtual memory turns out to be approximately 1.52 gb. + +Without streams in place to delegate the backpressure, there is an order of magnitude greater of memory space being allocated - a huge margin of difference between the same process! + +This experiment shows how optimized and cost-effective Node.js' backpressure mechanism is for your computing system. Now, let's do a break down on how it works! 
+ +## How Does Backpressure Resolve These Issues? + +There are different functions to transfer data from one process to another. In Node.js, there is an internal built-in function called [`.pipe()`][]. There are [other packages](https://github.com/sindresorhus/awesome-nodejs#streams) out there you can use too! Ultimately though, at the basic level of this process, we have two separate components: the _source_ of the data and the _consumer_. + +When [`.pipe()`][] is called from the source, it signals to the consumer that there is data to be transferred. The pipe function helps to set up the appropriate backpressure closures for the event triggers. + +In Node.js the source is a [`Readable`][] stream and the consumer is the [`Writable`][] stream (both of these may be interchanged with a [`Duplex`][] or a [`Transform`][] stream, but that is out-of-scope for this guide). + +The moment that backpressure is triggered can be narrowed exactly to the return value of a [`Writable`][]'s [`.write()`][] function. This return value is determined by a few conditions, of course. + +In any scenario where the data buffer has exceeded the [`highWaterMark`][] or the write queue is currently busy, [`.write()`][] will return `false`. + +When a `false` value is returned, the backpressure system kicks in. It will pause the incoming [`Readable`][] stream from sending any data and wait until the consumer is ready again. Once the data buffer is emptied, a [`'drain'`][] event will be emitted and resume the incoming data flow. + +Once the queue is finished, backpressure will allow data to be sent again. The space in memory that was being used will free itself up and prepare for the next batch of data. + +This effectively allows a fixed amount of memory to be used at any given time for a [`.pipe()`][] function. There will be no memory leakage, no infinite buffering, and the garbage collector will only have to deal with one area in memory! 
+ +So, if backpressure is so important, why have you (probably) not heard of it? Well the answer is simple: Node.js does all of this automatically for you. + +That's so great! But also not so great when we are trying to understand how to implement our own custom streams. + +Note: In most machines, there is a byte size that determines when a buffer is full (which will vary across different machines). Node.js allows you to set your own custom [`highWaterMark`][], but commonly, the default is set to 16kb (16384, or 16 for objectMode streams). In instances where you might want to raise that value, go for it, but do so with caution! + +## Lifecycle of `.pipe()` + +To achieve a better understanding of backpressure, here is a flow-chart on the lifecycle of a [`Readable`][] stream being [piped](https://nodejs.org/docs/latest/api/stream.html#stream_readable_pipe_destination_options) into a [`Writable`][] stream: + +``` + +===================+ + x--> Piping functions +--> src.pipe(dest) | + x are set up during |===================| + x the .pipe method. | Event callbacks | + +===============+ x |-------------------| + | Your Data | x They exist outside | .on('close', cb) | + +=======+=======+ x the data flow, but | .on('data', cb) | + | x importantly attach | .on('drain', cb) | + | x events, and their | .on('unpipe', cb) | ++---------v---------+ x respective callbacks. | .on('error', cb) | +| Readable Stream +----+ | .on('finish', cb) | ++-^-------^-------^-+ | | .on('end', cb) | + ^ | ^ | +-------------------+ + | | | | + | ^ | | + ^ ^ ^ | +-------------------+ +=================+ + ^ | ^ +----> Writable Stream +---------> .write(chunk) | + | | | +-------------------+ +=======+=========+ + | | | | + | ^ | +------------------v---------+ + ^ | +-> if (!chunk) | Is this chunk too big? | + ^ | | emit .end(); | Is the queue busy? 
| + | | +-> else +-------+----------------+---+ + | ^ | emit .write(); | | + | ^ ^ +--v---+ +---v---+ + | | ^-----------------------------------< No | | Yes | + ^ | +------+ +---v---+ + ^ | | + | ^ emit .pause(); +=================+ | + | ^---------------^-----------------------+ return false; <-----+---+ + | +=================+ | + | | + ^ when queue is empty +============+ | + ^------------^-----------------------< Buffering | | + | |============| | + +> emit .drain(); | ^Buffer^ | | + +> emit .resume(); +------------+ | + | ^Buffer^ | | + +------------+ add chunk to queue | + | <---^---------------------< + +============+ +``` + +Note: If you are setting up a pipeline to chain together a few streams to manipulate your data, you will most likely be implementing [`Transform`][] stream. + +In this case, your output from your [`Readable`][] stream will enter in the [`Transform`][] and will pipe into the [`Writable`][]. + +```javascript +Readable.pipe(Transformable).pipe(Writable); +``` + +Backpressure will be automatically applied, but note that both the incoming and outgoing `highWaterMark` of the [`Transform`][] stream may be manipulated and will effect the backpressure system. + +## Backpressure Guidelines + +Since [Node.js v0.10](https://nodejs.org/docs/v0.10.0/), the [`Stream`][] class has offered the ability to modify the behaviour of the [`.read()`][] or [`.write()`][] by using the underscore version of these respective functions ([`._read()`][] and [`._write()`][]). + +There are guidelines documented for [implementing Readable streams](https://nodejs.org/docs/latest/api/stream.html#stream_implementing_a_readable_stream) and [implementing Writable streams](https://nodejs.org/docs/latest/api/stream.html#stream_implementing_a_writable_stream). We will assume you've read these over, and the next section will go a little bit more in-depth. + +## Rules to Abide By When Implementing Custom Streams + +The golden rule of streams is **to always respect backpressure**. 
What constitutes best practice is non-contradictory practice. So long as you are careful to avoid behaviours that conflict with internal backpressure support, you can be sure you're following good practice. + +In general, + +1. Never `.push()` if you are not asked. +2. Never call `.write()` after it returns false but wait for 'drain' instead. +3. Streams change between different Node.js versions, and the library you use. Be careful and test things. + +Note: In regards to point 3, an incredibly useful package for building browser streams is [`readable-stream`][]. Rod Vagg has written a [great blog post](https://r.va.gg/2014/06/why-i-dont-use-nodes-core-stream-module.html) describing the utility of this library. In short, it provides a type of automated graceful degradation for [`Readable`][] streams, and supports older versions of browsers and Node.js. + +## Rules specific to Readable Streams + +So far, we have taken a look at how [`.write()`][] affects backpressure and have focused much on the [`Writable`][] stream. Because of Node.js' functionality, data is technically flowing downstream from [`Readable`][] to [`Writable`][]. However, as we can observe in any transmission of data, matter, or energy, the source is just as important as the destination and the [`Readable`][] stream is vital to how backpressure is handled. + +Both these processes rely on one another to communicate effectively; if the [`Readable`][] ignores when the [`Writable`][] stream asks for it to stop sending in data, it can be just as problematic as when the [`.write()`][]'s return value is incorrect. + +So, as well as respecting the [`.write()`][] return, we must also respect the return value of [`.push()`][] used in the [`._read()`][] method. If [`.push()`][] returns a `false` value, the stream will stop reading from the source. Otherwise, it will continue without pause. 
+ +Here is an example of bad practice using [`.push()`][]: + +```javascript +// This is problematic as it completely ignores return value from push +// which may be a signal for backpressure from the destination stream! +class MyReadable extends Readable { + _read(size) { + let chunk; + while (null !== (chunk = getNextChunk())) { + this.push(chunk); + } + } +} +``` + +Additionally, from outside the custom stream, there are pitfalls for ignoring backpressure. In this counter-example of good practice, the application's code forces data through whenever it is available (signaled by the [`'data'` event][]): + +```javascript +// This ignores the backpressure mechanisms Node.js has set in place, +// and unconditionally pushes through data, regardless if the +// destination stream is ready for it or not. +readable.on('data', (data) => + writable.write(data) +); +``` + +## Rules specific to Writable Streams + +Recall that a [`.write()`][] may return true or false dependent on some conditions. Luckily for us, when building our own [`Writable`][] stream, the [`stream state machine`][] will handle our callbacks and determine when to handle backpressure and optimize the flow of data for us. + +However, when we want to use a [`Writable`][] directly, we must respect the [`.write()`][] return value and pay close attention to these conditions: + +* If the write queue is busy, [`.write()`][] will return false. +* If the data chunk is too large, [`.write()`][] will return false (the limit is indicated by the variable, [`highWaterMark`][]). +```javascript +// This writable is invalid because of the async nature of JavaScript callbacks. +// Without a return statement for each callback prior to the last, +// there is a great chance multiple callbacks will be called. 
+class MyWritable extends Writable { + _write(chunk, encoding, callback) { + if (chunk.toString().indexOf('a') >= 0) + callback(); + else if (chunk.toString().indexOf('b') >= 0) + callback(); + callback(); + } +} + +// The proper way to write this would be: + if (chunk.contains('a')) + return callback(); + if (chunk.contains('b')) + return callback(); + callback(); +``` + +There are also some things to look out for when implementing [`._writev()`][]. The function is coupled with [`.cork()`][], but there is a common mistake when writing: + +```javascript +// Using .uncork() twice here makes two calls on the C++ layer, rendering the +// cork/uncork technique useless. +ws.cork(); +ws.write('hello '); +ws.write('world '); +ws.uncork(); + +ws.cork(); +ws.write('from '); +ws.write('Matteo'); +ws.uncork(); + +// The correct way to write this is to utilize process.nextTick(), which fires +// on the next event loop. +ws.cork(); +ws.write('hello '); +ws.write('world '); +process.nextTick(doUncork, ws); + +ws.cork(); +ws.write('from '); +ws.write('Matteo'); +process.nextTick(doUncork, ws); + +// As a global function. +function doUncork(stream) { + stream.uncork(); +} +``` + +[`.cork()`][] can be called as many times we want, we just need to be careful to call [`.uncork()`][] the same amount of times to make it flow again. + +## Conclusion + +Streams are an often used module in Node.js. They are important to the internal structure, and for developers, to expand and connect across the Node.js modules ecosystem. + +Hopefully, you will now be able to troubleshoot, safely code your own [`Writable`][] and [`Readable`][] streams with backpressure in mind, and share your knowledge with colleagues and friends. + +Be sure to read up more on [`Stream`][] for other API functions to help improve and unleash your streaming capabilities when building an application with Node.js. 
diff --git a/locale/ro/docs/guides/blocking-vs-non-blocking.md b/locale/ro/docs/guides/blocking-vs-non-blocking.md new file mode 100644 index 000000000000..579d2c912e5c --- /dev/null +++ b/locale/ro/docs/guides/blocking-vs-non-blocking.md @@ -0,0 +1,103 @@ +--- +title: Overview of Blocking vs Non-Blocking +layout: docs.hbs +--- + +# Overview of Blocking vs Non-Blocking + +This overview covers the difference between **blocking** and **non-blocking** calls in Node.js. This overview will refer to the event loop and libuv but no prior knowledge of those topics is required. Readers are assumed to have a basic understanding of the JavaScript language and Node.js [callback pattern](/en/knowledge/getting-started/control-flow/what-are-callbacks/). + +> "I/O" refers primarily to interaction with the system's disk and network supported by [libuv](https://libuv.org/). + +## Blocking + +**Blocking** is when the execution of additional JavaScript in the Node.js process must wait until a non-JavaScript operation completes. This happens because the event loop is unable to continue running JavaScript while a **blocking** operation is occurring. + +In Node.js, JavaScript that exhibits poor performance due to being CPU intensive rather than waiting on a non-JavaScript operation, such as I/O, isn't typically referred to as **blocking**. Synchronous methods in the Node.js standard library that use libuv are the most commonly used **blocking** operations. Native modules may also have **blocking** methods. + +All of the I/O methods in the Node.js standard library provide asynchronous versions, which are **non-blocking**, and accept callback functions. Some methods also have **blocking** counterparts, which have names that end with `Sync`. + +## Comparing Code + +**Blocking** methods execute **synchronously** and **non-blocking** methods execute **asynchronously**. 
+ +Using the File System module as an example, this is a **synchronous** file read: + +```js +const fs = require('fs'); +const data = fs.readFileSync('/file.md'); // blocks here until file is read +``` + +And here is an equivalent **asynchronous** example: + +```js +const fs = require('fs'); +fs.readFile('/file.md', (err, data) => { + if (err) throw err; +}); +``` + +The first example appears simpler than the second but has the disadvantage of the second line **blocking** the execution of any additional JavaScript until the entire file is read. Note that in the synchronous version if an error is thrown it will need to be caught or the process will crash. In the asynchronous version, it is up to the author to decide whether an error should throw as shown. + +Let's expand our example a little bit: + +```js +const fs = require('fs'); +const data = fs.readFileSync('/file.md'); // blocks here until file is read +console.log(data); +moreWork(); // will run after console.log +``` + +And here is a similar, but not equivalent asynchronous example: + +```js +const fs = require('fs'); +fs.readFile('/file.md', (err, data) => { + if (err) throw err; + console.log(data); +}); +moreWork(); // will run before console.log +``` + +In the first example above, `console.log` will be called before `moreWork()`. In the second example `fs.readFile()` is **non-blocking** so JavaScript execution can continue and `moreWork()` will be called first. The ability to run `moreWork()` without waiting for the file read to complete is a key design choice that allows for higher throughput. + +## Concurrency and Throughput + +JavaScript execution in Node.js is single threaded, so concurrency refers to the event loop's capacity to execute JavaScript callback functions after completing other work. Any code that is expected to run in a concurrent manner must allow the event loop to continue running as non-JavaScript operations, like I/O, are occurring. 
+ +As an example, let's consider a case where each request to a web server takes 50ms to complete and 45ms of that 50ms is database I/O that can be done asynchronously. Choosing **non-blocking** asynchronous operations frees up that 45ms per request to handle other requests. This is a significant difference in capacity just by choosing to use **non-blocking** methods instead of **blocking** methods. + +The event loop is different than models in many other languages where additional threads may be created to handle concurrent work. + +## Dangers of Mixing Blocking and Non-Blocking Code + +There are some patterns that should be avoided when dealing with I/O. Let's look at an example: + +```js +const fs = require('fs'); +fs.readFile('/file.md', (err, data) => { + if (err) throw err; + console.log(data); +}); +fs.unlinkSync('/file.md'); +``` + +In the above example, `fs.unlinkSync()` is likely to be run before `fs.readFile()`, which would delete `file.md` before it is actually read. A better way to write this, which is completely **non-blocking** and guaranteed to execute in the correct order is: + +```js +const fs = require('fs'); +fs.readFile('/file.md', (readFileErr, data) => { + if (readFileErr) throw readFileErr; + console.log(data); + fs.unlink('/file.md', (unlinkErr) => { + if (unlinkErr) throw unlinkErr; + }); +}); +``` + +The above places a **non-blocking** call to `fs.unlink()` within the callback of `fs.readFile()` which guarantees the correct order of operations. 
+ +## Additional Resources + +* [libuv](https://libuv.org/) +* [About Node.js](/en/about/) diff --git a/locale/ro/docs/guides/buffer-constructor-deprecation.md b/locale/ro/docs/guides/buffer-constructor-deprecation.md new file mode 100644 index 000000000000..8f5611e2e430 --- /dev/null +++ b/locale/ro/docs/guides/buffer-constructor-deprecation.md @@ -0,0 +1,220 @@ +--- +title: Porting to the Buffer.from()/Buffer.alloc() API +layout: docs.hbs +--- + +# Porting to the `Buffer.from()`/`Buffer.alloc()` API + +## Overview + +This guide explains how to migrate to safe `Buffer` constructor methods. The migration fixes the following deprecation warning: + +
+The Buffer() and new Buffer() constructors are not recommended for use due to security and usability concerns. Please use the new Buffer.alloc(), Buffer.allocUnsafe(), or Buffer.from() construction methods instead. +
+ +* [Variant 1: Drop support for Node.js ≤ 4.4.x and 5.0.0 — 5.9.x](#variant-1) (*recommended*) +* [Variant 2: Use a polyfill](#variant-2) +* [Variant 3: Manual detection, with safeguards](#variant-3) + +### Finding problematic bits of code using `grep` + +Just run `grep -nrE '[^a-zA-Z](Slow)?Buffer\s*\(' --exclude-dir node_modules`. + +It will find all the potentially unsafe places in your own code (with some considerably unlikely exceptions). + +### Finding problematic bits of code using Node.js 8 + +If you’re using Node.js ≥ 8.0.0 (which is recommended), Node.js exposes multiple options that help with finding the relevant pieces of code: + +* `--trace-warnings` will make Node.js show a stack trace for this warning and other warnings that are printed by Node.js. +* `--trace-deprecation` does the same thing, but only for deprecation warnings. +* `--pending-deprecation` will show more types of deprecation warnings. In particular, it will show the `Buffer()` deprecation warning, even on Node.js 8. + +You can set these flags using environment variables: + +```bash +$ export NODE_OPTIONS='--trace-warnings --pending-deprecation' +$ cat example.js +'use strict'; +const foo = new Buffer('foo'); +$ node example.js +(node:7147) [DEP0005] DeprecationWarning: The Buffer() and new Buffer() constructors are not recommended for use due to security and usability concerns. Please use the new Buffer.alloc(), Buffer.allocUnsafe(), or Buffer.from() construction methods instead. + at showFlaggedDeprecation (buffer.js:127:13) + at new Buffer (buffer.js:148:3) + at Object. (/path/to/example.js:2:13) + [... more stack trace lines ...] +``` + +### Finding problematic bits of code using linters + +ESLint rules [no-buffer-constructor](https://eslint.org/docs/rules/no-buffer-constructor) or [node/no-deprecated-api](https://github.com/mysticatea/eslint-plugin-node/blob/master/docs/rules/no-deprecated-api.md) also find calls to deprecated `Buffer()` API. 
Those rules are included in some presets. + +There is a drawback, though, that it doesn't always [work correctly](https://github.com/chalker/safer-buffer#why-not-safe-buffer) when `Buffer` is overridden e.g. with a polyfill, so recommended is a combination of this and some other method described above. + +## Variant 1: Drop support for Node.js ≤ 4.4.x and 5.0.0 — 5.9.x + +This is the recommended solution nowadays that would imply only minimal overhead. + +The Node.js 5.x release line has been unsupported since July 2016, and the Node.js 4.x release line reaches its End of Life in April 2018 (→ [Schedule](https://github.com/nodejs/Release#release-schedule)). This means that these versions of Node.js will *not* receive any updates, even in case of security issues, so using these release lines should be avoided, if at all possible. + +What you would do in this case is to convert all `new Buffer()` or `Buffer()` calls to use `Buffer.alloc()` or `Buffer.from()`, in the following way: + +* For `new Buffer(number)`, replace it with `Buffer.alloc(number)`. +* For `new Buffer(string)` (or `new Buffer(string, encoding)`), replace it with `Buffer.from(string)` (or `Buffer.from(string, encoding)`). +* For all other combinations of arguments (these are much rarer), also replace `new Buffer(...arguments)` with `Buffer.from(...arguments)`. + +Note that `Buffer.alloc()` is also _faster_ on the current Node.js versions than `new Buffer(size).fill(0)`, which is what you would otherwise need to ensure zero-filling. + +Enabling ESLint rule [no-buffer-constructor](https://eslint.org/docs/rules/no-buffer-constructor) or [node/no-deprecated-api](https://github.com/mysticatea/eslint-plugin-node/blob/master/docs/rules/no-deprecated-api.md) is recommended to avoid accidental unsafe `Buffer` API usage. 
+ +There is also a [JSCodeshift codemod](https://github.com/joyeecheung/node-dep-codemod#dep005) for automatically migrating `Buffer` constructors to `Buffer.alloc()` or `Buffer.from()`. Note that it currently only works with cases where the arguments are literals or where the constructor is invoked with two arguments. + +_If you currently support those older Node.js versions and dropping support for them is not possible, or if you support older branches of your packages, consider using [Variant 2](#variant-2) or [Variant 3](#variant-3) on older branches, so people using those older branches will also receive the fix. That way, you will eradicate potential issues caused by unguarded `Buffer` API usage and your users will not observe a runtime deprecation warning when running your code on Node.js 10._ + +## Variant 2: Use a polyfill + +There are three different polyfills available: + +* **[safer-buffer](https://www.npmjs.com/package/safer-buffer)** is a drop-in replacement for the entire `Buffer` API, that will _throw_ when using `new Buffer()`. + + You would take exactly the same steps as in [Variant 1](#variant-1), but with a polyfill `const Buffer = require('safer-buffer').Buffer` in all files where you use the new `Buffer` API. + + Do not use the old `new Buffer()` API. In any files where the line above is added, using old `new Buffer()` API will _throw_. + +* **[buffer-from](https://www.npmjs.com/package/buffer-from) and/or [buffer-alloc](https://www.npmjs.com/package/buffer-alloc)** are [ponyfills](https://ponyfill.com/) for their respective part of the `Buffer` API. You only need to add the package(s) corresponding to the API you are using. + + You would import the module needed with an appropriate name, e.g. `const bufferFrom = require('buffer-from')` and then use that instead of the call to `new Buffer()`, e.g. `new Buffer('test')` becomes `bufferFrom('test')`. 
+
+  A downside with this approach is slightly more code changes to migrate off them (as you would be using e.g. `Buffer.from()` under a different name).
+
+* **[safe-buffer](https://www.npmjs.com/package/safe-buffer)** is also a drop-in replacement for the entire `Buffer` API, but using `new Buffer()` will still work as before.
+
+  A downside to this approach is that it will allow you to also use the older `new Buffer()` API in your code, which is problematic since it can cause issues in your code, and will start emitting runtime deprecation warnings starting with Node.js 10 ([read more here](https://github.com/chalker/safer-buffer#why-not-safe-buffer)).
+
+Note that in either case, it is important that you also remove all calls to the old `Buffer` API manually — just throwing in `safe-buffer` doesn't fix the problem by itself, it just provides a polyfill for the new API. I have seen people making that mistake.
+
+Enabling ESLint rule [no-buffer-constructor](https://eslint.org/docs/rules/no-buffer-constructor) or [node/no-deprecated-api](https://github.com/mysticatea/eslint-plugin-node/blob/master/docs/rules/no-deprecated-api.md) is recommended.
+
+_Don't forget to drop the polyfill usage once you drop support for Node.js < 4.5.0._
+
+## Variant 3: Manual detection, with safeguards
+
+This is useful if you create `Buffer` instances in only a few places (e.g. one), or you have your own wrapper around them.
+
+### `Buffer(0)`
+
+This special case for creating empty buffers can be safely replaced with `Buffer.concat([])`, which returns the same result all the way down to Node.js 0.8.x.
+ +### `Buffer(notNumber)` + +Before: + +```js +const buf = new Buffer(notNumber, encoding); +``` + +After: + +```js +let buf; +if (Buffer.from && Buffer.from !== Uint8Array.from) { + buf = Buffer.from(notNumber, encoding); +} else { + if (typeof notNumber === 'number') { + throw new Error('The "size" argument must be not of type number.'); + } + buf = new Buffer(notNumber, encoding); +} +``` + +`encoding` is optional. + +Note that the `typeof notNumber` before `new Buffer()` is required (for cases when `notNumber` argument is not hard-coded) and _is not caused by the deprecation of `Buffer` constructor_ — it's exactly _why_ the `Buffer` constructor is deprecated. Ecosystem packages lacking this type-check caused numerous security issues — situations when unsanitized user input could end up in the `Buffer(arg)` create problems ranging from DoS to leaking sensitive information to the attacker from the process memory. + +When `notNumber` argument is hardcoded (e.g. literal `"abc"` or `[0,1,2]`), the `typeof` check can be omitted. + +Also, note that using TypeScript does not fix this problem for you — when libs written in `TypeScript` are used from JS, or when user input ends up there — it behaves exactly as pure JS, as all type checks are translation-time only and are not present in the actual JS code which TS compiles to. + +### `Buffer(number)` + +For Node.js 0.10.x (and below) support: + +```js +var buf; +if (Buffer.alloc) { + buf = Buffer.alloc(number); +} else { + buf = new Buffer(number); + buf.fill(0); +} +``` + +Otherwise (Node.js ≥ 0.12.x): + +```js +const buf = Buffer.alloc ? Buffer.alloc(number) : new Buffer(number).fill(0); +``` + +## Regarding `Buffer.allocUnsafe()` + +Be extra cautious when using `Buffer.allocUnsafe()`: + +* Don't use it if you don't have a good reason to + * e.g. 
you probably won't ever see a performance difference for small buffers, in fact, those might be even faster with `Buffer.alloc()`, + * if your code is not in the hot code path — you also probably won't notice a difference, + * keep in mind that zero-filling minimizes the potential risks. +* If you use it, make sure that you never return the buffer in a partially-filled state, + * if you are writing to it sequentially — always truncate it to the actual written length + +Errors in handling buffers allocated with `Buffer.allocUnsafe()` could result in various issues, ranged from undefined behavior of your code to sensitive data (user input, passwords, certs) leaking to the remote attacker. + +_Note that the same applies to `new Buffer()` usage without zero-filling, depending on the Node.js version (and lacking type checks also adds DoS to the list of potential problems)._ + +## FAQ + +### What is wrong with the + +`Buffer` constructor? + +The `Buffer` constructor could be used to create a buffer in many different ways: + +* `new Buffer(42)` creates a `Buffer` of 42 bytes. Before Node.js 8, this buffer contained *arbitrary memory* for performance reasons, which could include anything ranging from program source code to passwords and encryption keys. +* `new Buffer('abc')` creates a `Buffer` that contains the UTF-8-encoded version of the string `'abc'`. A second argument could specify another encoding: for example, `new Buffer(string, 'base64')` could be used to convert a Base64 string into the original sequence of bytes that it represents. +* There are several other combinations of arguments. + +This meant that in code like `var buffer = new Buffer(foo);`, *it is not possible to tell what exactly the contents of the generated buffer are* without knowing the type of `foo`. + +Sometimes, the value of `foo` comes from an external source. 
For example, this function could be exposed as a service on a web server, converting a UTF-8 string into its Base64 form: + +```js +function stringToBase64(req, res) { + // The request body should have the format of `{ string: 'foobar' }`. + const rawBytes = new Buffer(req.body.string); + const encoded = rawBytes.toString('base64'); + res.end({ encoded }); +} +``` + +Note that this code does *not* validate the type of `req.body.string`: + +* `req.body.string` is expected to be a string. If this is the case, all goes well. +* `req.body.string` is controlled by the client that sends the request. +* If `req.body.string` is the *number* `50`, the `rawBytes` would be `50` bytes: + * Before Node.js 8, the content would be uninitialized + * After Node.js 8, the content would be `50` bytes with the value `0` + +Because of the missing type check, an attacker could intentionally send a number as part of the request. Using this, they can either: + +* Read uninitialized memory. This **will** leak passwords, encryption keys and other kinds of sensitive information. (Information leak) +* Force the program to allocate a large amount of memory. For example, when specifying `500000000` as the input value, each request will allocate 500MB of memory. This can be used to either exhaust the memory available of a program completely and make it crash, or slow it down significantly. (Denial of Service) + +Both of these scenarios are considered serious security issues in a real-world web server context. + +When using `Buffer.from(req.body.string)` instead, passing a number will always throw an exception instead, giving a controlled behavior that can always be handled by the program. + +### The + +`Buffer()` constructor has been deprecated for a while. Is this really an issue? + +Surveys of code in the `npm` ecosystem have shown that the `Buffer()` constructor is still widely used. This includes new code, and overall usage of such code has actually been *increasing*. 
diff --git a/locale/ro/docs/guides/debugging-getting-started.md b/locale/ro/docs/guides/debugging-getting-started.md new file mode 100644 index 000000000000..5abb1fa1c270 --- /dev/null +++ b/locale/ro/docs/guides/debugging-getting-started.md @@ -0,0 +1,189 @@ +--- +title: Debugging - Getting Started +layout: docs.hbs +--- + +# Debugging Guide + +This guide will help you get started debugging your Node.js apps and scripts. + +## Enable Inspector + +When started with the `--inspect` switch, a Node.js process listens for a debugging client. By default, it will listen at host and port 127.0.0.1:9229. Each process is also assigned a unique [UUID](https://tools.ietf.org/html/rfc4122). + +Inspector clients must know and specify host address, port, and UUID to connect. A full URL will look something like `ws://127.0.0.1:9229/0f2c936f-b1cd-4ac9-aab3-f63b0f33d55e`. + +Node.js will also start listening for debugging messages if it receives a `SIGUSR1` signal. (`SIGUSR1` is not available on Windows.) In Node.js 7 and earlier, this activates the legacy Debugger API. In Node.js 8 and later, it will activate the Inspector API. + +--- +## Security Implications + +Since the debugger has full access to the Node.js execution environment, a malicious actor able to connect to this port may be able to execute arbitrary code on behalf of the Node.js process. It is important to understand the security implications of exposing the debugger port on public and private networks. + +### Exposing the debug port publicly is unsafe + +If the debugger is bound to a public IP address, or to 0.0.0.0, any clients that can reach your IP address will be able to connect to the debugger without any restriction and will be able to run arbitrary code. + +By default `node --inspect` binds to 127.0.0.1. You explicitly need to provide a public IP address or 0.0.0.0, etc., if you intend to allow external connections to the debugger. Doing so may expose you to a potentially significant security threat. 
We suggest you ensure appropriate firewalls and access controls are in place to prevent a security exposure.
+
+See the section on '[Enabling remote debugging scenarios](#enabling-remote-debugging-scenarios)' for some advice on how to safely allow remote debugger clients to connect.
+
+### Local applications have full access to the inspector
+
+Even if you bind the inspector port to 127.0.0.1 (the default), any applications running locally on your machine will have unrestricted access. This is by design to allow local debuggers to be able to attach conveniently.
+
+### Browsers, WebSockets and same-origin policy
+
+Websites open in a web-browser can make WebSocket and HTTP requests under the browser security model. An initial HTTP connection is necessary to obtain a unique debugger session id. The same-origin-policy prevents websites from being able to make this HTTP connection. For additional security against [DNS rebinding attacks](https://en.wikipedia.org/wiki/DNS_rebinding), Node.js verifies that the 'Host' headers for the connection either specify an IP address or `localhost` or `localhost6` precisely.
+
+These security policies disallow connecting to a remote debug server by specifying the hostname. You can work around this restriction by specifying either the IP address or by using ssh tunnels as described below.
+
+## Inspector Clients
+
+Several commercial and open source tools can connect to the Node.js Inspector. Basic info on these follows:
+
+### [node-inspect](https://github.com/nodejs/node-inspect)
+
+* CLI Debugger supported by the Node.js Foundation which uses the [Inspector Protocol](https://chromedevtools.github.io/debugger-protocol-viewer/v8/).
+* A version is bundled with Node.js and can be used with `node inspect myscript.js`.
+* The latest version can also be installed independently (e.g. `npm install -g node-inspect`) and used with `node-inspect myscript.js`.
+ +### [Chrome DevTools](https://github.com/ChromeDevTools/devtools-frontend) 55+, [Microsoft Edge](https://www.microsoftedgeinsider.com) + +* **Option 1**: Open `chrome://inspect` in a Chromium-based browser or `edge://inspect` in Edge. Click the Configure button and ensure your target host and port are listed. +* **Option 2**: Copy the `devtoolsFrontendUrl` from the output of `/json/list` (see above) or the --inspect hint text and paste into Chrome. + +### [Visual Studio Code](https://github.com/microsoft/vscode) 1.10+ + +* In the Debug panel, click the settings icon to open `.vscode/launch.json`. Select "Node.js" for initial setup. + +### [Visual Studio](https://github.com/Microsoft/nodejstools) 2017 + +* Choose "Debug > Start Debugging" from the menu or hit F5. +* [Detailed instructions](https://github.com/Microsoft/nodejstools/wiki/Debugging). + +### [JetBrains WebStorm](https://www.jetbrains.com/webstorm/) 2017.1+ and other JetBrains IDEs + +* Create a new Node.js debug configuration and hit Debug. `--inspect` will be used by default for Node.js 7+. To disable uncheck `js.debugger.node.use.inspect` in the IDE Registry. + +### [chrome-remote-interface](https://github.com/cyrus-and/chrome-remote-interface) + +* Library to ease connections to Inspector Protocol endpoints. + +### [Gitpod](https://www.gitpod.io) + +* Start a Node.js debug configuration from the `Debug` view or hit `F5`. [Detailed instructions](https://medium.com/gitpod/debugging-node-js-applications-in-theia-76c94c76f0a1) + +### [Eclipse IDE](https://eclipse.org/eclipseide) with Eclipse Wild Web Developer extension + +* From a .js file, choose "Debug As... > Node program", or +* Create a Debug Configuration to attach debugger to running Node.js application (already started with `--inspect`). + +--- + +## Command-line options + +The following table lists the impact of various runtime flags on debugging: + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagMeaning
--inspect +
    +
  • Enable inspector agent
  • +
  • Listen on default address and port (127.0.0.1:9229)
  • +
+
--inspect=[host:port] +
    +
  • Enable inspector agent
  • +
  • Bind to address or hostname host (default: 127.0.0.1)
  • +
  • Listen on port port (default: 9229)
  • +
+
--inspect-brk +
    +
  • Enable inspector agent
  • +
  • Listen on default address and port (127.0.0.1:9229)
  • +
  • Break before user code starts
  • +
+
--inspect-brk=[host:port] +
    +
  • Enable inspector agent
  • +
  • Bind to address or hostname host (default: 127.0.0.1)
  • +
  • Listen on port port (default: 9229)
  • +
  • Break before user code starts
  • +
+
node inspect script.js +
    +
  • Spawn child process to run user's script under --inspect flag; + and use main process to run CLI debugger.
  • +
+
node inspect --port=xxxx script.js +
    +
  • Spawn child process to run user's script under --inspect flag; + and use main process to run CLI debugger.
  • +
  • Listen on port port (default: 9229)
  • +
+
+ +--- + +## Enabling remote debugging scenarios + +We recommend that you never have the debugger listen on a public IP address. If you need to allow remote debugging connections we recommend the use of ssh tunnels instead. We provide the following example for illustrative purposes only. Please understand the security risk of allowing remote access to a privileged service before proceeding. + +Let's say you are running Node.js on a remote machine, remote.example.com, that you want to be able to debug. On that machine, you should start the node process with the inspector listening only to localhost (the default). + +```bash +node --inspect server.js +``` + +Now, on your local machine from where you want to initiate a debug client connection, you can setup an ssh tunnel: + +```bash +ssh -L 9221:localhost:9229 user@remote.example.com +``` + +This starts a ssh tunnel session where a connection to port 9221 on your local machine will be forwarded to port 9229 on remote.example.com. You can now attach a debugger such as Chrome DevTools or Visual Studio Code to localhost:9221, which should be able to debug as if the Node.js application was running locally. + +--- + +## Legacy Debugger + +**The legacy debugger has been deprecated as of Node.js 7.7.0. Please use `--inspect` and Inspector instead.** + +When started with the **--debug** or **--debug-brk** switches in version 7 and earlier, Node.js listens for debugging commands defined by the discontinued V8 Debugging Protocol on a TCP port, by default `5858`. Any debugger client which speaks this protocol can connect to and debug the running process; a couple popular ones are listed below. + +The V8 Debugging Protocol is no longer maintained or documented. + +### [Built-in Debugger](https://nodejs.org/dist/latest-v6.x/docs/api/debugger.html) + +Start `node debug script_name.js` to start your script under the builtin command-line debugger. 
Your script starts in another Node.js process started with the `--debug-brk` option, and the initial Node.js process runs the `_debugger.js` script and connects to your target. + +### [node-inspector](https://github.com/node-inspector/node-inspector) + +Debug your Node.js app with Chrome DevTools by using an intermediary process which translates the Inspector Protocol used in Chromium to the V8 Debugger protocol used in Node.js. + + diff --git a/locale/ro/docs/guides/diagnostics-flamegraph.md b/locale/ro/docs/guides/diagnostics-flamegraph.md new file mode 100644 index 000000000000..678d5040802a --- /dev/null +++ b/locale/ro/docs/guides/diagnostics-flamegraph.md @@ -0,0 +1,121 @@ +--- +title: Diagnostics - Flame Graphs +layout: docs.hbs +--- + +# Flame Graphs + +## What's a flame graph useful for? + +Flame graphs are a way of visualizing CPU time spent in functions. They can help you pin down where you spend too much time doing synchronous operations. + +## How to create a flame graph + +You might have heard creating a flame graph for Node.js is difficult, but that's not true (anymore). Solaris vms are no longer needed for flame graphs! + +Flame graphs are generated from `perf` output, which is not a node-specific tool. While it's the most powerful way to visualize CPU time spent, it may have issues with how JavaScript code is optimized in Node.js 8 and above. See [perf output issues](#perf-output-issues) section below. + +### Use a pre-packaged tool + +If you want a single step that produces a flame graph locally, try [0x](https://www.npmjs.com/package/0x) + +For diagnosing production deployments, read these notes: [0x production servers](https://github.com/davidmarkclements/0x/blob/master/docs/production-servers.md) + +### Create a flame graph with system perf tools + +The purpose of this guide is to show steps involved in creating a flame graph and keep you in control of each step. 
+ +If you want to understand each step better, take a look at the sections that follow where we go into more detail. + +Now let's get to work. + +1. Install `perf` (usually available through the linux-tools-common package if not already installed) +2. try running `perf` - it might complain about missing kernel modules, install them too +3. run node with perf enabled (see [perf output issues](#perf-output-issues) for tips specific to Node.js versions) + + ```bash + perf record -e cycles:u -g -- node --perf-basic-prof app.js + ``` + +4. disregard warnings unless they're saying you can't run perf due to missing packages; you may get some warnings about not being able to access kernel module samples which you're not after anyway. +5. Run `perf script > perfs.out` to generate the data file you'll visualize in a moment. It's useful to [apply some cleanup](#filtering-out-node-js-internal-functions) for a more readable graph +6. install stackvis if not yet installed `npm i -g stackvis` +7. run `stackvis perf < perfs.out > flamegraph.htm` + +Now open the flame graph file in your favorite browser and watch it burn. It's color-coded so you can focus on the most saturated orange bars first. They're likely to represent CPU heavy functions. + +Worth mentioning - if you click an element of a flame graph a zoom-in of its surroundings will get displayed above the graph. + +### Using `perf` to sample a running process + +This is great for recording flame graph data from an already running process that you don't want to interrupt. Imagine a production process with a hard to reproduce issue. + +```bash +perf record -F99 -p `pgrep -n node` -g -- sleep 3 +``` + +Wait, what is that `sleep 3` for? It's there to keep the perf running - despite `-p` option pointing to a different pid, the command needs to be executed on a process and end with it. perf runs for the life of the command you pass to it, whether or not you're actually profiling that command. 
`sleep 3` ensures that perf runs for 3 seconds. + +Why is `-F` (profiling frequency) set to 99? It's a reasonable default. You can adjust if you want. `-F99` tells perf to take 99 samples per second, for more precision increase the value. Lower values should produce less output with less precise results. Precision you need depends on how long your CPU intensive functions really run. If you're looking for the reason of a noticeable slowdown, 99 frames per second should be more than enough. + +After you get that 3 second perf record, proceed with generating the flame graph with the last two steps from above. + +### Filtering out Node.js internal functions + +Usually you just want to look at the performance of your own calls, so filtering out Node.js and V8 internal functions can make the graph much easier to read. You can clean up your perf file with: + +```bash +sed -i \ + -e "/( __libc_start| LazyCompile | v8::internal::| Builtin:| Stub:| LoadIC:|\[unknown\]| LoadPolymorphicIC:)/d" \ + -e 's/ LazyCompile:[*~]\?/ /' \ + perfs.out +``` + +If you read your flame graph and it seems odd, as if something is missing in the key function taking up most time, try generating your flame graph without the filters - maybe you got a rare case of an issue with Node.js itself. + +### Node.js's profiling options + +`--perf-basic-prof-only-functions` and `--perf-basic-prof` are the two that are useful for debugging your JavaScript code. Other options are used for profiling Node.js itself, which is outside the scope of this guide. + +`--perf-basic-prof-only-functions` produces less output, so it's the option with least overhead. + +### Why do I need them at all? + +Well, without these options you'll still get a flame graph, but with most bars labeled `v8::Function::Call`. 
+ +## `perf` output issues + +### Node.js 8.x V8 pipeline changes + +Node.js 8.x and above ships with new optimizations to JavaScript compilation pipeline in V8 engine which makes function names/references unreachable for perf sometimes. (It's called Turbofan) + +The result is you might not get your function names right in the flame graph. + +You'll notice `ByteCodeHandler:` where you'd expect function names. + +[0x](https://www.npmjs.com/package/0x) has some mitigations for that built in. + +For details see: + +* https://github.com/nodejs/benchmarking/issues/168 +* https://github.com/nodejs/diagnostics/issues/148#issuecomment-369348961 + +### Node.js 10+ + +Node.js 10.x addresses the issue with Turbofan using the `--interpreted-frames-native-stack` flag. + +Run `node --interpreted-frames-native-stack --perf-basic-prof-only-functions` to get function names in the flame graph regardless of which pipeline V8 used to compile your JavaScript. + +### Broken labels in the flame graph + +If you're seeing labels looking like this + +``` +node`_ZN2v88internal11interpreter17BytecodeGenerator15VisitStatementsEPNS0_8ZoneListIPNS0_9StatementEEE +``` + +it means the Linux perf you're using was not compiled with demangle support, see https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1396654 for example + +## Examples + +Practice capturing flame graphs yourself with [a flame graph exercise](https://github.com/naugtur/node-example-flamegraph)! diff --git a/locale/ro/docs/guides/domain-postmortem.md b/locale/ro/docs/guides/domain-postmortem.md new file mode 100644 index 000000000000..243b24a9e760 --- /dev/null +++ b/locale/ro/docs/guides/domain-postmortem.md @@ -0,0 +1,338 @@ +--- +title: Domain Module Postmortem +layout: docs.hbs +--- + +# Domain Module Postmortem + +## Usability Issues + +### Implicit Behavior + +It's possible for a developer to create a new domain and then simply run `domain.enter()`. 
Which then acts as a catch-all for any exception in the future that couldn't be observed by the thrower. Allowing a module author to intercept the exceptions of unrelated code in a different module. Preventing the originator of the code from knowing about its own exceptions.
+
+Here's an example of how one indirectly linked module can affect another:
+
+```js
+// module a.js
+const b = require('./b');
+const c = require('./c');
+
+// module b.js
+const d = require('domain').create();
+d.on('error', () => { /* silence everything */ });
+d.enter();
+
+// module c.js
+const dep = require('some-dep');
+dep.method(); // Uh-oh! This method doesn't actually exist.
+```
+
+Since module `b` enters the domain but never exits, any uncaught exception will be swallowed. Leaving module `c` in the dark as to why it didn't run the entire script. Leaving a potentially partially populated `module.exports`. Doing this is not the same as listening for `'uncaughtException'`. As the latter is explicitly meant to globally catch errors. The other issue is that domains are processed prior to any `'uncaughtException'` handlers, and prevent them from running.
+
+Another issue is that domains route errors automatically if no `'error'` handler was set on the event emitter. There is no opt-in mechanism for this, and automatically propagates across the entire asynchronous chain. This may seem useful at first, but once asynchronous calls are two or more modules deep and one of them doesn't include an error handler the creator of the domain will suddenly be catching unexpected exceptions, and the thrower's exception will go unnoticed by the author.
+ +The following is a simple example of how a missing `'error'` handler allows the active domain to hijack the error: + +```js +const domain = require('domain'); +const net = require('net'); +const d = domain.create(); +d.on('error', (err) => console.error(err.message)); + +d.run(() => net.createServer((c) => { + c.end(); + c.write('bye'); +}).listen(8000)); +``` + +Even manually removing the connection via `d.remove(c)` does not prevent the connection's error from being automatically intercepted. + +Failures that plagues both error routing and exception handling are the inconsistencies in how errors are bubbled. The following is an example of how nested domains will and won't bubble the exception based on when they happen: + +```js +const domain = require('domain'); +const net = require('net'); +const d = domain.create(); +d.on('error', () => console.error('d intercepted an error')); + +d.run(() => { + const server = net.createServer((c) => { + const e = domain.create(); // No 'error' handler being set. + e.run(() => { + // This will not be caught by d's error handler. + setImmediate(() => { + throw new Error('thrown from setImmediate'); + }); + // Though this one will bubble to d's error handler. + throw new Error('immediately thrown'); + }); + }).listen(8080); +}); +``` + +It may be expected that nested domains always remain nested, and will always propagate the exception up the domain stack. Or that exceptions will never automatically bubble. Unfortunately both these situations occur, leading to potentially confusing behavior that may even be prone to difficult to debug timing conflicts. + +### API Gaps + +While APIs based on using `EventEmitter` can use `bind()` and errback style callbacks can use `intercept()`, alternative APIs that implicitly bind to the active domain must be executed inside of `run()`. Meaning if module authors wanted to support domains using a mechanism alternative to those mentioned they must manually implement domain support themselves. 
Instead of being able to leverage the implicit mechanisms already in place. + +### Error Propagation + +Propagating errors across nested domains is not straight forward, if even possible. Existing documentation shows a simple example of how to `close()` an `http` server if there is an error in the request handler. What it does not explain is how to close the server if the request handler creates another domain instance for another async request. Using the following as a simple example of the failing of error propagation: + +```js +const d1 = domain.create(); +d1.foo = true; // custom member to make more visible in console +d1.on('error', (er) => { /* handle error */ }); + +d1.run(() => setTimeout(() => { + const d2 = domain.create(); + d2.bar = 43; + d2.on('error', (er) => console.error(er.message, domain._stack)); + d2.run(() => { + setTimeout(() => { + setTimeout(() => { + throw new Error('outer'); + }); + throw new Error('inner'); + }); + }); +})); +``` + +Even in the case that the domain instances are being used for local storage so access to resources are made available there is still no way to allow the error to continue propagating from `d2` back to `d1`. Quick inspection may tell us that simply throwing from `d2`'s domain `'error'` handler would allow `d1` to then catch the exception and execute its own error handler. Though that is not the case. Upon inspection of `domain._stack` you'll see that the stack only contains `d2`. + +This may be considered a failing of the API, but even if it did operate in this way there is still the issue of transmitting the fact that a branch in the asynchronous execution has failed, and that all further operations in that branch must cease. In the example of the http request handler, if we fire off several asynchronous requests and each one then `write()`'s data back to the client many more errors will arise from attempting to `write()` to a closed handle. More on this in _Resource Cleanup on Exception_. 
+ +### Resource Cleanup on Exception + +The following script contains a more complex example of properly cleaning up in a small resource dependency tree in the case that an exception occurs in a given connection or any of its dependencies. Breaking down the script into its basic operations: + +```js +'use strict'; + +const domain = require('domain'); +const EE = require('events'); +const fs = require('fs'); +const net = require('net'); +const util = require('util'); +const print = process._rawDebug; + +const pipeList = []; +const FILENAME = '/tmp/tmp.tmp'; +const PIPENAME = '/tmp/node-domain-example-'; +const FILESIZE = 1024; +let uid = 0; + +// Setting up temporary resources +const buf = Buffer.alloc(FILESIZE); +for (let i = 0; i < buf.length; i++) + buf[i] = ((Math.random() * 1e3) % 78) + 48; // Basic ASCII +fs.writeFileSync(FILENAME, buf); + +function ConnectionResource(c) { + EE.call(this); + this._connection = c; + this._alive = true; + this._domain = domain.create(); + this._id = Math.random().toString(32).substr(2).substr(0, 8) + (++uid); + + this._domain.add(c); + this._domain.on('error', () => { + this._alive = false; + }); +} +util.inherits(ConnectionResource, EE); + +ConnectionResource.prototype.end = function end(chunk) { + this._alive = false; + this._connection.end(chunk); + this.emit('end'); +}; + +ConnectionResource.prototype.isAlive = function isAlive() { + return this._alive; +}; + +ConnectionResource.prototype.id = function id() { + return this._id; +}; + +ConnectionResource.prototype.write = function write(chunk) { + this.emit('data', chunk); + return this._connection.write(chunk); +}; + +// Example begin +net.createServer((c) => { + const cr = new ConnectionResource(c); + + const d1 = domain.create(); + fs.open(FILENAME, 'r', d1.intercept((fd) => { + streamInParts(fd, cr, 0); + })); + + pipeData(cr); + + c.on('close', () => cr.end()); +}).listen(8080); + +function streamInParts(fd, cr, pos) { + const d2 = domain.create(); + const alive = true; 
+ d2.on('error', (er) => { + print('d2 error:', er.message); + cr.end(); + }); + fs.read(fd, Buffer.alloc(10), 0, 10, pos, d2.intercept((bRead, buf) => { + if (!cr.isAlive()) { + return fs.close(fd); + } + if (cr._connection.bytesWritten < FILESIZE) { + // Documentation says callback is optional, but doesn't mention that if + // the write fails an exception will be thrown. + const goodtogo = cr.write(buf); + if (goodtogo) { + setTimeout(() => streamInParts(fd, cr, pos + bRead), 1000); + } else { + cr._connection.once('drain', () => streamInParts(fd, cr, pos + bRead)); + } + return; + } + cr.end(buf); + fs.close(fd); + })); +} + +function pipeData(cr) { + const pname = PIPENAME + cr.id(); + const ps = net.createServer(); + const d3 = domain.create(); + const connectionList = []; + d3.on('error', (er) => { + print('d3 error:', er.message); + cr.end(); + }); + d3.add(ps); + ps.on('connection', (conn) => { + connectionList.push(conn); + conn.on('data', () => {}); // don't care about incoming data. 
+ conn.on('close', () => { + connectionList.splice(connectionList.indexOf(conn), 1); + }); + }); + cr.on('data', (chunk) => { + for (let i = 0; i < connectionList.length; i++) { + connectionList[i].write(chunk); + } + }); + cr.on('end', () => { + for (let i = 0; i < connectionList.length; i++) { + connectionList[i].end(); + } + ps.close(); + }); + pipeList.push(pname); + ps.listen(pname); +} + +process.on('SIGINT', () => process.exit()); +process.on('exit', () => { + try { + for (let i = 0; i < pipeList.length; i++) { + fs.unlinkSync(pipeList[i]); + } + fs.unlinkSync(FILENAME); + } catch (e) { } +}); + +``` + +* When a new connection happens, concurrently: + * Open a file on the file system + * Open Pipe to unique socket +* Read a chunk of the file asynchronously +* Write chunk to both the TCP connection and any listening sockets +* If any of these resources error, notify all other attached resources that they need to clean up and shutdown + +As we can see from this example a lot more must be done to properly clean up resources when something fails than what can be done strictly through the domain API. All that domains offer is an exception aggregation mechanism. Even the potentially useful ability to propagate data with the domain is easily countered, in this example, by passing the needed resources as a function argument. + +One problem domains perpetuated was the supposed simplicity of being able to continue execution, contrary to what the documentation stated, of the application despite an unexpected exception. This example demonstrates the fallacy behind that idea. + +Attempting proper resource cleanup on unexpected exception becomes more complex as the application itself grows in complexity. This example only has 3 basic resources in play, and all of them with a clear dependency path. If an application uses something like shared resources or resource reuse the ability to cleanup, and properly test that cleanup has been done, grows greatly. 
+
+In the end, in terms of handling errors, domains aren't much more than a glorified `'uncaughtException'` handler. Except with more implicit and unobservable behavior by third-parties.
+
+### Resource Propagation
+
+Another use case for domains was to use them to propagate data along asynchronous data paths. One problematic point is the ambiguity of when to expect the correct domain when there are multiple in the stack (which must be assumed if the async stack works with other modules). Also the conflict between being able to depend on a domain for error handling while also having it available to retrieve the necessary data.
+
+The following is an involved example demonstrating the failure of using domains to propagate data along asynchronous stacks:
+
+```js
+const domain = require('domain');
+const net = require('net');
+
+const server = net.createServer((c) => {
+  // Use a domain to propagate data across events within the
+  // connection so that we don't have to pass arguments
+  // everywhere.
+  const d = domain.create();
+  d.data = { connection: c };
+  d.add(c);
+  // Mock class that does some useless async data transformation
+  // for demonstration purposes.
+  const ds = new DataStream(dataTransformed);
+  c.on('data', (chunk) => ds.data(chunk));
+}).listen(8080, () => console.log('listening on 8080'));
+
+function dataTransformed(chunk) {
+  // FAIL! Because the DataStream instance also created a
+  // domain we have now lost the active domain we had
+  // hoped to use.
+  domain.active.data.connection.write(chunk);
+}
+
+function DataStream(cb) {
+  this.cb = cb;
+  // DataStream wants to use domains for data propagation too!
+  // Unfortunately this will conflict with any domain that
+  // already exists.
+  this.domain = domain.create();
+  this.domain.data = { inst: this };
+}
+
+DataStream.prototype.data = function data(chunk) {
+  // This code is self contained, but pretend it's a complex
+  // operation that crosses at least one other module. 
So + // passing along "this", etc., is not easy. + this.domain.run(() => { + // Simulate an async operation that does the data transform. + setImmediate(() => { + for (let i = 0; i < chunk.length; i++) + chunk[i] = ((chunk[i] + Math.random() * 100) % 96) + 33; + // Grab the instance from the active domain and use that + // to call the user's callback. + const self = domain.active.data.inst; + self.cb(chunk); + }); + }); +}; +``` + +The above shows that it is difficult to have more than one asynchronous API attempt to use domains to propagate data. This example could possibly be fixed by assigning `parent: domain.active` in the `DataStream` constructor. Then restoring it via `domain.active = domain.active.data.parent` just before the user's callback is called. Also the instantiation of `DataStream` in the `'connection'` callback must be run inside `d.run()`, instead of simply using `d.add(c)`, otherwise there will be no active domain. + +In short, for this to have a prayer of a chance usage would need to strictly adhere to a set of guidelines that would be difficult to enforce or test. + +## Performance Issues + +A significant deterrent from using domains is the overhead. Using node's built-in http benchmark, `http_simple.js`, without domains it can handle over 22,000 requests/second. Whereas if it's run with `NODE_USE_DOMAINS=1` that number drops down to under 17,000 requests/second. In this case there is only a single global domain. If we edit the benchmark so the http request callback creates a new domain instance performance drops further to 15,000 requests/second. + +While this probably wouldn't affect a server only serving a few hundred or even a thousand requests per second, the amount of overhead is directly proportional to the number of asynchronous requests made. So if a single connection needs to connect to several other services all of those will contribute to the overall latency of delivering the final product to the client. 
+ +Using `AsyncWrap` and tracking the number of times `init`/`pre`/`post`/`destroy` are called in the mentioned benchmark we find that the sum of all events called is over 170,000 times per second. This means even adding 1 microsecond overhead per call for any type of setup or tear down will result in a 17% performance loss. Granted, this is for the optimized scenario of the benchmark, but I believe this demonstrates the necessity for a mechanism such as domain to be as cheap to run as possible. + +## Looking Ahead + +The domain module has been soft deprecated since Dec 2014, but has not yet been removed because node offers no alternative functionality at the moment. As of this writing there is ongoing work building out the `AsyncWrap` API and a proposal for Zones being prepared for the TC39. At such time there is suitable functionality to replace domains it will undergo the full deprecation cycle and eventually be removed from core. diff --git a/locale/ro/docs/guides/dont-block-the-event-loop.md b/locale/ro/docs/guides/dont-block-the-event-loop.md new file mode 100644 index 000000000000..539fafc8372d --- /dev/null +++ b/locale/ro/docs/guides/dont-block-the-event-loop.md @@ -0,0 +1,385 @@ +--- +title: Don't Block the Event Loop (or the Worker Pool) +layout: docs.hbs +--- + +# Don't Block the Event Loop (or the Worker Pool) + +## Should you read this guide? +If you're writing anything more complicated than a brief command-line script, reading this should help you write higher-performance, more-secure applications. + +This document is written with Node.js servers in mind, but the concepts apply to complex Node.js applications as well. Where OS-specific details vary, this document is Linux-centric. + +## Summary +Node.js runs JavaScript code in the Event Loop (initialization and callbacks), and offers a Worker Pool to handle expensive tasks like file I/O. Node.js scales well, sometimes better than more heavyweight approaches like Apache. 
The secret to the scalability of Node.js is that it uses a small number of threads to handle many clients. If Node.js can make do with fewer threads, then it can spend more of your system's time and memory working on clients rather than on paying space and time overheads for threads (memory, context-switching). But because Node.js has only a few threads, you must structure your application to use them wisely. + +Here's a good rule of thumb for keeping your Node.js server speedy: *Node.js is fast when the work associated with each client at any given time is "small"*. + +This applies to callbacks on the Event Loop and tasks on the Worker Pool. + +## Why should I avoid blocking the Event Loop and the Worker Pool? +Node.js uses a small number of threads to handle many clients. In Node.js there are two types of threads: one Event Loop (aka the main loop, main thread, event thread, etc.), and a pool of `k` Workers in a Worker Pool (aka the threadpool). + +If a thread is taking a long time to execute a callback (Event Loop) or a task (Worker), we call it "blocked". While a thread is blocked working on behalf of one client, it cannot handle requests from any other clients. This provides two motivations for blocking neither the Event Loop nor the Worker Pool: + +1. Performance: If you regularly perform heavyweight activity on either type of thread, the *throughput* (requests/second) of your server will suffer. +2. Security: If it is possible that for certain input one of your threads might block, a malicious client could submit this "evil input", make your threads block, and keep them from working on other clients. This would be a [Denial of Service](https://en.wikipedia.org/wiki/Denial-of-service_attack) attack. + +## A quick review of Node + +Node.js uses the Event-Driven Architecture: it has an Event Loop for orchestration and a Worker Pool for expensive tasks. + +### What code runs on the Event Loop? 
+When they begin, Node.js applications first complete an initialization phase, `require`'ing modules and registering callbacks for events. Node.js applications then enter the Event Loop, responding to incoming client requests by executing the appropriate callback. This callback executes synchronously, and may register asynchronous requests to continue processing after it completes. The callbacks for these asynchronous requests will also be executed on the Event Loop. + +The Event Loop will also fulfill the non-blocking asynchronous requests made by its callbacks, e.g., network I/O. + +In summary, the Event Loop executes the JavaScript callbacks registered for events, and is also responsible for fulfilling non-blocking asynchronous requests like network I/O. + +### What code runs on the Worker Pool? +The Worker Pool of Node.js is implemented in libuv ([docs](http://docs.libuv.org/en/v1.x/threadpool.html)), which exposes a general task submission API. + +Node.js uses the Worker Pool to handle "expensive" tasks. This includes I/O for which an operating system does not provide a non-blocking version, as well as particularly CPU-intensive tasks. + +These are the Node.js module APIs that make use of this Worker Pool: + +1. I/O-intensive + 1. [DNS](https://nodejs.org/api/dns.html): `dns.lookup()`, `dns.lookupService()`. + 2. [File System](https://nodejs.org/api/fs.html#fs_threadpool_usage): All file system APIs except `fs.FSWatcher()` and those that are explicitly synchronous use libuv's threadpool. +2. CPU-intensive + 1. [Crypto](https://nodejs.org/api/crypto.html): `crypto.pbkdf2()`, `crypto.scrypt()`, `crypto.randomBytes()`, `crypto.randomFill()`, `crypto.generateKeyPair()`. + 2. [Zlib](https://nodejs.org/api/zlib.html#zlib_threadpool_usage): All zlib APIs except those that are explicitly synchronous use libuv's threadpool. + +In many Node.js applications, these APIs are the only sources of tasks for the Worker Pool. 
Applications and modules that use a [C++ add-on](https://nodejs.org/api/addons.html) can submit other tasks to the Worker Pool. + +For the sake of completeness, we note that when you call one of these APIs from a callback on the Event Loop, the Event Loop pays some minor setup costs as it enters the Node.js C++ bindings for that API and submits a task to the Worker Pool. These costs are negligible compared to the overall cost of the task, which is why the Event Loop is offloading it. When submitting one of these tasks to the Worker Pool, Node.js provides a pointer to the corresponding C++ function in the Node.js C++ bindings. + +### How does Node.js decide what code to run next? +Abstractly, the Event Loop and the Worker Pool maintain queues for pending events and pending tasks, respectively. + +In truth, the Event Loop does not actually maintain a queue. Instead, it has a collection of file descriptors that it asks the operating system to monitor, using a mechanism like [epoll](http://man7.org/linux/man-pages/man7/epoll.7.html) (Linux), [kqueue](https://developer.apple.com/library/content/documentation/Darwin/Conceptual/FSEvents_ProgGuide/KernelQueues/KernelQueues.html) (OSX), event ports (Solaris), or [IOCP](https://msdn.microsoft.com/en-us/library/windows/desktop/aa365198.aspx) (Windows). These file descriptors correspond to network sockets, any files it is watching, and so on. When the operating system says that one of these file descriptors is ready, the Event Loop translates it to the appropriate event and invokes the callback(s) associated with that event. You can learn more about this process [here](https://www.youtube.com/watch?v=P9csgxBgaZ8). + +In contrast, the Worker Pool uses a real queue whose entries are tasks to be processed. A Worker pops a task from this queue and works on it, and when finished the Worker raises an "At least one task is finished" event for the Event Loop. + +### What does this mean for application design? 
+In a one-thread-per-client system like Apache, each pending client is assigned its own thread. If a thread handling one client blocks, the operating system will interrupt it and give another client a turn. The operating system thus ensures that clients that require a small amount of work are not penalized by clients that require more work. + +Because Node.js handles many clients with few threads, if a thread blocks handling one client's request, then pending client requests may not get a turn until the thread finishes its callback or task. *The fair treatment of clients is thus the responsibility of your application*. This means that you shouldn't do too much work for any client in any single callback or task. + +This is part of why Node.js can scale well, but it also means that you are responsible for ensuring fair scheduling. The next sections talk about how to ensure fair scheduling for the Event Loop and for the Worker Pool. + +## Don't block the Event Loop +The Event Loop notices each new client connection and orchestrates the generation of a response. All incoming requests and outgoing responses pass through the Event Loop. This means that if the Event Loop spends too long at any point, all current and new clients will not get a turn. + +You should make sure you never block the Event Loop. In other words, each of your JavaScript callbacks should complete quickly. This of course also applies to your `await`'s, your `Promise.then`'s, and so on. + +A good way to ensure this is to reason about the ["computational complexity"](https://en.wikipedia.org/wiki/Time_complexity) of your callbacks. If your callback takes a constant number of steps no matter what its arguments are, then you'll always give every pending client a fair turn. If your callback takes a different number of steps depending on its arguments, then you should think about how long the arguments might be. + +Example 1: A constant-time callback. 
+ +```javascript +app.get('/constant-time', (req, res) => { + res.sendStatus(200); +}); +``` + +Example 2: An `O(n)` callback. This callback will run quickly for small `n` and more slowly for large `n`. + +```javascript +app.get('/countToN', (req, res) => { + let n = req.query.n; + + // n iterations before giving someone else a turn + for (let i = 0; i < n; i++) { + console.log(`Iter ${i}`); + } + + res.sendStatus(200); +}); +``` + +Example 3: An `O(n^2)` callback. This callback will still run quickly for small `n`, but for large `n` it will run much more slowly than the previous `O(n)` example. + +```javascript +app.get('/countToN2', (req, res) => { + let n = req.query.n; + + // n^2 iterations before giving someone else a turn + for (let i = 0; i < n; i++) { + for (let j = 0; j < n; j++) { + console.log(`Iter ${i}.${j}`); + } + } + + res.sendStatus(200); +}); +``` + +### How careful should you be? +Node.js uses the Google V8 engine for JavaScript, which is quite fast for many common operations. Exceptions to this rule are regexps and JSON operations, discussed below. + +However, for complex tasks you should consider bounding the input and rejecting inputs that are too long. That way, even if your callback has large complexity, by bounding the input you ensure the callback cannot take more than the worst-case time on the longest acceptable input. You can then evaluate the worst-case cost of this callback and determine whether its running time is acceptable in your context. + +### Blocking the Event Loop: REDOS +One common way to block the Event Loop disastrously is by using a "vulnerable" [regular expression](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions). + +#### Avoiding vulnerable regular expressions +A regular expression (regexp) matches an input string against a pattern. We usually think of a regexp match as requiring a single pass through the input string --- `O(n)` time where `n` is the length of the input string. 
In many cases, a single pass is indeed all it takes. Unfortunately, in some cases the regexp match might require an exponential number of trips through the input string --- `O(2^n)` time. An exponential number of trips means that if the engine requires `x` trips to determine a match, it will need `2*x` trips if we add only one more character to the input string. Since the number of trips is linearly related to the time required, the effect of this evaluation will be to block the Event Loop. + +A *vulnerable regular expression* is one on which your regular expression engine might take exponential time, exposing you to [REDOS](https://www.owasp.org/index.php/Regular_expression_Denial_of_Service_-_ReDoS) on "evil input". Whether or not your regular expression pattern is vulnerable (i.e. the regexp engine might take exponential time on it) is actually a difficult question to answer, and varies depending on whether you're using Perl, Python, Ruby, Java, JavaScript, etc., but here are some rules of thumb that apply across all of these languages: + +1. Avoid nested quantifiers like `(a+)*`. V8's regexp engine can handle some of these quickly, but others are vulnerable. +2. Avoid OR's with overlapping clauses, like `(a|a)*`. Again, these are sometimes-fast. +3. Avoid using backreferences, like `(a.*) \1`. No regexp engine can guarantee evaluating these in linear time. +4. If you're doing a simple string match, use `indexOf` or the local equivalent. It will be cheaper and will never take more than `O(n)`. + +If you aren't sure whether your regular expression is vulnerable, remember that Node.js generally doesn't have trouble reporting a *match* even for a vulnerable regexp and a long input string. The exponential behavior is triggered when there is a mismatch but Node.js can't be certain until it tries many paths through the input string. 
+ +#### A REDOS example +Here is an example vulnerable regexp exposing its server to REDOS: + +```javascript +app.get('/redos-me', (req, res) => { + let filePath = req.query.filePath; + + // REDOS + if (filePath.match(/(\/.+)+$/)) { + console.log('valid path'); + } + else { + console.log('invalid path'); + } + + res.sendStatus(200); +}); +``` + +The vulnerable regexp in this example is a (bad!) way to check for a valid path on Linux. It matches strings that are a sequence of "/"-delimited names, like "/a/b/c". It is dangerous because it violates rule 1: it has a doubly-nested quantifier. + +If a client queries with filePath `///.../\n` (100 /'s followed by a newline character that the regexp's "." won't match), then the Event Loop will take effectively forever, blocking the Event Loop. This client's REDOS attack causes all other clients not to get a turn until the regexp match finishes. + +For this reason, you should be leery of using complex regular expressions to validate user input. + +#### Anti-REDOS Resources +There are some tools to check your regexps for safety, like + +* [safe-regex](https://github.com/substack/safe-regex) +* [rxxr2](http://www.cs.bham.ac.uk/~hxt/research/rxxr2/). However, neither of these will catch all vulnerable regexps. + +Another approach is to use a different regexp engine. You could use the [node-re2](https://github.com/uhop/node-re2) module, which uses Google's blazing-fast [RE2](https://github.com/google/re2) regexp engine. But be warned, RE2 is not 100% compatible with V8's regexps, so check for regressions if you swap in the node-re2 module to handle your regexps. And particularly complicated regexps are not supported by node-re2. + +If you're trying to match something "obvious", like a URL or a file path, find an example in a [regexp library](http://www.regexlib.com) or use an npm module, e.g. [ip-regex](https://www.npmjs.com/package/ip-regex). 
+ +### Blocking the Event Loop: Node.js core modules +Several Node.js core modules have synchronous expensive APIs, including: + +* [Encryption](https://nodejs.org/api/crypto.html) +* [Compression](https://nodejs.org/api/zlib.html) +* [File system](https://nodejs.org/api/fs.html) +* [Child process](https://nodejs.org/api/child_process.html) + +These APIs are expensive, because they involve significant computation (encryption, compression), require I/O (file I/O), or potentially both (child process). These APIs are intended for scripting convenience, but are not intended for use in the server context. If you execute them on the Event Loop, they will take far longer to complete than a typical JavaScript instruction, blocking the Event Loop. + +In a server, *you should not use the following synchronous APIs from these modules*: + +* Encryption: + * `crypto.randomBytes` (synchronous version) + * `crypto.randomFillSync` + * `crypto.pbkdf2Sync` + * You should also be careful about providing large input to the encryption and decryption routines. +* Compression: + * `zlib.inflateSync` + * `zlib.deflateSync` +* File system: + * Do not use the synchronous file system APIs. For example, if the file you access is in a [distributed file system](https://en.wikipedia.org/wiki/Clustered_file_system#Distributed_file_systems) like [NFS](https://en.wikipedia.org/wiki/Network_File_System), access times can vary widely. +* Child process: + * `child_process.spawnSync` + * `child_process.execSync` + * `child_process.execFileSync` + +This list is reasonably complete as of Node.js v9. + +### Blocking the Event Loop: JSON DOS +`JSON.parse` and `JSON.stringify` are other potentially expensive operations. While these are `O(n)` in the length of the input, for large `n` they can take surprisingly long. + +If your server manipulates JSON objects, particularly those from a client, you should be cautious about the size of the objects or strings you work with on the Event Loop. 
+ +Example: JSON blocking. We create an object `obj` of size 2^21 and `JSON.stringify` it, run `indexOf` on the string, and then JSON.parse it. The `JSON.stringify`'d string is 50MB. It takes 0.7 seconds to stringify the object, 0.03 seconds to indexOf on the 50MB string, and 1.3 seconds to parse the string. + +```javascript +var obj = { a: 1 }; +var niter = 20; + +var before, str, pos, res, took; + +for (var i = 0; i < niter; i++) { + obj = { obj1: obj, obj2: obj }; // Doubles in size each iter +} + +before = process.hrtime(); +str = JSON.stringify(obj); +took = process.hrtime(before); +console.log('JSON.stringify took ' + took); + +before = process.hrtime(); +pos = str.indexOf('nomatch'); +took = process.hrtime(before); +console.log('Pure indexof took ' + took); + +before = process.hrtime(); +res = JSON.parse(str); +took = process.hrtime(before); +console.log('JSON.parse took ' + took); +``` + +There are npm modules that offer asynchronous JSON APIs. See for example: + +* [JSONStream](https://www.npmjs.com/package/JSONStream), which has stream APIs. +* [Big-Friendly JSON](https://www.npmjs.com/package/bfj), which has stream APIs as well as asynchronous versions of the standard JSON APIs using the partitioning-on-the-Event-Loop paradigm outlined below. + +### Complex calculations without blocking the Event Loop +Suppose you want to do complex calculations in JavaScript without blocking the Event Loop. You have two options: partitioning or offloading. + +#### Partitioning +You could *partition* your calculations so that each runs on the Event Loop but regularly yields (gives turns to) other pending events. In JavaScript it's easy to save the state of an ongoing task in a closure, as shown in example 2 below. + +For a simple example, suppose you want to compute the average of the numbers `1` to `n`. 
+ +Example 1: Un-partitioned average, costs `O(n)` + +```javascript +for (let i = 0; i < n; i++) + sum += i; +let avg = sum / n; +console.log('avg: ' + avg); +``` + +Example 2: Partitioned average, each of the `n` asynchronous steps costs `O(1)`. + +```javascript +function asyncAvg(n, avgCB) { + // Save ongoing sum in JS closure. + var sum = 0; + function help(i, cb) { + sum += i; + if (i == n) { + cb(sum); + return; + } + + // "Asynchronous recursion". + // Schedule next operation asynchronously. + setImmediate(help.bind(null, i+1, cb)); + } + + // Start the helper, with CB to call avgCB. + help(1, function(sum){ + var avg = sum/n; + avgCB(avg); + }); +} + +asyncAvg(n, function(avg){ + console.log('avg of 1-n: ' + avg); +}); +``` + +You can apply this principle to array iterations and so forth. + +#### Offloading +If you need to do something more complex, partitioning is not a good option. This is because partitioning uses only the Event Loop, and you won't benefit from multiple cores almost certainly available on your machine. *Remember, the Event Loop should orchestrate client requests, not fulfill them itself.* For a complicated task, move the work off of the Event Loop onto a Worker Pool. + +##### How to offload +You have two options for a destination Worker Pool to which to offload work. + +1. You can use the built-in Node.js Worker Pool by developing a [C++ addon](https://nodejs.org/api/addons.html). On older versions of Node, build your C++ addon using [NAN](https://github.com/nodejs/nan), and on newer versions use [N-API](https://nodejs.org/api/n-api.html). [node-webworker-threads](https://www.npmjs.com/package/webworker-threads) offers a JavaScript-only way to access the Node.js Worker Pool. +2. You can create and manage your own Worker Pool dedicated to computation rather than the Node.js I/O-themed Worker Pool. 
The most straightforward way to do this is using [Child Process](https://nodejs.org/api/child_process.html) or [Cluster](https://nodejs.org/api/cluster.html).
+
+You should *not* simply create a [Child Process](https://nodejs.org/api/child_process.html) for every client. You can receive client requests more quickly than you can create and manage children, and your server might become a [fork bomb](https://en.wikipedia.org/wiki/Fork_bomb).
+
+##### Downside of offloading
+The downside of the offloading approach is that it incurs overhead in the form of *communication costs*. Only the Event Loop is allowed to see the "namespace" (JavaScript state) of your application. From a Worker, you cannot manipulate a JavaScript object in the Event Loop's namespace. Instead, you have to serialize and deserialize any objects you wish to share. Then the Worker can operate on its own copy of these object(s) and return the modified object (or a "patch") to the Event Loop.
+
+For serialization concerns, see the section on JSON DOS.
+
+##### Some suggestions for offloading
+You may wish to distinguish between CPU-intensive and I/O-intensive tasks because they have markedly different characteristics.
+
+A CPU-intensive task only makes progress when its Worker is scheduled, and the Worker must be scheduled onto one of your machine's [logical cores](https://nodejs.org/api/os.html#os_os_cpus). If you have 4 logical cores and 5 Workers, one of these Workers cannot make progress. As a result, you are paying overhead (memory and scheduling costs) for this Worker and getting no return for it.
+
+I/O-intensive tasks involve querying an external service provider (DNS, file system, etc.) and waiting for its response. While a Worker with an I/O-intensive task is waiting for its response, it has nothing else to do and can be de-scheduled by the operating system, giving another Worker a chance to submit their request. 
Thus, *I/O-intensive tasks will be making progress even while the associated thread is not running*. External service providers like databases and file systems have been highly optimized to handle many pending requests concurrently. For example, a file system will examine a large set of pending write and read requests to merge conflicting updates and to retrieve files in an optimal order (e.g. see [these slides](http://researcher.ibm.com/researcher/files/il-AVISHAY/01-block_io-v1.3.pdf)). + +If you rely on only one Worker Pool, e.g. the Node.js Worker Pool, then the differing characteristics of CPU-bound and I/O-bound work may harm your application's performance. + +For this reason, you might wish to maintain a separate Computation Worker Pool. + +#### Offloading: conclusions +For simple tasks, like iterating over the elements of an arbitrarily long array, partitioning might be a good option. If your computation is more complex, offloading is a better approach: the communication costs, i.e. the overhead of passing serialized objects between the Event Loop and the Worker Pool, are offset by the benefit of using multiple cores. + +However, if your server relies heavily on complex calculations, you should think about whether Node.js is really a good fit. Node.js excels for I/O-bound work, but for expensive computation it might not be the best option. + +If you take the offloading approach, see the section on not blocking the Worker Pool. + +## Don't block the Worker Pool +Node.js has a Worker Pool composed of `k` Workers. If you are using the Offloading paradigm discussed above, you might have a separate Computational Worker Pool, to which the same principles apply. In either case, let us assume that `k` is much smaller than the number of clients you might be handling concurrently. This is in keeping with the "one thread for many clients" philosophy of Node.js, the secret to its scalability. 
+ +As discussed above, each Worker completes its current Task before proceeding to the next one on the Worker Pool queue. + +Now, there will be variation in the cost of the Tasks required to handle your clients' requests. Some Tasks can be completed quickly (e.g. reading short or cached files, or producing a small number of random bytes), and others will take longer (e.g. reading larger or uncached files, or generating more random bytes). Your goal should be to *minimize the variation in Task times*, and you should use *Task partitioning* to accomplish this. + +### Minimizing the variation in Task times +If a Worker's current Task is much more expensive than other Tasks, then it will be unavailable to work on other pending Tasks. In other words, *each relatively long Task effectively decreases the size of the Worker Pool by one until it is completed*. This is undesirable because, up to a point, the more Workers in the Worker Pool, the greater the Worker Pool throughput (tasks/second) and thus the greater the server throughput (client requests/second). One client with a relatively expensive Task will decrease the throughput of the Worker Pool, in turn decreasing the throughput of the server. + +To avoid this, you should try to minimize variation in the length of Tasks you submit to the Worker Pool. While it is appropriate to treat the external systems accessed by your I/O requests (DB, FS, etc.) as black boxes, you should be aware of the relative cost of these I/O requests, and should avoid submitting requests you can expect to be particularly long. + +Two examples should illustrate the possible variation in task times. + +#### Variation example: Long-running file system reads +Suppose your server must read files in order to handle some client requests. After consulting the Node.js [File system](https://nodejs.org/api/fs.html) APIs, you opted to use `fs.readFile()` for simplicity. 
However, `fs.readFile()` is ([currently](https://github.com/nodejs/node/pull/17054)) not partitioned: it submits a single `fs.read()` Task spanning the entire file. If you read shorter files for some users and longer files for others, `fs.readFile()` may introduce significant variation in Task lengths, to the detriment of Worker Pool throughput. + +For a worst-case scenario, suppose an attacker can convince your server to read an *arbitrary* file (this is a [directory traversal vulnerability](https://www.owasp.org/index.php/Path_Traversal)). If your server is running Linux, the attacker can name an extremely slow file: [`/dev/random`](http://man7.org/linux/man-pages/man4/random.4.html). For all practical purposes, `/dev/random` is infinitely slow, and every Worker asked to read from `/dev/random` will never finish that Task. An attacker then submits `k` requests, one for each Worker, and no other client requests that use the Worker Pool will make progress. + +#### Variation example: Long-running crypto operations +Suppose your server generates cryptographically secure random bytes using [`crypto.randomBytes()`](https://nodejs.org/api/crypto.html#crypto_crypto_randombytes_size_callback). `crypto.randomBytes()` is not partitioned: it creates a single `randomBytes()` Task to generate as many bytes as you requested. If you create fewer bytes for some users and more bytes for others, `crypto.randomBytes()` is another source of variation in Task lengths. + +### Task partitioning +Tasks with variable time costs can harm the throughput of the Worker Pool. To minimize variation in Task times, as far as possible you should *partition* each Task into comparable-cost sub-Tasks. When each sub-Task completes it should submit the next sub-Task, and when the final sub-Task completes it should notify the submitter. + +To continue the `fs.readFile()` example, you should instead use `fs.read()` (manual partitioning) or `ReadStream` (automatically partitioned). 
+ +The same principle applies to CPU-bound tasks; the `asyncAvg` example might be inappropriate for the Event Loop, but it is well suited to the Worker Pool. + +When you partition a Task into sub-Tasks, shorter Tasks expand into a small number of sub-Tasks, and longer Tasks expand into a larger number of sub-Tasks. Between each sub-Task of a longer Task, the Worker to which it was assigned can work on a sub-Task from another, shorter, Task, thus improving the overall Task throughput of the Worker Pool. + +Note that the number of sub-Tasks completed is not a useful metric for the throughput of the Worker Pool. Instead, concern yourself with the number of *Tasks* completed. + +### Avoiding Task partitioning +Recall that the purpose of Task partitioning is to minimize the variation in Task times. If you can distinguish between shorter Tasks and longer Tasks (e.g. summing an array vs. sorting an array), you could create one Worker Pool for each class of Task. Routing shorter Tasks and longer Tasks to separate Worker Pools is another way to minimize Task time variation. + +In favor of this approach, partitioning Tasks incurs overhead (the costs of creating a Worker Pool Task representation and of manipulating the Worker Pool queue), and avoiding partitioning saves you the costs of additional trips to the Worker Pool. It also keeps you from making mistakes in partitioning your Tasks. + +The downside of this approach is that Workers in all of these Worker Pools will incur space and time overheads and will compete with each other for CPU time. Remember that each CPU-bound Task makes progress only while it is scheduled. As a result, you should only consider this approach after careful analysis. + +### Worker Pool: conclusions +Whether you use only the Node.js Worker Pool or maintain separate Worker Pool(s), you should optimize the Task throughput of your Pool(s). + +To do this, minimize the variation in Task times by using Task partitioning. 
+ +## The risks of npm modules +While the Node.js core modules offer building blocks for a wide variety of applications, sometimes something more is needed. Node.js developers benefit tremendously from the [npm ecosystem](https://www.npmjs.com/), with hundreds of thousands of modules offering functionality to accelerate your development process. + +Remember, however, that the majority of these modules are written by third-party developers and are generally released with only best-effort guarantees. A developer using an npm module should be concerned about two things, though the latter is frequently forgotten. + +1. Does it honor its APIs? +2. Might its APIs block the Event Loop or a Worker? Many modules make no effort to indicate the cost of their APIs, to the detriment of the community. + +For simple APIs you can estimate the cost of the APIs; the cost of string manipulation isn't hard to fathom. But in many cases it's unclear how much an API might cost. + +*If you are calling an API that might do something expensive, double-check the cost. Ask the developers to document it, or examine the source code yourself (and submit a PR documenting the cost).* + +Remember, even if the API is asynchronous, you don't know how much time it might spend on a Worker or on the Event Loop in each of its partitions. For example, suppose in the `asyncAvg` example given above, each call to the helper function summed *half* of the numbers rather than one of them. Then this function would still be asynchronous, but the cost of each partition would be `O(n)`, not `O(1)`, making it much less safe to use for arbitrary values of `n`. + +## Conclusion +Node.js has two types of threads: one Event Loop and `k` Workers. The Event Loop is responsible for JavaScript callbacks and non-blocking I/O, and a Worker executes tasks corresponding to C++ code that completes an asynchronous request, including blocking I/O and CPU-intensive work. 
Both types of threads work on no more than one activity at a time. If any callback or task takes a long time, the thread running it becomes *blocked*. If your application makes blocking callbacks or tasks, this can lead to degraded throughput (clients/second) at best, and complete denial of service at worst. + +To write a high-throughput, more DoS-proof web server, you must ensure that on benign and on malicious input, neither your Event Loop nor your Workers will block. diff --git a/locale/ro/docs/guides/event-loop-timers-and-nexttick.md b/locale/ro/docs/guides/event-loop-timers-and-nexttick.md new file mode 100644 index 000000000000..c1105b3a73e9 --- /dev/null +++ b/locale/ro/docs/guides/event-loop-timers-and-nexttick.md @@ -0,0 +1,335 @@ +--- +title: The Node.js Event Loop, Timers, and process.nextTick() +layout: docs.hbs +--- + +# The Node.js Event Loop, Timers, and `process.nextTick()` + +## What is the Event Loop? + +The event loop is what allows Node.js to perform non-blocking I/O operations — despite the fact that JavaScript is single-threaded — by offloading operations to the system kernel whenever possible. + +Since most modern kernels are multi-threaded, they can handle multiple operations executing in the background. When one of these operations completes, the kernel tells Node.js so that the appropriate callback may be added to the **poll** queue to eventually be executed. We'll explain this in further detail later in this topic. + +## Event Loop Explained + +When Node.js starts, it initializes the event loop, processes the provided input script (or drops into the [REPL](https://nodejs.org/api/repl.html#repl_repl), which is not covered in this document) which may make async API calls, schedule timers, or call `process.nextTick()`, then begins processing the event loop. + +The following diagram shows a simplified overview of the event loop's order of operations. 
+ +``` + ┌───────────────────────────┐ +┌─>│ timers │ +│ └─────────────┬─────────────┘ +│ ┌─────────────┴─────────────┐ +│ │ pending callbacks │ +│ └─────────────┬─────────────┘ +│ ┌─────────────┴─────────────┐ +│ │ idle, prepare │ +│ └─────────────┬─────────────┘ ┌───────────────┐ +│ ┌─────────────┴─────────────┐ │ incoming: │ +│ │ poll │<─────┤ connections, │ +│ └─────────────┬─────────────┘ │ data, etc. │ +│ ┌─────────────┴─────────────┐ └───────────────┘ +│ │ check │ +│ └─────────────┬─────────────┘ +│ ┌─────────────┴─────────────┐ +└──┤ close callbacks │ + └───────────────────────────┘ +``` + +*note: each box will be referred to as a "phase" of the event loop.* + +Each phase has a FIFO queue of callbacks to execute. While each phase is special in its own way, generally, when the event loop enters a given phase, it will perform any operations specific to that phase, then execute callbacks in that phase's queue until the queue has been exhausted or the maximum number of callbacks has executed. When the queue has been exhausted or the callback limit is reached, the event loop will move to the next phase, and so on. + +Since any of these operations may schedule _more_ operations and new events processed in the **poll** phase are queued by the kernel, poll events can be queued while polling events are being processed. As a result, long running callbacks can allow the poll phase to run much longer than a timer's threshold. See the [**timers**](#timers) and [**poll**](#poll) sections for more details. + +_**NOTE:** There is a slight discrepancy between the Windows and the Unix/Linux implementation, but that's not important for this demonstration. The most important parts are here. There are actually seven or eight steps, but the ones we care about — ones that Node.js actually uses - are those above._ + +## Phases Overview + +* **timers**: this phase executes callbacks scheduled by `setTimeout()` and `setInterval()`. 
+* **pending callbacks**: executes I/O callbacks deferred to the next loop iteration. +* **idle, prepare**: only used internally. +* **poll**: retrieve new I/O events; execute I/O related callbacks (almost all with the exception of close callbacks, the ones scheduled by timers, and `setImmediate()`); node will block here when appropriate. +* **check**: `setImmediate()` callbacks are invoked here. +* **close callbacks**: some close callbacks, e.g. `socket.on('close', ...)`. + +Between each run of the event loop, Node.js checks if it is waiting for any asynchronous I/O or timers and shuts down cleanly if there are not any. + +## Phases in Detail + +### timers + +A timer specifies the **threshold** _after which_ a provided callback _may be executed_ rather than the **exact** time a person _wants it to be executed_. Timers callbacks will run as early as they can be scheduled after the specified amount of time has passed; however, Operating System scheduling or the running of other callbacks may delay them. + +_**Note**: Technically, the [**poll** phase](#poll) controls when timers are executed._ + +For example, say you schedule a timeout to execute after a 100 ms threshold, then your script starts asynchronously reading a file which takes 95 ms: + +```js +const fs = require('fs'); + +function someAsyncOperation(callback) { + // Assume this takes 95ms to complete + fs.readFile('/path/to/file', callback); +} + +const timeoutScheduled = Date.now(); + +setTimeout(() => { + const delay = Date.now() - timeoutScheduled; + + console.log(`${delay}ms have passed since I was scheduled`); +}, 100); + +// do someAsyncOperation which takes 95 ms to complete +someAsyncOperation(() => { + const startCallback = Date.now(); + + // do something that will take 10ms... 
+ while (Date.now() - startCallback < 10) { + // do nothing + } +}); +``` + +When the event loop enters the **poll** phase, it has an empty queue (`fs.readFile()` has not completed), so it will wait for the number of ms remaining until the soonest timer's threshold is reached. While it is waiting 95 ms pass, `fs.readFile()` finishes reading the file and its callback which takes 10 ms to complete is added to the **poll** queue and executed. When the callback finishes, there are no more callbacks in the queue, so the event loop will see that the threshold of the soonest timer has been reached then wrap back to the **timers** phase to execute the timer's callback. In this example, you will see that the total delay between the timer being scheduled and its callback being executed will be 105ms. + +Note: To prevent the **poll** phase from starving the event loop, [libuv](https://libuv.org/) (the C library that implements the Node.js event loop and all of the asynchronous behaviors of the platform) also has a hard maximum (system dependent) before it stops polling for more events. + +### pending callbacks + +This phase executes callbacks for some system operations such as types of TCP errors. For example if a TCP socket receives `ECONNREFUSED` when attempting to connect, some \*nix systems want to wait to report the error. This will be queued to execute in the **pending callbacks** phase. + +### poll + +The **poll** phase has two main functions: + +1. Calculating how long it should block and poll for I/O, then +2. Processing events in the **poll** queue. + +When the event loop enters the **poll** phase _and there are no timers scheduled_, one of two things will happen: + +* _If the **poll** queue **is not empty**_, the event loop will iterate through its queue of callbacks executing them synchronously until either the queue has been exhausted, or the system-dependent hard limit is reached. 
+ +* _If the **poll** queue **is empty**_, one of two more things will happen: + * If scripts have been scheduled by `setImmediate()`, the event loop will end the **poll** phase and continue to the **check** phase to execute those scheduled scripts. + + * If scripts **have not** been scheduled by `setImmediate()`, the event loop will wait for callbacks to be added to the queue, then execute them immediately. + +Once the **poll** queue is empty the event loop will check for timers _whose time thresholds have been reached_. If one or more timers are ready, the event loop will wrap back to the **timers** phase to execute those timers' callbacks. + +### check + +This phase allows a person to execute callbacks immediately after the **poll** phase has completed. If the **poll** phase becomes idle and scripts have been queued with `setImmediate()`, the event loop may continue to the **check** phase rather than waiting. + +`setImmediate()` is actually a special timer that runs in a separate phase of the event loop. It uses a libuv API that schedules callbacks to execute after the **poll** phase has completed. + +Generally, as the code is executed, the event loop will eventually hit the **poll** phase where it will wait for an incoming connection, request, etc. However, if a callback has been scheduled with `setImmediate()` and the **poll** phase becomes idle, it will end and continue to the **check** phase rather than waiting for **poll** events. + +### close callbacks + +If a socket or handle is closed abruptly (e.g. `socket.destroy()`), the `'close'` event will be emitted in this phase. Otherwise it will be emitted via `process.nextTick()`. + +## `setImmediate()` vs `setTimeout()` + +`setImmediate()` and `setTimeout()` are similar, but behave in different ways depending on when they are called. + +* `setImmediate()` is designed to execute a script once the current **poll** phase completes. 
+* `setTimeout()` schedules a script to be run after a minimum threshold in ms has elapsed. + +The order in which the timers are executed will vary depending on the context in which they are called. If both are called from within the main module, then timing will be bound by the performance of the process (which can be impacted by other applications running on the machine). + +For example, if we run the following script which is not within an I/O cycle (i.e. the main module), the order in which the two timers are executed is non-deterministic, as it is bound by the performance of the process: + +```js +// timeout_vs_immediate.js +setTimeout(() => { + console.log('timeout'); +}, 0); + +setImmediate(() => { + console.log('immediate'); +}); +``` + +``` +$ node timeout_vs_immediate.js +timeout +immediate + +$ node timeout_vs_immediate.js +immediate +timeout +``` + +However, if you move the two calls within an I/O cycle, the immediate callback is always executed first: + +```js +// timeout_vs_immediate.js +const fs = require('fs'); + +fs.readFile(__filename, () => { + setTimeout(() => { + console.log('timeout'); + }, 0); + setImmediate(() => { + console.log('immediate'); + }); +}); +``` + +``` +$ node timeout_vs_immediate.js +immediate +timeout + +$ node timeout_vs_immediate.js +immediate +timeout +``` + +The main advantage to using `setImmediate()` over `setTimeout()` is `setImmediate()` will always be executed before any timers if scheduled within an I/O cycle, independently of how many timers are present. + +## `process.nextTick()` + +### Understanding `process.nextTick()` + +You may have noticed that `process.nextTick()` was not displayed in the diagram, even though it's a part of the asynchronous API. This is because `process.nextTick()` is not technically part of the event loop. Instead, the `nextTickQueue` will be processed after the current operation is completed, regardless of the current phase of the event loop. 
Here, an *operation* is defined as a transition from the underlying C/C++ handler, and handling the JavaScript that needs to be executed. + +Looking back at our diagram, any time you call `process.nextTick()` in a given phase, all callbacks passed to `process.nextTick()` will be resolved before the event loop continues. This can create some bad situations because **it allows you to "starve" your I/O by making recursive `process.nextTick()` calls**, which prevents the event loop from reaching the **poll** phase. + +### Why would that be allowed? + +Why would something like this be included in Node.js? Part of it is a design philosophy where an API should always be asynchronous even where it doesn't have to be. Take this code snippet for example: + +```js +function apiCall(arg, callback) { + if (typeof arg !== 'string') + return process.nextTick(callback, + new TypeError('argument should be string')); +} +``` + +The snippet does an argument check and if it's not correct, it will pass the error to the callback. The API was updated fairly recently to allow passing arguments to `process.nextTick()` allowing it to take any arguments passed after the callback to be propagated as the arguments to the callback so you don't have to nest functions. + +What we're doing is passing an error back to the user but only *after* we have allowed the rest of the user's code to execute. By using `process.nextTick()` we guarantee that `apiCall()` always runs its callback *after* the rest of the user's code and *before* the event loop is allowed to proceed. To achieve this, the JS call stack is allowed to unwind then immediately execute the provided callback which allows a person to make recursive calls to `process.nextTick()` without reaching a `RangeError: Maximum call stack size exceeded from v8`. + +This philosophy can lead to some potentially problematic situations. 
Take this snippet for example: + +```js +let bar; + +// this has an asynchronous signature, but calls callback synchronously +function someAsyncApiCall(callback) { callback(); } + +// the callback is called before `someAsyncApiCall` completes. +someAsyncApiCall(() => { + // since someAsyncApiCall hasn't completed, bar hasn't been assigned any value + console.log('bar', bar); // undefined +}); + +bar = 1; +``` + +The user defines `someAsyncApiCall()` to have an asynchronous signature, but it actually operates synchronously. When it is called, the callback provided to `someAsyncApiCall()` is called in the same phase of the event loop because `someAsyncApiCall()` doesn't actually do anything asynchronously. As a result, the callback tries to reference `bar` even though it may not have that variable in scope yet, because the script has not been able to run to completion. + +By placing the callback in a `process.nextTick()`, the script still has the ability to run to completion, allowing all the variables, functions, etc., to be initialized prior to the callback being called. It also has the advantage of not allowing the event loop to continue. It may be useful for the user to be alerted to an error before the event loop is allowed to continue. Here is the previous example using `process.nextTick()`: + +```js +let bar; + +function someAsyncApiCall(callback) { + process.nextTick(callback); +} + +someAsyncApiCall(() => { + console.log('bar', bar); // 1 +}); + +bar = 1; +``` + +Here's another real world example: + +```js +const server = net.createServer(() => {}).listen(8080); + +server.on('listening', () => {}); +``` + +When only a port is passed, the port is bound immediately. So, the `'listening'` callback could be called immediately. The problem is that the `.on('listening')` callback will not have been set by that time. + +To get around this, the `'listening'` event is queued in a `nextTick()` to allow the script to run to completion. 
This allows the user to set any event handlers they want. + +## `process.nextTick()` vs `setImmediate()` + +We have two calls that are similar as far as users are concerned, but their names are confusing. + +* `process.nextTick()` fires immediately on the same phase +* `setImmediate()` fires on the following iteration or 'tick' of the event loop + +In essence, the names should be swapped. `process.nextTick()` fires more immediately than `setImmediate()`, but this is an artifact of the past which is unlikely to change. Making this switch would break a large percentage of the packages on npm. Every day more new modules are being added, which means every day we wait, more potential breakages occur. While they are confusing, the names themselves won't change. + +*We recommend developers use `setImmediate()` in all cases because it's easier to reason about.* + +## Why use `process.nextTick()`? + +There are two main reasons: + +1. Allow users to handle errors, cleanup any then unneeded resources, or perhaps try the request again before the event loop continues. + +2. At times it's necessary to allow a callback to run after the call stack has unwound but before the event loop continues. + +One example is to match the user's expectations. Simple example: + +```js +const server = net.createServer(); +server.on('connection', (conn) => { }); + +server.listen(8080); +server.on('listening', () => { }); +``` + +Say that `listen()` is run at the beginning of the event loop, but the listening callback is placed in a `setImmediate()`. Unless a hostname is passed, binding to the port will happen immediately. For the event loop to proceed, it must hit the **poll** phase, which means there is a non-zero chance that a connection could have been received allowing the connection event to be fired before the listening event. 
+ +Another example is running a function constructor that was to, say, inherit from `EventEmitter` and it wanted to call an event within the constructor: + +```js +const EventEmitter = require('events'); +const util = require('util'); + +function MyEmitter() { + EventEmitter.call(this); + this.emit('event'); +} +util.inherits(MyEmitter, EventEmitter); + +const myEmitter = new MyEmitter(); +myEmitter.on('event', () => { + console.log('an event occurred!'); +}); +``` + +You can't emit an event from the constructor immediately because the script will not have processed to the point where the user assigns a callback to that event. So, within the constructor itself, you can use `process.nextTick()` to set a callback to emit the event after the constructor has finished, which provides the expected results: + +```js +const EventEmitter = require('events'); +const util = require('util'); + +function MyEmitter() { + EventEmitter.call(this); + + // use nextTick to emit the event once a handler is assigned + process.nextTick(() => { + this.emit('event'); + }); +} +util.inherits(MyEmitter, EventEmitter); + +const myEmitter = new MyEmitter(); +myEmitter.on('event', () => { + console.log('an event occurred!'); +}); +``` diff --git a/locale/ro/docs/guides/getting-started-guide.md b/locale/ro/docs/guides/getting-started-guide.md new file mode 100644 index 000000000000..c176110b8d34 --- /dev/null +++ b/locale/ro/docs/guides/getting-started-guide.md @@ -0,0 +1,29 @@ +--- +title: Getting Started Guide +layout: docs.hbs +--- + +# How do I start with Node.js after I installed it? + +Once we have installed Node.js, let's build our first web server. 
Create a file named `app.js` containing the following contents: + +```javascript +const http = require('http'); + +const hostname = '127.0.0.1'; +const port = 3000; + +const server = http.createServer((req, res) => { + res.statusCode = 200; + res.setHeader('Content-Type', 'text/plain'); + res.end('Hello World'); +}); + +server.listen(port, hostname, () => { + console.log(`Server running at http://${hostname}:${port}/`); +}); +``` + +Now, run your web server using `node app.js`. Visit `http://localhost:3000` and you will see a message saying "Hello World". + +Refer to the [Introduction to Node.js](https://nodejs.dev/) for a more comprehensive guide to getting started with Node.js. diff --git a/locale/ro/docs/guides/index.md b/locale/ro/docs/guides/index.md new file mode 100644 index 000000000000..ad352ee74bac --- /dev/null +++ b/locale/ro/docs/guides/index.md @@ -0,0 +1,32 @@ +--- +title: Guides +layout: docs.hbs +--- + +# Guides + +## General + +* [Getting Started Guide](/en/docs/guides/getting-started-guide/) +* [Debugging - Getting Started](/en/docs/guides/debugging-getting-started/) +* [Easy profiling for Node.js Applications](/en/docs/guides/simple-profiling/) +* [Diagnostics - Flame Graphs](/en/docs/guides/diagnostics-flamegraph/) +* [Dockerizing a Node.js web app](/en/docs/guides/nodejs-docker-webapp/) +* [Migrating to safe Buffer constructors](/en/docs/guides/buffer-constructor-deprecation/) + +## Node.js core concepts + +* [Introduction to Node.js](https://nodejs.dev/) +* [Overview of Blocking vs Non-Blocking](/en/docs/guides/blocking-vs-non-blocking/) +* [The Node.js Event Loop, Timers, and `process.nextTick()`](/en/docs/guides/event-loop-timers-and-nexttick/) +* [Don't Block the Event Loop (or the Worker Pool)](/en/docs/guides/dont-block-the-event-loop/) +* [Timers in Node.js](/en/docs/guides/timers-in-node/) + +## Module-related guides + +* [Anatomy of an HTTP Transaction](/en/docs/guides/anatomy-of-an-http-transaction/) +* [Working with Different 
Filesystems](/en/docs/guides/working-with-different-filesystems/) +* [Backpressuring in Streams](/en/docs/guides/backpressuring-in-streams/) +* [Domain Module Postmortem](/en/docs/guides/domain-postmortem/) +* [How to publish N-API package](/en/docs/guides/publishing-napi-modules/) +* [ABI Stability](/en/docs/guides/abi-stability/) diff --git a/locale/ro/docs/guides/nodejs-docker-webapp.md b/locale/ro/docs/guides/nodejs-docker-webapp.md new file mode 100644 index 000000000000..27219bd037f4 --- /dev/null +++ b/locale/ro/docs/guides/nodejs-docker-webapp.md @@ -0,0 +1,237 @@ +--- +title: Dockerizing a Node.js web app +layout: docs.hbs +--- + +# Dockerizing a Node.js web app + +The goal of this example is to show you how to get a Node.js application into a Docker container. The guide is intended for development, and *not* for a production deployment. The guide also assumes you have a working [Docker installation](https://docs.docker.com/engine/installation/) and a basic understanding of how a Node.js application is structured. + +In the first part of this guide we will create a simple web application in Node.js, then we will build a Docker image for that application, and lastly we will instantiate a container from that image. + +Docker allows you to package an application with its environment and all of its dependencies into a "box", called a container. Usually, a container consists of an application running in a stripped-to-basics version of a Linux operating system. An image is the blueprint for a container, a container is a running instance of an image. + +## Create the Node.js app + +First, create a new directory where all the files would live. 
In this directory create a `package.json` file that describes your app and its dependencies: + +```json +{ + "name": "docker_web_app", + "version": "1.0.0", + "description": "Node.js on Docker", + "author": "First Last ", + "main": "server.js", + "scripts": { + "start": "node server.js" + }, + "dependencies": { + "express": "^4.16.1" + } +} +``` + +With your new `package.json` file, run `npm install`. If you are using `npm` version 5 or later, this will generate a `package-lock.json` file which will be copied to your Docker image. + +Then, create a `server.js` file that defines a web app using the [Express.js](https://expressjs.com/) framework: + +```javascript +'use strict'; + +const express = require('express'); + +// Constants +const PORT = 8080; +const HOST = '0.0.0.0'; + +// App +const app = express(); +app.get('/', (req, res) => { + res.send('Hello World'); +}); + +app.listen(PORT, HOST); +console.log(`Running on http://${HOST}:${PORT}`); +``` + +In the next steps, we'll look at how you can run this app inside a Docker container using the official Docker image. First, you'll need to build a Docker image of your app. + +## Creating a Dockerfile + +Create an empty file called `Dockerfile`: + +```markup +touch Dockerfile +``` + +Open the `Dockerfile` in your favorite text editor + +The first thing we need to do is define from what image we want to build from. Here we will use the latest LTS (long term support) version `10` of `node` available from the [Docker Hub](https://hub.docker.com/): + +```docker +FROM node:10 +``` + +Next we create a directory to hold the application code inside the image, this will be the working directory for your application: + +```docker +# Create app directory +WORKDIR /usr/src/app +``` + +This image comes with Node.js and NPM already installed so the next thing we need to do is to install your app dependencies using the `npm` binary. 
Please note that if you are using `npm` version 4 or earlier a `package-lock.json` file will *not* be generated. + +```docker +# Install app dependencies +# A wildcard is used to ensure both package.json AND package-lock.json are copied +# where available (npm@5+) +COPY package*.json ./ + +RUN npm install +# If you are building your code for production +# RUN npm ci --only=production +``` + +Note that, rather than copying the entire working directory, we are only copying the `package.json` file. This allows us to take advantage of cached Docker layers. bitJudo has a good explanation of this [here](http://bitjudo.com/blog/2014/03/13/building-efficient-dockerfiles-node-dot-js/). Furthermore, the `npm ci` command, specified in the comments, helps provide faster, reliable, reproducible builds for production environments. You can read more about this [here](https://blog.npmjs.org/post/171556855892/introducing-npm-ci-for-faster-more-reliable). + +To bundle your app's source code inside the Docker image, use the `COPY` instruction: + +```docker +# Bundle app source +COPY . . +``` + +Your app binds to port `8080` so you'll use the `EXPOSE` instruction to have it mapped by the `docker` daemon: + +```docker +EXPOSE 8080 +``` + +Last but not least, define the command to run your app using `CMD` which defines your runtime. Here we will use `node server.js` to start your server: + +```docker +CMD [ "node", "server.js" ] +``` + +Your `Dockerfile` should now look like this: + +```docker +FROM node:10 + +# Create app directory +WORKDIR /usr/src/app + +# Install app dependencies +# A wildcard is used to ensure both package.json AND package-lock.json are copied +# where available (npm@5+) +COPY package*.json ./ + +RUN npm install +# If you are building your code for production +# RUN npm ci --only=production + +# Bundle app source +COPY . . 
+ 
+
+EXPOSE 8080
+CMD [ "node", "server.js" ]
+```
+
+## .dockerignore file
+
+Create a `.dockerignore` file in the same directory as your `Dockerfile` with the following content:
+
+```
+node_modules
+npm-debug.log
+```
+
+This will prevent your local modules and debug logs from being copied onto your Docker image and possibly overwriting modules installed within your image.
+
+## Building your image
+
+Go to the directory that has your `Dockerfile` and run the following command to build the Docker image. The `-t` flag lets you tag your image so it's easier to find later using the `docker images` command:
+
+```bash
+docker build -t /node-web-app .
+```
+
+Your image will now be listed by Docker:
+
+```bash
+$ docker images
+
+# Example
+REPOSITORY TAG ID CREATED
+node 10 1934b0b038d1 5 days ago
+/node-web-app latest d64d3505b0d2 1 minute ago
+```
+
+## Run the image
+
+Running your image with `-d` runs the container in detached mode, leaving the container running in the background. The `-p` flag redirects a public port to a private port inside the container. Run the image you previously built:
+
+```bash
+docker run -p 49160:8080 -d /node-web-app
+```
+
+Print the output of your app:
+
+```bash
+# Get container ID
+$ docker ps
+
+# Print app output
+$ docker logs 
+
+# Example
+Running on http://localhost:8080
+```
+
+If you need to go inside the container you can use the `exec` command:
+
+```bash
+# Enter the container
+$ docker exec -it /bin/bash
+```
+
+## Test
+
+To test your app, get the port of your app that Docker mapped:
+
+```bash
+$ docker ps
+
+# Example
+ID IMAGE COMMAND ... PORTS
+ecce33b30ebf /node-web-app:latest npm start ... 49160->8080
+```
+
+In the example above, Docker mapped the `8080` port inside of the container to the port `49160` on your machine. 
+ 
+
+Now you can call your app using `curl` (install if needed via: `sudo apt-get
+install curl`):
+
+```bash
+$ curl -i localhost:49160
+
+HTTP/1.1 200 OK
+X-Powered-By: Express
+Content-Type: text/html; charset=utf-8
+Content-Length: 12
+ETag: W/"c-M6tWOb/Y57lesdjQuHeB1P/qTV0"
+Date: Mon, 13 Nov 2017 20:53:59 GMT
+Connection: keep-alive
+
+Hello World
+```
+
+We hope this tutorial helped you get up and running a simple Node.js application on Docker.
+
+You can find more information about Docker and Node.js on Docker in the following places:
+
+* [Official Node.js Docker Image](https://hub.docker.com/_/node/)
+* [Node.js Docker Best Practices Guide](https://github.com/nodejs/docker-node/blob/master/docs/BestPractices.md)
+* [Official Docker documentation](https://docs.docker.com/)
+* [Docker Tag on Stack Overflow](https://stackoverflow.com/questions/tagged/docker)
+* [Docker Subreddit](https://reddit.com/r/docker)
diff --git a/locale/ro/docs/guides/publishing-napi-modules.md b/locale/ro/docs/guides/publishing-napi-modules.md
new file mode 100644
index 000000000000..d78432a4305d
--- /dev/null
+++ b/locale/ro/docs/guides/publishing-napi-modules.md
@@ -0,0 +1,37 @@
+---
+title: How to publish N-API package
+layout: docs.hbs
+---
+
+# To publish N-API version of a package alongside a non-N-API version
+
+The following steps are illustrated using the package `iotivity-node`:
+
+* First, publish the non-N-API version:
+  * Update the version in `package.json`. For `iotivity-node`, the version becomes `1.2.0-2`.
+  * Go through the release checklist (ensure tests/demos/docs are OK)
+  * `npm publish`
+* Then, publish the N-API version:
+  * Update the version in `package.json`. In the case of `iotivity-node`, the version becomes `1.2.0-3`. For versioning, we recommend following the pre-release version scheme as described by [semver.org](https://semver.org/#spec-item-9) e.g. `1.2.0-napi`. 
+ * Go through the release checklist (ensure tests/demos/docs are OK) + * `npm publish --tag n-api` + +In this example, tagging the release with `n-api` has ensured that, although version 1.2.0-3 is later than the non-N-API published version (1.2.0-2), it will not be installed if someone chooses to install `iotivity-node` by simply running `npm install iotivity-node`. This will install the non-N-API version by default. The user will have to run `npm install iotivity-node@n-api` to receive the N-API version. For more information on using tags with npm check out ["Using dist-tags"](https://docs.npmjs.com/getting-started/using-tags). + +# To introduce a dependency on an N-API version of a package + +To add the N-API version of `iotivity-node` as a dependency, the `package.json` will look like this: + +```json +"dependencies": { + "iotivity-node": "n-api" +} +``` + +**Note:** As explained in ["Using dist-tags"](https://docs.npmjs.com/getting-started/using-tags), unlike regular versions, tagged versions cannot be addressed by version ranges such as `"^2.0.0"` inside `package.json`. The reason for this is that the tag refers to exactly one version. So, if the package maintainer chooses to tag a later version of the package using the same tag, `npm update` will receive the later version. This should be acceptable given the currently experimental nature of N-API. 
To depend on an N-API-enabled version other than the latest published, the `package.json` dependency will have to refer to the exact version like the following: + +```json +"dependencies": { + "iotivity-node": "1.2.0-3" +} +``` diff --git a/locale/ro/docs/guides/simple-profiling.md b/locale/ro/docs/guides/simple-profiling.md new file mode 100644 index 000000000000..aa0392569fb7 --- /dev/null +++ b/locale/ro/docs/guides/simple-profiling.md @@ -0,0 +1,217 @@ +--- +title: Easy profiling for Node.js Applications +layout: docs.hbs +--- + +# Easy profiling for Node.js Applications + +There are many third party tools available for profiling Node.js applications but, in many cases, the easiest option is to use the Node.js built in profiler. The built in profiler uses the [profiler inside V8](https://v8.dev/docs/profile) which samples the stack at regular intervals during program execution. It records the results of these samples, along with important optimization events such as jit compiles, as a series of ticks: + +``` +code-creation,LazyCompile,0,0x2d5000a337a0,396,"bp native array.js:1153:16",0x289f644df68,~ +code-creation,LazyCompile,0,0x2d5000a33940,716,"hasOwnProperty native v8natives.js:198:30",0x289f64438d0,~ +code-creation,LazyCompile,0,0x2d5000a33c20,284,"ToName native runtime.js:549:16",0x289f643bb28,~ +code-creation,Stub,2,0x2d5000a33d40,182,"DoubleToIStub" +code-creation,Stub,2,0x2d5000a33e00,507,"NumberToStringStub" +``` + +In the past, you needed the V8 source code to be able to interpret the ticks. Luckily, tools have been introduced since Node.js 4.4.0 that facilitate the consumption of this information without separately building V8 from source. Let's see how the built-in profiler can help provide insight into application performance. + +To illustrate the use of the tick profiler, we will work with a simple Express application. 
Our application will have two handlers, one for adding new users to our system: + +```javascript +app.get('/newUser', (req, res) => { + let username = req.query.username || ''; + const password = req.query.password || ''; + + username = username.replace(/[!@#$%^&*]/g, ''); + + if (!username || !password || users[username]) { + return res.sendStatus(400); + } + + const salt = crypto.randomBytes(128).toString('base64'); + const hash = crypto.pbkdf2Sync(password, salt, 10000, 512, 'sha512'); + + users[username] = { salt, hash }; + + res.sendStatus(200); +}); +``` + +and another for validating user authentication attempts: + +```javascript +app.get('/auth', (req, res) => { + let username = req.query.username || ''; + const password = req.query.password || ''; + + username = username.replace(/[!@#$%^&*]/g, ''); + + if (!username || !password || !users[username]) { + return res.sendStatus(400); + } + + const { salt, hash } = users[username]; + const encryptHash = crypto.pbkdf2Sync(password, salt, 10000, 512, 'sha512'); + + if (crypto.timingSafeEqual(hash, encryptHash)) { + res.sendStatus(200); + } else { + res.sendStatus(401); + } +}); +``` + +*Please note that these are NOT recommended handlers for authenticating users in your Node.js applications and are used purely for illustration purposes. You should not be trying to design your own cryptographic authentication mechanisms in general. It is much better to use existing, proven authentication solutions.* + +Now assume that we've deployed our application and users are complaining about high latency on requests. 
We can easily run the app with the built in profiler: + +``` +NODE_ENV=production node --prof app.js +``` + +and put some load on the server using `ab` (ApacheBench): + +``` +curl -X GET "http://localhost:8080/newUser?username=matt&password=password" +ab -k -c 20 -n 250 "http://localhost:8080/auth?username=matt&password=password" +``` + +and get an ab output of: + +``` +Concurrency Level: 20 +Time taken for tests: 46.932 seconds +Complete requests: 250 +Failed requests: 0 +Keep-Alive requests: 250 +Total transferred: 50250 bytes +HTML transferred: 500 bytes +Requests per second: 5.33 [#/sec] (mean) +Time per request: 3754.556 [ms] (mean) +Time per request: 187.728 [ms] (mean, across all concurrent requests) +Transfer rate: 1.05 [Kbytes/sec] received + +... + +Percentage of the requests served within a certain time (ms) + 50% 3755 + 66% 3804 + 75% 3818 + 80% 3825 + 90% 3845 + 95% 3858 + 98% 3874 + 99% 3875 + 100% 4225 (longest request) +``` + +From this output, we see that we're only managing to serve about 5 requests per second and that the average request takes just under 4 seconds round trip. In a real world example, we could be doing lots of work in many functions on behalf of a user request but even in our simple example, time could be lost compiling regular expressions, generating random salts, generating unique hashes from user passwords, or inside the Express framework itself. + +Since we ran our application using the `--prof` option, a tick file was generated in the same directory as your local run of the application. It should have the form `isolate-0xnnnnnnnnnnnn-v8.log` (where `n` is a digit). + +In order to make sense of this file, we need to use the tick processor bundled with the Node.js binary. To run the processor, use the `--prof-process` flag: + +``` +node --prof-process isolate-0xnnnnnnnnnnnn-v8.log > processed.txt +``` + +Opening processed.txt in your favorite text editor will give you a few different types of information. 
The file is broken up into sections which are again broken up by language. First, we look at the summary section and see: + +``` + [Summary]: + ticks total nonlib name + 79 0.2% 0.2% JavaScript + 36703 97.2% 99.2% C++ + 7 0.0% 0.0% GC + 767 2.0% Shared libraries + 215 0.6% Unaccounted +``` + +This tells us that 97% of all samples gathered occurred in C++ code and that when viewing other sections of the processed output we should pay most attention to work being done in C++ (as opposed to JavaScript). With this in mind, we next find the [C++] section which contains information about which C++ functions are taking the most CPU time and see: + +``` + [C++]: + ticks total nonlib name + 19557 51.8% 52.9% node::crypto::PBKDF2(v8::FunctionCallbackInfo const&) + 4510 11.9% 12.2% _sha1_block_data_order + 3165 8.4% 8.6% _malloc_zone_malloc +``` + +We see that the top 3 entries account for 72.1% of CPU time taken by the program. From this output, we immediately see that at least 51.8% of CPU time is taken up by a function called PBKDF2 which corresponds to our hash generation from a user's password. However, it may not be immediately obvious how the lower two entries factor into our application (or if it is we will pretend otherwise for the sake of example). To better understand the relationship between these functions, we will next look at the [Bottom up (heavy) profile] section which provides information about the primary callers of each function. 
Examining this section, we find: + +``` + ticks parent name + 19557 51.8% node::crypto::PBKDF2(v8::FunctionCallbackInfo const&) + 19557 100.0% v8::internal::Builtins::~Builtins() + 19557 100.0% LazyCompile: ~pbkdf2 crypto.js:557:16 + + 4510 11.9% _sha1_block_data_order + 4510 100.0% LazyCompile: *pbkdf2 crypto.js:557:16 + 4510 100.0% LazyCompile: *exports.pbkdf2Sync crypto.js:552:30 + + 3165 8.4% _malloc_zone_malloc + 3161 99.9% LazyCompile: *pbkdf2 crypto.js:557:16 + 3161 100.0% LazyCompile: *exports.pbkdf2Sync crypto.js:552:30 +``` + +Parsing this section takes a little more work than the raw tick counts above. Within each of the "call stacks" above, the percentage in the parent column tells you the percentage of samples for which the function in the row above was called by the function in the current row. For example, in the middle "call stack" above for _sha1_block_data_order, we see that `_sha1_block_data_order` occurred in 11.9% of samples, which we knew from the raw counts above. However, here, we can also tell that it was always called by the pbkdf2 function inside the Node.js crypto module. We see that similarly, `_malloc_zone_malloc` was called almost exclusively by the same pbkdf2 function. Thus, using the information in this view, we can tell that our hash computation from the user's password accounts not only for the 51.8% from above but also for all CPU time in the top 3 most sampled functions since the calls to `_sha1_block_data_order` and `_malloc_zone_malloc` were made on behalf of the pbkdf2 function. + +At this point, it is very clear that the password based hash generation should be the target of our optimization. Thankfully, you've fully internalized the [benefits of asynchronous programming](https://nodesource.com/blog/why-asynchronous) and you realize that the work to generate a hash from the user's password is being done in a synchronous way and thus tying down the event loop. 
This prevents us from working on other incoming requests while computing a hash. + +To remedy this issue, you make a small modification to the above handlers to use the asynchronous version of the pbkdf2 function: + +```javascript +app.get('/auth', (req, res) => { + let username = req.query.username || ''; + const password = req.query.password || ''; + + username = username.replace(/[!@#$%^&*]/g, ''); + + if (!username || !password || !users[username]) { + return res.sendStatus(400); + } + + crypto.pbkdf2(password, users[username].salt, 10000, 512, 'sha512', (err, hash) => { + if (users[username].hash.toString() === hash.toString()) { + res.sendStatus(200); + } else { + res.sendStatus(401); + } + }); +}); +``` + +A new run of the ab benchmark above with the asynchronous version of your app yields: + +``` +Concurrency Level: 20 +Time taken for tests: 12.846 seconds +Complete requests: 250 +Failed requests: 0 +Keep-Alive requests: 250 +Total transferred: 50250 bytes +HTML transferred: 500 bytes +Requests per second: 19.46 [#/sec] (mean) +Time per request: 1027.689 [ms] (mean) +Time per request: 51.384 [ms] (mean, across all concurrent requests) +Transfer rate: 3.82 [Kbytes/sec] received + +... + +Percentage of the requests served within a certain time (ms) + 50% 1018 + 66% 1035 + 75% 1041 + 80% 1043 + 90% 1049 + 95% 1063 + 98% 1070 + 99% 1071 + 100% 1079 (longest request) +``` + +Yay! Your app is now serving about 20 requests per second, roughly 4 times more than it was with the synchronous hash generation. Additionally, the average latency is down from the 4 seconds before to just over 1 second. + +Hopefully, through the performance investigation of this (admittedly contrived) example, you've seen how the V8 tick processor can help you gain a better understanding of the performance of your Node.js applications. 
diff --git a/locale/ro/docs/guides/timers-in-node.md b/locale/ro/docs/guides/timers-in-node.md new file mode 100644 index 000000000000..4cf765124e51 --- /dev/null +++ b/locale/ro/docs/guides/timers-in-node.md @@ -0,0 +1,125 @@ +--- +title: Timers in Node.js +layout: docs.hbs +--- + +# Timers in Node.js and beyond + +The Timers module in Node.js contains functions that execute code after a set period of time. Timers do not need to be imported via `require()`, since all the methods are available globally to emulate the browser JavaScript API. To fully understand when timer functions will be executed, it's a good idea to read up on the Node.js [Event Loop](/en/docs/guides/event-loop-timers-and-nexttick/). + +## Controlling the Time Continuum with Node.js + +The Node.js API provides several ways of scheduling code to execute at some point after the present moment. The functions below may seem familiar, since they are available in most browsers, but Node.js actually provides its own implementation of these methods. Timers integrate very closely with the system, and despite the fact that the API mirrors the browser API, there are some differences in implementation. + +### "When I say so" Execution ~ *`setTimeout()`* + +`setTimeout()` can be used to schedule code execution after a designated amount of milliseconds. This function is similar to [`window.setTimeout()`](https://developer.mozilla.org/en-US/docs/Web/API/WindowTimers/setTimeout) from the browser JavaScript API, however a string of code cannot be passed to be executed. + +`setTimeout()` accepts a function to execute as its first argument and the millisecond delay defined as a number as the second argument. Additional arguments may also be included and these will be passed on to the function. 
Here is an example of that: + +```js +function myFunc(arg) { + console.log(`arg was => ${arg}`); +} + +setTimeout(myFunc, 1500, 'funky'); +``` + +The above function `myFunc()` will execute as close to 1500 milliseconds (or 1.5 seconds) as possible due to the call of `setTimeout()`. + +The timeout interval that is set cannot be relied upon to execute after that *exact* number of milliseconds. This is because other executing code that blocks or holds onto the event loop will push the execution of the timeout back. The *only* guarantee is that the timeout will not execute *sooner* than the declared timeout interval. + +`setTimeout()` returns a `Timeout` object that can be used to reference the timeout that was set. This returned object can be used to cancel the timeout ( see `clearTimeout()` below) as well as change the execution behavior (see `unref()` below). + +### "Right after this" Execution ~ *`setImmediate()`* + +`setImmediate()` will execute code at the end of the current event loop cycle. This code will execute *after* any I/O operations in the current event loop and *before* any timers scheduled for the next event loop. This code execution could be thought of as happening "right after this", meaning any code following the `setImmediate()` function call will execute before the `setImmediate()` function argument. + +The first argument to `setImmediate()` will be the function to execute. Any subsequent arguments will be passed to the function when it is executed. 
Here's an example: + +```js +console.log('before immediate'); + +setImmediate((arg) => { + console.log(`executing immediate: ${arg}`); +}, 'so immediate'); + +console.log('after immediate'); +``` + +The above function passed to `setImmediate()` will execute after all runnable code has executed, and the console output will be: + +``` +before immediate +after immediate +executing immediate: so immediate +``` + +`setImmediate()` returns an `Immediate` object, which can be used to cancel the scheduled immediate (see `clearImmediate()` below). + +Note: Don't get `setImmediate()` confused with `process.nextTick()`. There are some major ways they differ. The first is that `process.nextTick()` will run *before* any `Immediate`s that are set as well as before any scheduled I/O. The second is that `process.nextTick()` is non-clearable, meaning once code has been scheduled to execute with `process.nextTick()`, the execution cannot be stopped, just like with a normal function. Refer to [this guide](/en/docs/guides/event-loop-timers-and-nexttick/#process-nexttick) to better understand the operation of `process.nextTick()`. + +### "Infinite Loop" Execution ~ *`setInterval()`* + +If there is a block of code that should execute multiple times, `setInterval()` can be used to execute that code. `setInterval()` takes a function argument that will run an infinite number of times with a given millisecond delay as the second argument. Just like `setTimeout()`, additional arguments can be added beyond the delay, and these will be passed on to the function call. Also like `setTimeout()`, the delay cannot be guaranteed because of operations that may hold on to the event loop, and therefore should be treated as an approximate delay. 
See the below example: + +```js +function intervalFunc() { + console.log('Cant stop me now!'); +} + +setInterval(intervalFunc, 1500); +``` + +In the above example, `intervalFunc()` will execute about every 1500 milliseconds, or 1.5 seconds, until it is stopped (see below). + +Just like `setTimeout()`, `setInterval()` also returns a `Timeout` object which can be used to reference and modify the interval that was set. + +## Clearing the Future + +What can be done if a `Timeout` or `Immediate` object needs to be cancelled? `setTimeout()`, `setImmediate()`, and `setInterval()` return a timer object that can be used to reference the set `Timeout` or `Immediate` object. By passing said object into the respective `clear` function, execution of that object will be halted completely. The respective functions are `clearTimeout()`, `clearImmediate()`, and `clearInterval()`. See the example below for an example of each: + +```js +const timeoutObj = setTimeout(() => { + console.log('timeout beyond time'); +}, 1500); + +const immediateObj = setImmediate(() => { + console.log('immediately executing immediate'); +}); + +const intervalObj = setInterval(() => { + console.log('interviewing the interval'); +}, 500); + +clearTimeout(timeoutObj); +clearImmediate(immediateObj); +clearInterval(intervalObj); +``` + +## Leaving Timeouts Behind + +Remember that `Timeout` objects are returned by `setTimeout` and `setInterval`. The `Timeout` object provides two functions intended to augment `Timeout` behavior with `unref()` and `ref()`. If there is a `Timeout` object scheduled using a `set` function, `unref()` can be called on that object. This will change the behavior slightly, and not call the `Timeout` object *if it is the last code to execute*. The `Timeout` object will not keep the process alive, waiting to execute. 
+ +In similar fashion, a `Timeout` object that has had `unref()` called on it can remove that behavior by calling `ref()` on that same `Timeout` object, which will then ensure its execution. Be aware, however, that this does not *exactly* restore the initial behavior for performance reasons. See below for examples of both: + +```js +const timerObj = setTimeout(() => { + console.log('will i run?'); +}); + +// if left alone, this statement will keep the above +// timeout from running, since the timeout will be the only +// thing keeping the program from exiting +timerObj.unref(); + +// we can bring it back to life by calling ref() inside +// an immediate +setImmediate(() => { + timerObj.ref(); +}); +``` + +## Further Down the Event Loop + +There's much more to the Event Loop and Timers than this guide has covered. To learn more about the internals of the Node.js Event Loop and how Timers operate during execution, check out this Node.js guide: [The Node.js Event Loop, Timers, and process.nextTick()](/en/docs/guides/event-loop-timers-and-nexttick/). diff --git a/locale/ro/docs/guides/working-with-different-filesystems.md b/locale/ro/docs/guides/working-with-different-filesystems.md new file mode 100644 index 000000000000..f4b875c0da31 --- /dev/null +++ b/locale/ro/docs/guides/working-with-different-filesystems.md @@ -0,0 +1,90 @@ +--- +title: Working with Different Filesystems +layout: docs.hbs +--- + +# Working with Different Filesystems + +Node.js exposes many features of the filesystem. But not all filesystems are alike. The following are suggested best practices to keep your code simple and safe when working with different filesystems. + +## Filesystem Behavior + +Before you can work with a filesystem, you need to know how it behaves. 
Different filesystems behave differently and have more or less features than others: case sensitivity, case insensitivity, case preservation, Unicode form preservation, timestamp resolution, extended attributes, inodes, Unix permissions, alternate data streams etc. + +Be wary of inferring filesystem behavior from `process.platform`. For example, do not assume that because your program is running on Darwin that you are therefore working on a case-insensitive filesystem (HFS+), as the user may be using a case-sensitive filesystem (HFSX). Similarly, do not assume that because your program is running on Linux that you are therefore working on a filesystem which supports Unix permissions and inodes, as you may be on a particular external drive, USB or network drive which does not. + +The operating system may not make it easy to infer filesystem behavior, but all is not lost. Instead of keeping a list of every known filesystem and behavior (which is always going to be incomplete), you can probe the filesystem to see how it actually behaves. The presence or absence of certain features which are easy to probe, are often enough to infer the behavior of other features which are more difficult to probe. + +Remember that some users may have different filesystems mounted at various paths in the working tree. + +## Avoid a Lowest Common Denominator Approach + +You might be tempted to make your program act like a lowest common denominator filesystem, by normalizing all filenames to uppercase, normalizing all filenames to NFC Unicode form, and normalizing all file timestamps to say 1-second resolution. This would be the lowest common denominator approach. + +Do not do this. You would only be able to interact safely with a filesystem which has the exact same lowest common denominator characteristics in every respect. You would be unable to work with more advanced filesystems in the way that users expect, and you would run into filename or timestamp collisions. 
You would most certainly lose and corrupt user data through a series of complicated dependent events, and you would create bugs that would be difficult if not impossible to solve. + +What happens when you later need to support a filesystem that only has 2-second or 24-hour timestamp resolution? What happens when the Unicode standard advances to include a slightly different normalization algorithm (as has happened in the past)? + +A lowest common denominator approach would tend to try to create a portable program by using only "portable" system calls. This leads to programs that are leaky and not in fact portable. + +## Adopt a Superset Approach + +Make the best use of each platform you support by adopting a superset approach. For example, a portable backup program should sync btimes (the created time of a file or folder) correctly between Windows systems, and should not destroy or alter btimes, even though btimes are not supported on Linux systems. The same portable backup program should sync Unix permissions correctly between Linux systems, and should not destroy or alter Unix permissions, even though Unix permissions are not supported on Windows systems. + +Handle different filesystems by making your program act like a more advanced filesystem. Support a superset of all possible features: case-sensitivity, case-preservation, Unicode form sensitivity, Unicode form preservation, Unix permissions, high-resolution nanosecond timestamps, extended attributes etc. + +Once you have case-preservation in your program, you can always implement case-insensitivity if you need to interact with a case-insensitive filesystem. But if you forego case-preservation in your program, you cannot interact safely with a case-preserving filesystem. The same is true for Unicode form preservation and timestamp resolution preservation. + +If a filesystem provides you with a filename in a mix of lowercase and uppercase, then keep the filename in the exact case given. 
If a filesystem provides you with a filename in mixed Unicode form or NFC or NFD (or NFKC or NFKD), then keep the filename in the exact byte sequence given. If a filesystem provides you with a millisecond timestamp, then keep the timestamp in millisecond resolution. 
+
+When you work with a lesser filesystem, you can always downsample appropriately, with comparison functions as required by the behavior of the filesystem on which your program is running. If you know that the filesystem does not support Unix permissions, then you should not expect to read the same Unix permissions you write. If you know that the filesystem does not preserve case, then you should be prepared to see `ABC` in a directory listing when your program creates `abc`. But if you know that the filesystem does preserve case, then you should consider `ABC` to be a different filename to `abc`, when detecting file renames or if the filesystem is case-sensitive.
+
+## Case Preservation
+
+You may create a directory called `test/abc` and be surprised to see sometimes that `fs.readdir('test')` returns `['ABC']`. This is not a bug in Node. Node returns the filename as the filesystem stores it, and not all filesystems support case-preservation. Some filesystems convert all filenames to uppercase (or lowercase).
+
+## Unicode Form Preservation
+
+*Case preservation and Unicode form preservation are similar concepts. To understand why Unicode form should be preserved, make sure that you first understand why case should be preserved. Unicode form preservation is just as simple when understood correctly.*
+
+Unicode can encode the same characters using several different byte sequences. Several strings may look the same, but have different byte sequences. When working with UTF-8 strings, be careful that your expectations are in line with how Unicode works. 
Just as you would not expect all UTF-8 characters to encode to a single byte, you should not expect several UTF-8 strings that look the same to the human eye to have the same byte representation. This may be an expectation that you can have of ASCII, but not of UTF-8. + +You may create a directory called `test/café` (NFC Unicode form with byte sequence `<63 61 66 c3 a9>` and `string.length === 5`) and be surprised to see sometimes that `fs.readdir('test')` returns `['café']` (NFD Unicode form with byte sequence `<63 61 66 65 cc 81>` and `string.length === 6`). This is not a bug in Node. Node.js returns the filename as the filesystem stores it, and not all filesystems support Unicode form preservation. + +HFS+, for example, will normalize all filenames to a form almost always the same as NFD form. Do not expect HFS+ to behave the same as NTFS or EXT4 and vice-versa. Do not try to change data permanently through normalization as a leaky abstraction to paper over Unicode differences between filesystems. This would create problems without solving any. Rather, preserve Unicode form and use normalization as a comparison function only. + +## Unicode Form Insensitivity + +Unicode form insensitivity and Unicode form preservation are two different filesystem behaviors often mistaken for each other. Just as case-insensitivity has sometimes been incorrectly implemented by permanently normalizing filenames to uppercase when storing and transmitting filenames, so Unicode form insensitivity has sometimes been incorrectly implemented by permanently normalizing filenames to a certain Unicode form (NFD in the case of HFS+) when storing and transmitting filenames. It is possible and much better to implement Unicode form insensitivity without sacrificing Unicode form preservation, by using Unicode normalization for comparison only. 
+ +## Comparing Different Unicode Forms + +Node.js provides `string.normalize('NFC' / 'NFD')` which you can use to normalize a UTF-8 string to either NFC or NFD. You should never store the output from this function but only use it as part of a comparison function to test whether two UTF-8 strings would look the same to the user. + +You can use `string1.normalize('NFC') === string2.normalize('NFC')` or `string1.normalize('NFD') === string2.normalize('NFD')` as your comparison function. Which form you use does not matter. + +Normalization is fast but you may want to use a cache as input to your comparison function to avoid normalizing the same string many times over. If the string is not present in the cache then normalize it and cache it. Be careful not to store or persist the cache, use it only as a cache. + +Note that using `normalize()` requires that your version of Node.js include ICU (otherwise `normalize()` will just return the original string). If you download the latest version of Node.js from the website then it will include ICU. + +## Timestamp Resolution + +You may set the `mtime` (the modified time) of a file to `1444291759414` (millisecond resolution) and be surprised to see sometimes that `fs.stat` returns the new mtime as `1444291759000` (1-second resolution) or `1444291758000` (2-second resolution). This is not a bug in Node. Node.js returns the timestamp as the filesystem stores it, and not all filesystems support nanosecond, millisecond or 1-second timestamp resolution. Some filesystems even have very coarse resolution for the atime timestamp in particular, e.g. 24 hours for some FAT filesystems. + +## Do Not Corrupt Filenames and Timestamps Through Normalization + +Filenames and timestamps are user data. 
Just as you would never automatically rewrite user file data to uppercase the data or normalize `CRLF` to `LF` line-endings, so you should never change, interfere or corrupt filenames or timestamps through case / Unicode form / timestamp normalization. Normalization should only ever be used for comparison, never for altering data. + +Normalization is effectively a lossy hash code. You can use it to test for certain kinds of equivalence (e.g. do several strings look the same even though they have different byte sequences) but you can never use it as a substitute for the actual data. Your program should pass on filename and timestamp data as is. + +Your program can create new data in NFC (or in any combination of Unicode form it prefers) or with a lowercase or uppercase filename, or with a 2-second resolution timestamp, but your program should not corrupt existing user data by imposing case / Unicode form / timestamp normalization. Rather, adopt a superset approach and preserve case, Unicode form and timestamp resolution in your program. That way, you will be able to interact safely with filesystems which do the same. + +## Use Normalization Comparison Functions Appropriately + +Make sure that you use case / Unicode form / timestamp comparison functions appropriately. Do not use a case-insensitive filename comparison function if you are working on a case-sensitive filesystem. Do not use a Unicode form insensitive comparison function if you are working on a Unicode form sensitive filesystem (e.g. NTFS and most Linux filesystems which preserve both NFC and NFD or mixed Unicode forms). Do not compare timestamps at 2-second resolution if you are working on a nanosecond timestamp resolution filesystem. + +## Be Prepared for Slight Differences in Comparison Functions + +Be careful that your comparison functions match those of the filesystem (or probe the filesystem if possible to see how it would actually compare). 
Case-insensitivity for example is more complex than a simple `toLowerCase()` comparison. In fact, `toUpperCase()` is usually better than `toLowerCase()` (since it handles certain foreign language characters differently). But better still would be to probe the filesystem since every filesystem has its own case comparison table baked in. + +As an example, Apple's HFS+ normalizes filenames to NFD form but this NFD form is actually an older version of the current NFD form and may sometimes be slightly different from the latest Unicode standard's NFD form. Do not expect HFS+ NFD to be exactly the same as Unicode NFD all the time. diff --git a/locale/ro/docs/index.md b/locale/ro/docs/index.md new file mode 100644 index 000000000000..9b107a0384ed --- /dev/null +++ b/locale/ro/docs/index.md @@ -0,0 +1,48 @@ +--- +title: Docs +layout: docs.hbs +labels: + lts: LTS +--- + +# About Docs + +There are several types of documentation available on this website: + +* API reference documentation +* ES6 features +* Guides + +## API Reference Documentation + +The [API reference documentation](https://nodejs.org/api/) provides detailed information about a function or object in Node.js. This documentation indicates what arguments a method accepts, the return value of that method, and what errors may be related to that method. It also indicates which methods are available for different versions of Node.js. + +This documentation describes the built-in modules provided by Node.js. It does not document modules provided by the community. + +
+ +### Looking for API docs of previous releases? + +* [Node.js 13.x](https://nodejs.org/docs/latest-v13.x/api/) +* [Node.js 12.x](https://nodejs.org/docs/latest-v12.x/api/) +* [Node.js 11.x](https://nodejs.org/docs/latest-v11.x/api/) +* [Node.js 10.x](https://nodejs.org/docs/latest-v10.x/api/) +* [Node.js 9.x](https://nodejs.org/docs/latest-v9.x/api/) +* [Node.js 8.x](https://nodejs.org/docs/latest-v8.x/api/) +* [Node.js 7.x](https://nodejs.org/docs/latest-v7.x/api/) +* [Node.js 6.x](https://nodejs.org/docs/latest-v6.x/api/) +* [Node.js 5.x](https://nodejs.org/docs/latest-v5.x/api/) +* [Node.js 4.x](https://nodejs.org/docs/latest-v4.x/api/) +* [Node.js 0.12.x](https://nodejs.org/docs/latest-v0.12.x/api/) +* [Node.js 0.10.x](https://nodejs.org/docs/latest-v0.10.x/api/) +* [All versions](https://nodejs.org/docs/) + +
+ +## ES6 Features + +The [ES6 section](/en/docs/es6/) describes the three ES6 feature groups, and details which features are enabled by default in Node.js, alongside explanatory links. It also shows how to find which version of V8 shipped with a particular Node.js release. + +## Guides + +The [Guides section](/en/docs/guides/) has long-form, in-depth articles about Node.js technical features and capabilities. diff --git a/locale/ro/docs/meta/topics/dependencies.md b/locale/ro/docs/meta/topics/dependencies.md new file mode 100644 index 000000000000..db12c22a2ace --- /dev/null +++ b/locale/ro/docs/meta/topics/dependencies.md @@ -0,0 +1,78 @@ +--- +title: Dependencies +layout: docs.hbs +--- + +# Dependencies + +There are several dependencies that Node.js relies on to work the way it does. + +* [Libraries](#libraries) + * [V8](#v8) + * [libuv](#libuv) + * [llhttp](#llhttp) + * [c-ares](#c-ares) + * [OpenSSL](#openssl) + * [zlib](#zlib) +* [Tools](#tools) + * [npm](#npm) + * [gyp](#gyp) + * [gtest](#gtest) + +## Libraries + +### V8 + +The V8 library provides Node.js with a JavaScript engine, which Node.js controls via the V8 C++ API. V8 is maintained by Google, for use in Chrome. + +* [Documentation](https://v8.dev/docs) + +### libuv + +Another important dependency is libuv, a C library that is used to abstract non-blocking I/O operations to a consistent interface across all supported platforms. It provides mechanisms to handle file system, DNS, network, child processes, pipes, signal handling, polling and streaming. It also includes a thread pool for offloading work for some things that can't be done asynchronously at the operating system level. + +* [Documentation](http://docs.libuv.org/) + +### llhttp + +HTTP parsing is handled by a lightweight TypeScript and C library called llhttp. It is designed to not make any syscalls or allocations, so it has a very small per-request memory footprint. 
+ +* [Documentation](https://github.com/nodejs/llhttp) + +### c-ares + +For some asynchronous DNS requests, Node.js uses a C library called c-ares. It is exposed through the DNS module in JavaScript as the `resolve()` family of functions. The `lookup()` function, which is what the rest of core uses, makes use of threaded `getaddrinfo(3)` calls in libuv. The reason for this is that c-ares supports /etc/hosts, /etc/resolv.conf and /etc/svc.conf, but not things like mDNS. + +* [Documentation](https://c-ares.haxx.se/docs.html) + +### OpenSSL + +OpenSSL is used extensively in both the `tls` and `crypto` modules. It provides battle-tested implementations of many cryptographic functions that the modern web relies on for security. + +* [Documentation](https://www.openssl.org/docs/) + +### zlib + +For fast compression and decompression, Node.js relies on the industry-standard zlib library, also known for its use in gzip and libpng. Node.js uses zlib to create sync, async and streaming compression and decompression interfaces. + +* [Documentation](https://www.zlib.net/manual.html) + +## Tools + +### npm + +Node.js is all about modularity, and with that comes the need for a quality package manager; for this purpose, npm was made. With npm comes the largest selection of community-created packages of any programming ecosystem, which makes building Node.js apps quick and easy. + +* [Documentation](https://docs.npmjs.com/) + +### gyp + +The build system is handled by gyp, a python-based project generator copied from V8. It can generate project files for use with build systems across many platforms. Node.js requires a build system because large parts of it — and its dependencies — are written in languages that require compilation. + +* [Documentation](https://gyp.gsrc.io/docs/UserDocumentation.md) + +### gtest + +Native code can be tested using gtest, which is taken from Chromium. It allows testing C/C++ without needing an existing node executable to bootstrap from. 
+ +* [Documentation](https://code.google.com/p/googletest/wiki/V1_7_Documentation) diff --git a/locale/ro/download/current.md b/locale/ro/download/current.md new file mode 100644 index 000000000000..57f9bc938c38 --- /dev/null +++ b/locale/ro/download/current.md @@ -0,0 +1,35 @@ +--- +layout: download-current.hbs +title: Download +download: Download +downloads: + headline: Downloads + lts: LTS + current: Current + tagline-current: Latest Features + tagline-lts: Recommended For Most Users + display-hint: Display downloads for + intro: > + Download the Node.js source code or a pre-built installer for your platform, and start developing today. + currentVersion: Latest Current Version + buildInstructions: Building Node.js from source on supported platforms + WindowsInstaller: Windows Installer + WindowsBinary: Windows Binary + MacOSInstaller: macOS Installer + MacOSBinary: macOS Binary + LinuxBinaries: Linux Binaries + SourceCode: Source Code +additional: + headline: Additional Platforms + intro: > + Members of the Node.js community maintain unofficial builds of Node.js for additional platforms. Note that such builds are not supported by the Node.js core team and may not yet be at the same build level as the current Node.js release. 
+ platform: Platform + provider: Provider + SmartOSBinaries: SmartOS Binaries + DockerImage: Docker Image + officialDockerImage: Official Node.js Docker Image + LinuxPowerSystems: Linux on Power LE Systems + LinuxSystemZ: Linux on System z + AIXPowerSystems: AIX on Power Systems +--- + diff --git a/locale/ro/download/index.md b/locale/ro/download/index.md new file mode 100644 index 000000000000..c9334c81b158 --- /dev/null +++ b/locale/ro/download/index.md @@ -0,0 +1,35 @@ +--- +layout: download.hbs +title: Download +download: Download +downloads: + headline: Downloads + lts: LTS + current: Current + tagline-current: Latest Features + tagline-lts: Recommended For Most Users + display-hint: Display downloads for + intro: > + Download the Node.js source code or a pre-built installer for your platform, and start developing today. + currentVersion: Latest LTS Version + buildInstructions: Building Node.js from source on supported platforms + WindowsInstaller: Windows Installer + WindowsBinary: Windows Binary + MacOSInstaller: macOS Installer + MacOSBinary: macOS Binary + LinuxBinaries: Linux Binaries + SourceCode: Source Code +additional: + headline: Additional Platforms + intro: > + Members of the Node.js community maintain unofficial builds of Node.js for additional platforms. Note that such builds are not supported by the Node.js core team and may not yet be at the same build level as the current Node.js release. 
+ platform: Platform + provider: Provider + SmartOSBinaries: SmartOS Binaries + DockerImage: Docker Image + officialDockerImage: Official Node.js Docker Image + LinuxPowerSystems: Linux on Power LE Systems + LinuxSystemZ: Linux on System z + AIXPowerSystems: AIX on Power Systems +--- + diff --git a/locale/ro/download/package-manager.md b/locale/ro/download/package-manager.md new file mode 100644 index 000000000000..7be271f9a17c --- /dev/null +++ b/locale/ro/download/package-manager.md @@ -0,0 +1,243 @@ +--- +layout: page.hbs +title: Installing Node.js via package manager +--- + +# Installing Node.js via package manager + +***Note:*** The packages on this page are maintained and supported by their respective packagers, **not** the Node.js core team. Please report any issues you encounter to the package maintainer. If it turns out your issue is a bug in Node.js itself, the maintainer will report the issue upstream. + +--- + +* [Android](#android) +* [Arch Linux](#arch-linux) +* [Debian and Ubuntu based Linux distributions, Enterprise Linux/Fedora and Snap packages](#debian-and-ubuntu-based-linux-distributions-enterprise-linux-fedora-and-snap-packages) +* [FreeBSD](#freebsd) +* [Gentoo](#gentoo) +* [IBM i](#ibm-i) +* [NetBSD](#netbsd) +* [nvm](#nvm) +* [OpenBSD](#openbsd) +* [openSUSE and SLE](#opensuse-and-sle) +* [macOS](#macos) +* [SmartOS and illumos](#smartos-and-illumos) +* [Solus](#solus) +* [Void Linux](#void-linux) +* [Windows](#windows) + +--- + +## Android + +Android support is still experimental in Node.js, so precompiled binaries are not yet provided by Node.js developers. + +However, there are some third-party solutions. For example, [Termux](https://termux.com/) community provides terminal emulator and Linux environment for Android, as well as own package manager and [extensive collection](https://github.com/termux/termux-packages) of many precompiled applications. 
This command in the Termux app will install the latest available Node.js version: + +```bash +pkg install nodejs +``` + +Currently, Termux Node.js binaries are linked against `system-icu` (depending on `libicu` package). + +## Arch Linux + +Node.js and npm packages are available in the Community Repository. + +```bash +pacman -S nodejs npm +``` + +## Debian and Ubuntu based Linux distributions, Enterprise Linux/Fedora and Snap packages + +[Node.js binary distributions](https://github.com/nodesource/distributions/blob/master/README.md) are available from NodeSource. + +## FreeBSD + +The most recent release of Node.js is available via the [www/node](https://www.freshports.org/www/node) port. + +Install a binary package via [pkg](https://www.freebsd.org/cgi/man.cgi?pkg): + +```bash +pkg install node +``` + +Or compile it on your own using [ports](https://www.freebsd.org/cgi/man.cgi?ports): + +```bash +cd /usr/ports/www/node && make install +``` + +## Gentoo + +Node.js is available in the portage tree. + +```bash +emerge nodejs +``` + +## IBM i + +LTS versions of Node.js are available from IBM, and are available via [the 'yum' package manager](https://ibm.biz/ibmi-rpms). The package name is `nodejs` followed by the major version number (for instance, `nodejs8`, `nodejs10`, `nodejs12`, etc) + +To install Node.js 12.x from the command line, run the following as a user with \*ALLOBJ special authority: + +```bash +yum install nodejs12 +``` + +Node.js can also be installed with the IBM i Access Client Solutions product. See [this support document](http://www-01.ibm.com/support/docview.wss?uid=nas8N1022619) for more details. + +## NetBSD + +Node.js is available in the pkgsrc tree: + +```bash +cd /usr/pkgsrc/lang/nodejs && make install +``` + +Or install a binary package (if available for your platform) using pkgin: + +```bash +pkgin -y install nodejs +``` + +## nvm +Node Version Manager is a bash script used to manage multiple released Node.js versions. 
It allows you to perform operations like install, uninstall, switch version, etc. To install nvm, use this [install script](https://github.com/nvm-sh/nvm#install--update-script). + +On Unix / OS X systems Node.js built from source can be installed using [nvm](https://github.com/creationix/nvm) by installing into the location that nvm expects: + +```bash +env VERSION=`python tools/getnodeversion.py` make install DESTDIR=`nvm_version_path v$VERSION` PREFIX="" +``` + +After this you can use `nvm` to switch between released versions and versions built from source. For example, if the version of Node.js is v8.0.0-pre: + +```bash +nvm use 8 +``` + +Once the official release is out you will want to uninstall the version built from source: + +```bash +nvm uninstall 8 +``` + +## OpenBSD + +Node.js is available through the ports system. + +```bash +/usr/ports/lang/node +``` + +Using [pkg_add](https://man.openbsd.org/OpenBSD-current/man1/pkg_add.1) on OpenBSD: + +```bash +pkg_add node +``` + +## openSUSE and SLE + +Node.js is available in the main repositories under the following packages: + +* **openSUSE Leap 42.2**: `nodejs4` +* **openSUSE Leap 42.3**: `nodejs4`, `nodejs6` +* **openSUSE Tumbleweed**: `nodejs4`, `nodejs6`, `nodejs8` +* **SUSE Linux Enterprise Server (SLES) 12**: `nodejs4`, `nodejs6` (The "Web and Scripting Module" must be [added before installing](https://www.suse.com/documentation/sles-12/book_sle_deployment/data/sec_add-ons_extensions.html).) + +For example, to install Node.js 4.x on openSUSE Leap 42.2, run the following as root: + +```bash +zypper install nodejs4 +``` + +## macOS + +Simply download the [macOS Installer](https://nodejs.org/en/#home-downloadhead) directly from the [nodejs.org](https://nodejs.org/) web site. 
+ +_If you want to download the package with bash:_ + +```bash +curl "https://nodejs.org/dist/latest/node-${VERSION:-$(wget -qO- https://nodejs.org/dist/latest/ | sed -nE 's|.*>node-(.*)\.pkg.*|\1|p')}.pkg" > "$HOME/Downloads/node-latest.pkg" && sudo installer -store -pkg "$HOME/Downloads/node-latest.pkg" -target "/" +``` + +### Alternatives + +Using **[Homebrew](https://brew.sh/)**: + +```bash +brew install node +``` + +Using **[MacPorts](https://www.macports.org/)**: + +```bash +port install nodejs + +# Example +port install nodejs7 +``` + +Using **[pkgsrc](https://pkgsrc.joyent.com/install-on-osx/)**: + +Install the binary package: + +```bash +pkgin -y install nodejs +``` + +Or build manually from pkgsrc: + +```bash +cd pkgsrc/lang/nodejs && bmake install +``` + +## SmartOS and illumos + +SmartOS images come with pkgsrc pre-installed. On other illumos distributions, first install **[pkgsrc](https://pkgsrc.joyent.com/install-on-illumos/)**, then you may install the binary package as normal: + +```bash +pkgin -y install nodejs +``` + +Or build manually from pkgsrc: + +```bash +cd pkgsrc/lang/nodejs && bmake install +``` + +## Solus + +Solus provides Node.js in its main repository. + +```bash +sudo eopkg install nodejs +``` + +## Void Linux + +Void Linux ships Node.js stable in the main repository. + +```bash +xbps-install -Sy nodejs +``` + +## Windows + +Simply download the [Windows Installer](https://nodejs.org/en/#home-downloadhead) directly from the [nodejs.org](https://nodejs.org/) web site. 
+ +### Alternatives + +Using **[Chocolatey](https://chocolatey.org/)**: + +```bash +cinst nodejs +# or for full install with npm +cinst nodejs.install +``` + +Using **[Scoop](https://scoop.sh/)**: + +```bash +scoop install nodejs +``` diff --git a/locale/ro/download/releases.md b/locale/ro/download/releases.md new file mode 100644 index 000000000000..dc2352ecd8b2 --- /dev/null +++ b/locale/ro/download/releases.md @@ -0,0 +1,23 @@ +--- +layout: download-releases.hbs +title: Previous Releases +modules: "NODE_MODULE_VERSION refers to the ABI (application binary interface) version number of Node.js, used to determine which versions of Node.js compiled C++ add-on binaries can be loaded in to without needing to be re-compiled. It used to be stored as hex value in earlier versions, but is now represented as an integer." +--- + +### io.js & Node.js +Releases 1.x through 3.x were called "io.js" as they were part of the io.js fork. As of Node.js 4.0.0 the former release lines of io.js converged with Node.js 0.12.x into unified Node.js releases. + +
+ +#### Looking for latest release of a version branch? + +* [Node.js 12.x](https://nodejs.org/dist/latest-v12.x/) +* [Node.js 10.x](https://nodejs.org/dist/latest-v10.x/) +* [Node.js 8.x](https://nodejs.org/dist/latest-v8.x/) +* [Node.js 6.x](https://nodejs.org/dist/latest-v6.x/) +* [Node.js 4.x](https://nodejs.org/dist/latest-v4.x/) +* [Node.js 0.12.x](https://nodejs.org/dist/latest-v0.12.x/) +* [Node.js 0.10.x](https://nodejs.org/dist/latest-v0.10.x/) +* [All versions](https://nodejs.org/dist/) + +
diff --git a/locale/ro/get-involved/code-and-learn.md b/locale/ro/get-involved/code-and-learn.md new file mode 100644 index 000000000000..78944e2973d5 --- /dev/null +++ b/locale/ro/get-involved/code-and-learn.md @@ -0,0 +1,24 @@ +--- +title: Code + Learn +layout: contribute.hbs +--- + +# Code + Learn + +Code & Learn events allow you to get started (or go further) with Node.js core contributions. Experienced contributors help guide you through your first (or second or third or fourth) commit to Node.js core. They also are available to provide impromptu guided tours through specific areas of Node.js core source code. + +* [Moscow, Russia on November 6, 2019](https://medium.com/piterjs/announcement-node-js-code-learn-in-moscow-fd997241c77) +* Shanghai, China at [COSCon](https://bagevent.com/event/5744455): November 3, 2019 +* Medellin, Colombia in June 21st & 22nd [NodeConfCo](https://colombia.nodeconf.com/) +* [Saint-Petersburg, Russia on May 26](https://medium.com/piterjs/code-learn-ce20d330530f) +* Bangalore, India at [Node.js - Code & Learn Meetup](https://www.meetup.com/Polyglot-Languages-Runtimes-Java-JVM-nodejs-Swift/events/256057028/): November 17, 2018 +* Kilkenny, Ireland at [NodeConfEU](https://www.nodeconf.eu/): November 4, 2018 +* Vancouver, BC at [Node Interactive](https://events.linuxfoundation.org/events/node-js-interactive-2018/): October 12, 2018 +* [Oakland on April 22, 2017](https://medium.com/the-node-js-collection/code-learn-learn-how-to-contribute-to-node-js-core-8a2dbdf9be45) +* Shanghai at JSConf.CN: July 2017 +* Vancouver, BC at [Node Interactive](http://events.linuxfoundation.org/events/node-interactive): October 6, 2017 +* Kilkenny, Ireland at [NodeConfEU](http://www.nodeconf.eu/): November 5, 2017 +* Austin in December 2016 +* Tokyo in November 2016 +* Amsterdam in September 2016 +* Dublin and London in September 2015 diff --git a/locale/ro/get-involved/collab-summit.md b/locale/ro/get-involved/collab-summit.md new file mode 100644 index 
000000000000..6ae5d97c6e1e --- /dev/null +++ b/locale/ro/get-involved/collab-summit.md @@ -0,0 +1,17 @@ +--- +title: Collab Summit +layout: contribute.hbs +--- + +# Collab Summit +Collaboration Summit is an un-conference for bringing current and potential contributors together to discuss Node.js with lively collaboration, education, and knowledge sharing. Committees and working groups come together twice per year to make important decisions while also being able to work on some exciting efforts they want to push forward in-person. + +## Who attends? + +Anyone is welcome to attend Collab Summit. During the summit, leaders will help onboard new contributors to groups they'd love to help prior to integrating them into the working sessions. + +This is your opportunity to learn what is happening within the community to jump in and contribute with the skills you have and would like to hone. + +Working groups will put together a schedule so that people can familiarize themselves before folks get onsite, having the general collaborator discussions, and then dive into breakout sessions. + +We'd love to see you at Collab Summit! Check out the [Summit repo](https://github.com/nodejs/summit) for upcoming and past Collab Summits and have a look at the [issues filed](https://github.com/nodejs/summit/issues) that share what individual working groups and committees are looking to discuss in-person. diff --git a/locale/ro/get-involved/contribute.md b/locale/ro/get-involved/contribute.md new file mode 100644 index 000000000000..6309e099837f --- /dev/null +++ b/locale/ro/get-involved/contribute.md @@ -0,0 +1,47 @@ +--- +title: Contributing +layout: contribute.hbs +--- + +# Contributing + +Thank you for your interest in contributing to Node.js! There are multiple ways and places you can contribute, and we're here to help facilitate that. 
+ +## Asking for General Help + +Because the level of activity in the `nodejs/node` repository is so high, questions or requests for general help using Node.js should be directed at the [Node.js help repository](https://github.com/nodejs/help/issues). + +## Reporting an Issue + +If you have found what you believe to be an issue with Node.js please do not hesitate to file an issue on the GitHub project. When filing your issue please make sure you can express the issue with a reproducible test case, and that test case should not include any external dependencies. That is to say, the test case can be executed without anything more than Node.js itself. + +When reporting an issue we also need as much information about your environment as you can include. We never know what information will be pertinent when trying to narrow down the issue. Please include at least the following information: + +* Version of Node.js +* Platform you're running on (macOS, SmartOS, Linux, Windows) +* Architecture you're running on (32bit or 64bit and x86 or ARM) + +The Node.js project is currently managed across a number of separate GitHub repositories, each with their own separate issues database. If possible, please direct any issues you are reporting to the appropriate repository but don't worry if things happen to get put in the wrong place, the community of contributors will be more than happy to help get you pointed in the right direction. + +* To report issues specific to Node.js, please use [nodejs/node](https://github.com/nodejs/node) +* To report issues specific to this website, please use [nodejs/nodejs.org](https://github.com/nodejs/nodejs.org/issues) + +## Code contributions + +If you'd like to fix bugs or add a new feature to Node.js, please make sure you consult the [Node.js Contribution Guidelines](https://github.com/nodejs/node/blob/master/CONTRIBUTING.md#pull-requests). 
The review process by existing collaborators for all contributions to the project is explained there as well. + +If you are wondering how to start, you can check [Node Todo](https://www.nodetodo.org/) which may guide you towards your first contribution. + +## Becoming a collaborator + +By becoming a collaborator, contributors can have even more impact on the project. They can help other contributors by reviewing their contributions, triage issues and take an even bigger part in shaping the project's future. Individuals identified by the TSC as making significant and valuable contributions across any Node.js repository may be made Collaborators and given commit access to the project. Activities taken into consideration include (but are not limited to) the quality of: + +* code commits and pull requests +* documentation commits and pull requests +* comments on issues and pull requests +* contributions to the Node.js website +* assistance provided to end users and novice contributors +* participation in Working Groups +* other participation in the wider Node.js community + +If individuals making valuable contributions do not believe they have been considered for commit access, they may [log an issue](https://github.com/nodejs/TSC/issues) or [contact a TSC member](https://github.com/nodejs/TSC#current-members) directly. diff --git a/locale/ro/get-involved/index.md b/locale/ro/get-involved/index.md new file mode 100644 index 000000000000..708bf8f53c15 --- /dev/null +++ b/locale/ro/get-involved/index.md @@ -0,0 +1,34 @@ +--- +title: Get involved +layout: contribute.hbs +--- + +# Get Involved + +## Community Discussion + +* The [GitHub issues list](https://github.com/nodejs/node/issues) is the place for discussion of Node.js core features. 
+* For real-time chat about Node.js development go to `irc.freenode.net` in the `#node.js` channel with an [IRC client](https://en.wikipedia.org/wiki/Comparison_of_Internet_Relay_Chat_clients) or connect in your web browser to the channel using [freenode's WebChat](https://webchat.freenode.net/#node.js). +* The official Node.js Twitter account is [nodejs](https://twitter.com/nodejs). +* The [Node.js Foundation calendar](https://nodejs.org/calendar) with all public team meetings. +* [Node.js Everywhere](https://newsletter.nodejs.org) is the official Node.js Monthly Newsletter. +* [Node.js Collection](https://medium.com/the-node-js-collection) is a collection of community-curated content on Medium. +* The [Community Committee](https://github.com/nodejs/community-committee) is a top-level committee in the Node.js Foundation focused on community-facing efforts. +* [Node Slackers](https://www.nodeslackers.com/) is a Node.js-focused Slack community. + +## Learning + +* [Official API reference documentation](https://nodejs.org/api/) details the Node.js API. +* [NodeSchool.io](https://nodeschool.io/) will teach you Node.js concepts via interactive command-line games. +* [Stack Overflow Node.js tag](https://stackoverflow.com/questions/tagged/node.js) collects new information every day. +* [The DEV Community Node.js tag](https://dev.to/t/node) is a place to share Node.js projects, articles and tutorials as well as start discussions and ask for feedback on Node.js-related topics. Developers of all skill-levels are welcome to take part. +* [Nodeiflux](https://discordapp.com/invite/vUsrbjd) is a friendly community of Node.js backend developers supporting each other on Discord. 
+ +## International community sites and projects + +* [Chinese community](https://cnodejs.org/) +* [Hungarian (Magyar) community](https://nodehun.blogspot.com/) +* [Israeli Facebook group for Node.js](https://www.facebook.com/groups/node.il/) +* [Japanese user group](https://nodejs.jp/) +* [Spanish language Facebook group for Node.js](https://www.facebook.com/groups/node.es/) +* [Vietnamese Node.js community](https://www.facebook.com/nodejs.vn/) diff --git a/locale/ro/get-involved/node-meetups.md b/locale/ro/get-involved/node-meetups.md new file mode 100644 index 000000000000..6333054cda64 --- /dev/null +++ b/locale/ro/get-involved/node-meetups.md @@ -0,0 +1,679 @@ +--- +title: Node.js Meetups +layout: contribute.hbs +--- + +# Node.js Meetups + +This is a list of Node.js meetups. Please submit a PR if you'd like to add your local group! + +## Code of Conduct + +If any meetup does not have a CoC and/or is reported as an unsafe place, it will be removed from this list. + +## Notes for adding meetups + +FORMAT + +* [Meetup](https://www.meetup.com/pdxnode/) +* Frequency of meetups +* How to submit a talk? «list here» +* Organizer names (if you wish to provide) +* Organizers contact info (if you wish to provide) + +REQUIREMENTS + +* Please state in your PR if this meetup abides by CoC. +* Link to CoC for verification. +* If you do not currently have a CoC, update the meetup with CoC before submitting. +* Submit your PR in alphabetical order. + +## Meetups + +### Africa + +* [Meetup](https://www.nodejs.africa) +* Frequency of meetups - bi-monthly +* How to submit a talk? [Submit to this form](https://docs.google.com/forms/d/e/1FAIpQLSe3vPkiO8ijtbP7fUhEotKefXU-fWUoDGtUSo1khmtA_7v1WQ/viewform) +* Organizer name - Agiri Abraham +* Organizer contact info - + +### Armenia/Yerevan + +* [Meetup](https://www.facebook.com/nodejsarmenia/) +* Frequency of meetups - quarterly +* How to submit a talk? 
[Write in our Telegram chat](https://t.me/nodejsarmenia) +* Organizer name - Node.js Armenian Community +* Organizer contact info - nodejsarm@gmail.com + +### Argentina + +#### Buenos Aires Province + +##### Buenos Aires + +* [Meetup](https://www.meetup.com/banodejs/) +* Frequency of meetups - monthly +* How to submit a talk? Write a message in the meetup page +* Organizer name - Alejandro Oviedo +* Organizer contact info - + +### Australia + +#### Victoria + +##### Melbourne + +* [Meetup](https://www.meetup.com/NodeMelbourne/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Andrey Sidorov +* Organizer contact info - + +##### Sydney + +* [Meetup](https://www.meetup.com/node-sydney/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - James Richardson. Co-organizer: Jessica Claire +* Organizer contact info - + +### Belgium + +#### Brussels + +##### Brussels + +* [Meetup](https://www.meetup.com/Belgian-node-js-User-Group/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Steven Beeckman +* Organizer contact info - + +### Bolivia + +#### La Paz + +* [Meetup](https://www.meetup.com/LaPazjs) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer names - [Mauricio De La Quintana](https://github.com/maudel), [Guillermo Paredes](https://github.com/GuillermoParedes), [Adrian Zelada](https://github.com/adrianzelada). +* Organizer contact info - [@maudelaquintana](https://twitter.com/maudelaquintana) + +### Brazil + +#### São Paulo + +* [Meetup](https://meetup.com/nodebr) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. 
+* Organizer names - [Erick Wendel](https://github.com/erickwendel), [Alan Hoffmeister](https://github.com/alanhoff), [Igor França](https://github.com/horaddrim), [Icaro Caldeira](https://github.com/icarcal), [Marcus Bergamo](https://github.com/thebergamo), [Igor Halfeld](https://github.com/igorHalfeld), [Lucas Santos](https://github.com/khaosdoctor). +* Organizer contact info - [@erickwendel_](https://twitter.com/erickwendel_), [@_StaticVoid](https://twitter.com/_staticvoid) + +##### Campinas + +* [Meetup](https://www.meetup.com/Nodeschool-Campinas/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Filipe Oliveira +* Organizer contact info - + +#### Minas Gerais + +* [Meetup](https://www.meetup.com/nodebr/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Hugo Iuri +* Organizer contact info - + +#### Rio Grande do Sul + +##### Porto Alegre + +* [Meetup](https://www.meetup.com/Node-js-Porto-Alegre-Meetup/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Henrique Schreiner +* Organizer contact info - + +### Canada + +#### British Columbia + +##### Vancouver + +* [Meetup](https://www.meetup.com/Node-JS-Community-Hangouts) +* Frequency of meetups - quarterly +* How to submit a talk? DM @keywordnew on twitter +* Organizer name - Manil Chowdhury +* Organizer contact info - + +#### Ontario + +##### Toronto + +* [Toronto JS Meetup](http://torontojs.com/) +* Frequency of meetups - weekly +* How to submit a talk? _Contact Organizers through Slack: http://slack.torontojs.com/_ +* Organizers name - Dann T. & Paul D. +* Organizer contact info - _Community Slack_ + +### Chile + +#### Santiago + +* [Meetup](https://www.meetup.com/es-ES/NodersJS/) +* Frequency of meetups - monthly +* How to submit a talk? 
Issue on GitHub [here](https://github.com/Noders/Meetups/issues/new) +* Organizer name - Rodrigo Adones and Ender Bonnet +* Organizer contact info - [Rodrigo](https://github.com/L0rdKras), [Ender](https://twitter.com/enbonnet) + +### Colombia + +#### Antioquia + +##### Medellín + +* [Meetup](https://www.meetup.com/node_co/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Camilo Montoya +* Organizer contact info - + +### Finland + +#### Uusimaa + +##### Helsinski + +* [Meetup](https://www.meetup.com/Helsinki-Node-js/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page +* Organizer name - Juha Lehtomaki +* Organizer contact info - + +### France + +#### Île-de-France + +##### Paris + +* [Meetup](https://www.meetup.com/Nodejs-Paris/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page or submit your talk on [nodejs.paris website](http://nodejs.paris/meetups) +* Organizer name - [Etienne Folio](https://twitter.com/Ornthalas), [Stanislas Ormières](https://twitter.com/laruiss), [Nicolas KOKLA](https://twitter.com/nkokla), Quentin Raynaud +* Organizer contact info - + +### Germany + +#### Bavaria + +##### Passau + +* [Meetup](https://www.meetup.com/de-DE/Nodeschool-Passau/) +* Frequency of meetups - quarterly +* How to submit a talk? Email [Valentin](mailto:valentin.huber@msg.group) +* Organizer name - Valentin Huber +* Organizer contact info - [Email](mailto:valentin.huber@msg.group) + +#### Berlin + +* [Meetup](https://www.meetup.com/Node-js-Meetup-Berlin/) +* Frequency of meetups - monthly +* How to submit a talk? Email [Andreas](mailto:npm@lubbe.org) +* Organizer name - Andreas Lubbe +* Organizer contact info - [Email](mailto:npm@lubbe.org) + +#### Hamburg + +* [Meetup](https://www.meetup.com/node-HH/) +* Frequency of meetups - monthly and on demand +* How to submit a talk? 
Contact organizers in the meetup page. +* Organizer name - Gregor Elke, Oliver Lorenz +* Organizer contact info - via Meetup, via [Slack](http://bit.ly/web-hh) + +### Greece + +#### Athens + +* [Meetup](https://www.meetup.com/nodejsathens/) +* Frequency of meetups - every two months +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - [Ioannis Nikolaou](https://www.linkedin.com/in/ioannis-nikolaou/) Co-organizers - Stratoula Kalafateli, [Kostas Siabanis](https://github.com/ksiabani), Megaklis Vasilakis +* Organizer contact info - + +### Hungary + +#### Budapest + +* [Meetup](https://www.meetup.com/nodebp/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Oroszi Róbert +* Organizer contact info - + +### India +#### Maharashtra + +##### Pune + +* [Meetup](https://www.meetup.com/JavaScripters) +* Frequency of meetups - monthly +* How to submit a talk? Send your queries to Pune.javascripters@gmail.com or Contact organizers in the meetup page. +* Organizer name - Imran shaikh & Akash Jarad +* Organizer contact info - javascripters.community@gmail.com + +##### Delhi + +* [Meetup](https://www.meetup.com/nodeJS-Devs/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Gaurav Gandhi. +* Organizer contact info - + +#### Gujarat + +##### Ahmedabad + +* [Meetup](https://www.meetup.com/meetup-group-iAIoTVuS/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page or write to dipesh@rapidops.com +* Organizer name - Dipesh Patel +* Organizer contact info - + +#### Rajasthan + +##### Jaipur + +* [Meetup](https://www.meetup.com/JaipurJS-Developer-Meetup/) +* Frequency of meetups - monthly +* How to submit a talk? 
[Email ayushrawal12@gmail.com](mailto:ayushrawal12@gmail.com) or [reach out to me on LinkedIn](https://www.linkedin.com/in/ayush-rawal) +* Organizer name - [Ayush Rawal](https://github.com/ayush-rawal) +* Organizer contact info - [Email](mailto:ayushrawal12@gmail.com) + +### Indonesia + +#### Jakarta + +* [Meetup](https://www.meetup.com/Node-js-Workshop/) +* Frequency of meetups - monthly - online +* How to submit a talk? [telegram group](https://t.me/nodejsid) +* Organizer name - Lukluk Luhuring Santoso +* Organizer contact info - [Email](mailto:luklukaha@gmail.com) + +### Ireland + +#### Dublin + +* [Meetup](https://www.meetup.com/Dublin-Node-js-Meetup/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Sean Walsh. Co-organizer: Leanne Vaughey +* Organizer contact info - + +* [Meetup](https://www.meetup.com/Nodeschool-Dublin-Meetup/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Anton Whalley +* Organizer contact info - + +### Israel + +#### Tel Aviv + +* [Meetup](https://www.meetup.com/NodeJS-Israel/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page or submit your talk on [Node.js-IL website](http://www.nodejsil.com/). +* Organizer name - [Idan Dagan](https://github.com/idandagan1), [Guy Segev](https://github.com/guyguyon), [Tomer Omri](https://github.com/TomerOmri) +* Organizer contact info - [Email](mailto:nodejsisrael8@gmail.com) + +### Mexico + +#### Mexico City + +* [Meetup](https://www.meetup.com/NodeBotsMX/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Saúl Buentello +* Organizer contact info - + +### New Zealand + +#### Auckland + +* [Meetup](https://www.meetup.com/AucklandNodeJs/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. 
+* Organizer name - George Czabania +* Organizer contact info - + +### Russia + +#### Moscow + +* [Meetup](https://www.meetup.com/Moscow-NodeJS-Meetup/) +* Frequency of meetups - every 6-9 months +* How to submit a talk? Contact organizers in the meetup page or use the contact information below +* Organizer name - Denis Izmaylov +* Organizer contact info - [Telegram](https://t.me/DenisIzmaylov) [Twitter](https://twitter.com/DenisIzmaylov) [Facebook](https://facebook.com/denis.izmaylov) + +### South Africa + +#### Cape Town + +* [Meetup](https://www.meetup.com/nodecpt/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Guy Bedford +* Organizer contact info - + +### Spain + +#### Madrid + +* [Meetup](https://www.meetup.com/Node-js-Madrid/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Alex Fernández +* Organizer contact info - + +### Thailand + +#### Bangkok + +* [Meetup](https://www.meetup.com/Bangkok-Node-js/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Dylan Jay +* Organizer contact info - + +### Turkey + +#### Istanbul + +* [Meetup](https://www.meetup.com/nodeschool-istanbul/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Arif Çakıroğlu +* Organizer contact info - + +### United States + +#### Arizona + +##### Mesa + +* [Meetup](https://www.meetup.com/NodeAZ/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Chris Matthieu +* Organizer contact info - + +#### California + +##### Los Angeles + +* [js.la](https://js.la) +* Frequency of meetups - monthly +* How to submit a talk?
[contribute.js.la](https://contribute.js.la) +* Organizer name - David Guttman +* Organizer contact info - @dguttman on [slack.js.la](https://slack.js.la) + +##### Irvine + +* [Meetup](https://www.meetup.com/Node-JS-OC/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Farsheed Atef +* Organizer contact info - + +##### San Francisco + +* [Meetup](https://www.meetup.com/sfnode/) +* Frequency of meetups - monthly +* How to submit a talk? +* Organizer name - Dan Shaw +* Organizer contact info - + +* [Meetup](https://www.meetup.com/Node-js-Serverside-Javascripters-Club-SF/) +* Frequency of meetups - monthly +* How to submit a talk? +* Organizer name - Matt Pardee +* Organizer contact info - + +#### Colorado + +##### Denver + +* [Meetup](https://www.meetup.com/Node-js-Denver-Boulder/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page +* Organizer name - Brooks Patton +* Organizer contact info - + +#### Florida + +##### Jacksonville + +* [Meetup](https://www.meetup.com/Jax-Node-js-UG/) +* [Website](https://www.jaxnode.com) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - David Fekke +* Organizer contact info - David Fekke at gmail dot com + +#### Georgia + +##### Atlanta + +* [Meetup](https://www.meetup.com/Atlanta-Nodejs-Developers/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Ryan Connelly +* Organizer contact info - + +#### Illinois + +##### Chicago + +* [Meetup](https://www.meetup.com/Chicago-Nodejs/) +* Frequency of meetups - monthly +* How to submit a talk? 
Contact organizers in the meetup page or (https://airtable.com/shrTDwmMH3zsnsWOE) +* Organizer name - Mike Hostetler, Zeke Nierenberg, & Ben Neiswander +* Organizer contact info - + +#### Indiana + +##### Indianapolis + +* [Meetup](https://www.meetup.com/Node-indy/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Mike Seidle +* Organizer contact info - + +#### Massachusetts + +##### Boston + +* [Meetup](https://www.meetup.com/Boston-Node/) +* Frequency of meetups - ~monthly +* How to submit a talk? Contact organizers in the meetup page or post in slack workspace #\_node\_meetup (see below). +* Organizer name - [Brian Sodano](https://github.com/codemouse) +* Organizer contact info - [briansodano@gmail.com](mailto:briansodano@gmail.com) or [Boston JS slack workspace](https://bostonjavascript.slack.com) + +#### Michigan + +##### Detroit + +* [Meetup](https://www.meetup.com/DetNode/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Steve Marsh, Norman Witte and Israel V + +#### Minnesota + +##### Minneapolis + +* [Meetup](https://www.meetup.com/NodeMN/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Mike Frey +* Organizer contact info - + +#### New York + +##### New York + +* [Meetup](https://www.meetup.com/nodejs/) +* Frequency of meetups - monthly +* How to submit a talk? +* Organizer name - Patrick Scott Co-organizer: Matt Walters. +* Organizer contact info - +* How to submit a talk? Contact Pat Scott @ [pat@patscott.io](mailto:pat@patscott.io). Matt Walters @ [meetup@iammattwalters.com](mailto:meetup@iammattwalters.com). 
+* Slack: [join.thenodejsmeetup.com](http://join.thenodejsmeetup.com/) +* Videos: [https://www.youtube.com/c/thenodejsmeetup](https://www.youtube.com/c/thenodejsmeetup) + +#### North Carolina + +##### Raleigh Durham + +* [Meetup](https://www.meetup.com/triangle-nodejs/) +* Frequency of meetups - quarterly +* How to submit a talk? Email ladyleet@nodejs.org +* Organizer name - Tracy Lee +* Organizer contact info - ladyleet@nodejs.org + +#### Oregon + +##### Portland + +* [Meetup](http://pdxnode.org/) +* Frequency of meetups - Biweekly (presentation night 2nd Thursdays, hack night last Thursdays) +* How to submit a talk? [Submit a talk proposal](https://github.com/PDXNode/pdxnode/issues/new), or DM [@obensource](https://twitter.com/obensource) or [@MichelleJLevine](https://twitter.com/MichelleJLevine) on twitter +* Organizer names - Ben Michel, Michelle Levine +* Organizer contact info - Ben: benpmichel@gmail.com, Michelle: michelle@michellejl.com + +#### Pennsylvania + +##### Philadelphia + +* [Meetup](https://www.meetup.com/nodejs-philly/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page: https://www.meetup.com/nodejs-philly/members/14283814/ +* Organizer name - Leomar Durán +* Organizer contact info - + +#### Texas + +##### Austin + +* [Meetup](https://www.meetup.com/austinnodejs/) +* Frequency of meetups - monthly +* How to submit a talk? Contact Matt Walters @ [meetup@iammattwalters.com](mailto:meetup@iammattwalters.com). +* Organizer name - [Matt Walters](https://github.com/mateodelnorte/) +* Organizer contact info - [meetup@iammattwalters.com](mailto:meetup@iammattwalters.com) +* Slack: [join.thenodejsmeetup.com](http://join.thenodejsmeetup.com/) +* Videos: [https://www.youtube.com/c/thenodejsmeetup](https://www.youtube.com/c/thenodejsmeetup) + +* [Meetup](https://www.meetup.com/ATXNodeSchool/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. 
+* Organizer name - Stefan von Ellenrieder +* Organizer contact info - + +##### Dallas + +* [Meetup](https://www.meetup.com/DallasNode/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - [Cameron Steele](https://github.com/ATechAdventurer) +* Organizer contact info - [Cam.steeleis@gmail.com](mailto:Cam.steeleis@gmail.com) + +#### Utah + +##### Salt Lake City + +* [Meetup](https://www.meetup.com/utahnodejs/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page. +* Organizer name - Aaron Seth Madsen +* Organizer contact info - + +#### Washington + +##### Seattle + +* [Meetup](https://www.meetup.com/Seattle-Node-js/) +* Frequency of meetups - monthly +* How to submit a talk? +* Organizer name - Ryan Roemer +* Organizer contact info - + +* [Meetup](https://www.meetup.com/Seattle-NodeSchool/) +* Frequency of meetups - monthly +* How to submit a talk? Contact organizers in the meetup page +* Organizer name - Wil Alvarez +* Organizer contact info - + +#### Washington, DC. + +* [Meetup](https://www.meetup.com/node-dc/) +* Frequency of meetups - monthly +* How to submit a talk? Write to Andrew Dunkman adunkman@gmail.com +* Organizer name - Andrew Dunkman +* Organizer contact info - + +### UK + +#### London +##### LNUG + +* [Meetup](https://www.meetup.com/london-nodejs/) +* [GitHub/lnug](https://github.com/lnug/) +* Frequency of meetups - monthly +* How to submit a talk? Visit our [speakers repos](https://github.com/lnug/speakers), read the guidelines, and [submit a talk proposal as a new issue](https://github.com/lnug/speakers/issues). +* Organizer name - Adam Davis +* Organizer contact info - contact@lnug.org, [@lnugOrg](https://twitter.com/lnugorg) + +##### Node.js Workshops + +* [Meetup](https://www.meetup.com/NodeWorkshops//) +* Frequency of meetups - monthly +* How to submit a talk? 
+* Organizer name - Daryn Holmes +* Organizer contact info - + +#### Cambridge + +* [Meetup](https://www.meetup.com/JavaScript-Cambridge/) +* Frequency of meetups - monthly +* How to submit a talk? +* Organizer name - Joe Parry, co-organizer Rob Moran +* Organizer contact info - + +#### Oxford + +* [JSOxford](https://www.meetup.com/jsoxford/) +* Frequency of meetups - every 2 months +* How to submit a talk? [Submit Form](https://docs.google.com/forms/d/e/1FAIpQLSflx7LU44PuwlyCJj-WwlP_SlrUvxAd8uaXlY7_O65c7RLpGQ/viewform?usp=sf_link) +* Organizer names - Marcus Noble, Seren Davies +* Organizers contact info - organisers@jsoxford.com + +#### Edinburgh + +* [Node.js Edinburgh](https://www.meetup.com/Nodejs-Edinburgh/) +* Frequency of meetups - every 2 months +* How to submit a talk? [Submit Talk](mailto:michael@biggles.io?subject=Node.js%20Talk%20Proposal) +* Organizer names - Michael Antczak +* Organizers contact info - [AntczakMichael](https://twitter.com/AntczakMichael) + +### Ukraine + +#### Kiev + +* [Meetup](https://www.meetup.com/NodeUA/), [Old group](https://www.meetup.com/KievNodeJS/) +* Frequency of meetups - 1-8 times a month +* How to submit a talk? Contact organizer by email. +* Organizer name - Timur Shemsedinov +* Organizer contact info - [Email](mailto:timur.shemsedinov@gmail.com) diff --git a/locale/ro/index.md b/locale/ro/index.md new file mode 100644 index 000000000000..184470420436 --- /dev/null +++ b/locale/ro/index.md @@ -0,0 +1,23 @@ +--- +layout: index.hbs +labels: + current-version: Current Version + download: Download + download-for: Download for + other-downloads: Other Downloads + other-lts-downloads: Other LTS Downloads + other-current-downloads: Other Current Downloads + current: Current + lts: LTS + tagline-current: Latest Features + tagline-lts: Recommended For Most Users + changelog: Changelog + api: API Docs + version-schedule-prompt: Or have a look at the + version-schedule-prompt-link-text: Long Term Support (LTS) schedule. 
+ newsletter: true + newsletter-prefix: Sign up for + newsletter-postfix: ", the official Node.js Monthly Newsletter." +--- + +Node.js® is a JavaScript runtime built on [Chrome's V8 JavaScript engine](https://v8.dev/). diff --git a/locale/ro/knowledge/HTTP/clients/how-to-access-query-string-parameters.md b/locale/ro/knowledge/HTTP/clients/how-to-access-query-string-parameters.md new file mode 100644 index 000000000000..4db8a1043616 --- /dev/null +++ b/locale/ro/knowledge/HTTP/clients/how-to-access-query-string-parameters.md @@ -0,0 +1,54 @@ +--- +title: How to access query string parameters +date: '2011-08-26T10:08:50.000Z' +tags: + - http +difficulty: 1 +layout: knowledge-post.hbs +--- + +In Node.js, functionality to aid in the accessing of URL query string parameters is built into the standard library. The built-in `url.parse` method takes care of most of the heavy lifting for us. Here is an example script using this handy function and an explanation on how it works: + +```js +const http = require('http'); +const url = require('url'); + +http.createServer(function (req, res) { + const queryObject = url.parse(req.url,true).query; + console.log(queryObject); + + res.writeHead(200, {'Content-Type': 'text/html'}); + res.end('Feel free to add query parameters to the end of the url'); +}).listen(8080); +``` + +> To test this code run `node app.js` (app.js is the name of the file) on the terminal and then go to your browser and type `http://localhost:8080/app.js?foo=bad&baz=foo` on the URL bar + +The key part of this whole script is this line: `const queryObject = url.parse(req.url,true).query;`. Let's take a look at things from the inside-out. First off, `req.url` will look like `/app.js?foo=bad&baz=foo`. This is the part that is in the URL bar of the browser. Next, it gets passed to `url.parse` which parses out the various elements of the URL (NOTE: the second parameter is a boolean stating whether the method should parse the query string, so we set it to true).
Finally, we access the `.query` property, which returns us a nice, friendly JavaScript object with our query string data. + +The `url.parse()` method returns an object which has many key-value pairs, one of which is the `query` object. Some other handy information returned by the method includes the `host`, `pathname`, `search` keys. + +In the above code: + +* `url.parse(req.url,true).query` returns `{ foo: 'bad', baz: 'foo' }`. +* `url.parse(req.url,true).host` returns `'localhost:8080'`. +* `url.parse(req.url,true).pathname` returns `'/app.js'`. +* `url.parse(req.url,true).search` returns `'?foo=bad&baz=foo'`. + +### Parsing with querystring + +Another way to access query string parameters is parsing them using the `querystring` builtin Node.js module. + +This method, however, must be passed just a querystring portion of a url. Passing it the whole url, like you did in the `url.parse` example, won't parse the querystrings. + +```js +const querystring = require('querystring'); +const url = "http://example.com/index.html?code=string&key=12&id=false"; +const qs = "code=string&key=12&id=false"; + +console.log(querystring.parse(qs)); +// > { code: 'string', key: '12', id: 'false' } + +console.log(querystring.parse(url)); +// > { 'http://example.com/index.html?code': 'string', key: '12', id: 'false' } +``` diff --git a/locale/ro/knowledge/HTTP/clients/how-to-create-a-HTTP-request.md b/locale/ro/knowledge/HTTP/clients/how-to-create-a-HTTP-request.md new file mode 100644 index 000000000000..2fcbd9ef8d81 --- /dev/null +++ b/locale/ro/knowledge/HTTP/clients/how-to-create-a-HTTP-request.md @@ -0,0 +1,99 @@ +--- +title: How do I make a http request? +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - http +difficulty: 2 +layout: knowledge-post.hbs +--- + +Another extremely common programming task is making an HTTP request to a web server. Node.js provides an extremely simple API for this functionality in the form of `http.request`.
+ +As an example, we are going to perform a GET request to `www.random.org` (which returns a random integer between 1 and 10) and print the result to the console. + +```javascript +var http = require('http'); + +//The url we want is: 'www.random.org/integers/?num=1&min=1&max=10&col=1&base=10&format=plain&rnd=new' +var options = { + host: 'www.random.org', + path: '/integers/?num=1&min=1&max=10&col=1&base=10&format=plain&rnd=new' +}; + +callback = function(response) { + var str = ''; + + //another chunk of data has been received, so append it to `str` + response.on('data', function (chunk) { + str += chunk; + }); + + //the whole response has been received, so we just print it out here + response.on('end', function () { + console.log(str); + }); +} + +http.request(options, callback).end(); +``` + +Making a POST request is just as easy. We will make a POST request to `www.nodejitsu.com:1337` which is running a server that will echo back what we post. The code for making a POST request is almost identical to making a GET request, just a few simple modifications: + +```javascript +var http = require('http'); + +//The url we want is `www.nodejitsu.com:1337/` +var options = { + host: 'www.nodejitsu.com', + path: '/', + //since we are listening on a custom port, we need to specify it by hand + port: '1337', + //This is what changes the request to a POST request + method: 'POST' +}; + +callback = function(response) { + var str = '' + response.on('data', function (chunk) { + str += chunk; + }); + + response.on('end', function () { + console.log(str); + }); +} + +var req = http.request(options, callback); +//This is the data we are posting, it needs to be a string or a buffer +req.write("hello world!"); +req.end(); +``` + +Throwing in custom headers is just a tiny bit harder. On `www.nodejitsu.com:1338` we are running a server that will print out the `custom` header.
So we will just make a quick request to it: + +```javascript +var http = require('http'); + +var options = { + host: 'www.nodejitsu.com', + path: '/', + port: '1338', + //This is the only line that is new. `headers` is an object with the headers to request + headers: {'custom': 'Custom Header Demo works'} +}; + +callback = function(response) { + var str = '' + response.on('data', function (chunk) { + str += chunk; + }); + + response.on('end', function () { + console.log(str); + }); +} + +var req = http.request(options, callback); +req.end(); +``` diff --git a/locale/ro/knowledge/HTTP/servers/how-to-create-a-HTTP-server.md b/locale/ro/knowledge/HTTP/servers/how-to-create-a-HTTP-server.md new file mode 100644 index 000000000000..bb15f042c8e8 --- /dev/null +++ b/locale/ro/knowledge/HTTP/servers/how-to-create-a-HTTP-server.md @@ -0,0 +1,42 @@ +--- +title: How do I create a HTTP server? +date: '2011-08-26T10:08:50.000Z' +tags: + - http +difficulty: 1 +layout: knowledge-post.hbs +--- + +Making a simple HTTP server in Node.js has become the de facto 'hello world' for the platform. On the one hand, Node.js provides extremely easy-to-use HTTP APIs; on the other hand, a simple web server also serves as an excellent demonstration of the asynchronous strengths of Node.js. + +Let's take a look at a very simple example: + +```javascript +const http = require('http'); + +const requestListener = function (req, res) { + res.writeHead(200); + res.end('Hello, World!'); +} + +const server = http.createServer(requestListener); +server.listen(8080); +``` + +Save this in a file called `server.js` - run `node server.js`, and your program will hang there... it's waiting for connections to respond to, so you'll have to give it one if you want to see it do anything. Try opening up a browser, and typing `localhost:8080` into the location bar. If everything has been set up correctly, you should see your server saying hello! 
+ +Also, from your terminal you should be able to get the response using curl: + +``` +curl localhost:8080 +``` + +Let's take a more in-depth look at what the above code is doing. First, a function is defined called `requestListener` that takes a request object and a response object as parameters. + +The request object contains things such as the requested URL, but in this example we ignore it and always return "Hello World". + +The response object is how we send the headers and contents of the response back to the user making the request. Here we return a 200 response code (signaling a successful response) with the body "Hello World". Other headers, such as `Content-type`, would also be set here. + +Next, the `http.createServer` method creates a server that calls `requestListener` whenever a request comes in. The next line, `server.listen(8080)`, calls the `listen` method, which causes the server to wait for incoming requests on the specified port - 8080, in this case. + +There you have it - your most basic Node.js HTTP server. diff --git a/locale/ro/knowledge/HTTP/servers/how-to-create-a-HTTPS-server.md b/locale/ro/knowledge/HTTP/servers/how-to-create-a-HTTPS-server.md new file mode 100644 index 000000000000..7ab2dcfa0b13 --- /dev/null +++ b/locale/ro/knowledge/HTTP/servers/how-to-create-a-HTTPS-server.md @@ -0,0 +1,54 @@ +--- +title: How to create an https server? +date: '2011-08-26T10:08:50.000Z' +tags: + - https +difficulty: 1 +layout: knowledge-post.hbs +--- + +*If you're using [Nodejitsu](http://nodejitsu.com)*, we handle HTTPS for you. Free SSL on jit.su and nodejitsu.com subdomains, and SSL on custom domains for business customers. *It's never necessary to create an HTTPS server yourself.* + +--- + +To create an HTTPS server, you need two things: an SSL certificate, and built-in `https` Node.js module. + +We need to start out with a word about SSL certificates. 
Speaking generally, there are two kinds of certificates: those signed by a 'Certificate Authority', or CA, and 'self-signed certificates'. A Certificate Authority is a trusted source for an SSL certificate, and using a certificate from a CA allows your users to trust the identity of your website. In most cases, you would want to use a CA-signed certificate in a production environment - for testing purposes, however, a self-signed certificate will do just fine.
+
+To generate a self-signed certificate, run the following in your shell:
+
+```
+openssl genrsa -out key.pem
+openssl req -new -key key.pem -out csr.pem
+openssl x509 -req -days 9999 -in csr.pem -signkey key.pem -out cert.pem
+rm csr.pem
+```
+
+This should leave you with two files, `cert.pem` (the certificate) and `key.pem` (the private key). Put these files in the same directory as your Node.js server file. This is all you need for an SSL connection. So now you set up a quick hello world example (the biggest difference between https and [http](/en/knowledge/HTTP/servers/how-to-create-a-HTTP-server/) is the `options` parameter):
+
+```javascript
+const https = require('https');
+const fs = require('fs');
+
+const options = {
+  key: fs.readFileSync('key.pem'),
+  cert: fs.readFileSync('cert.pem')
+};
+
+https.createServer(options, function (req, res) {
+  res.writeHead(200);
+  res.end("hello world\n");
+}).listen(8000);
+```
+
+NODE PRO TIP: Note `fs.readFileSync` - unlike `fs.readFile`, `fs.readFileSync` will block the entire process until it completes. In situations like this - loading vital configuration data - the `sync` functions are okay. In a busy server, however, using a synchronous function during a request will force the server to deal with the requests one by one!
+
+> To start your https server, run `node app.js` (here, app.js is name of the file) on the terminal. 
+ +Now that your server is set up and started, you should be able to get the file with curl: + +``` +curl -k https://localhost:8000 +``` + +or in your browser, by going to https://localhost:8000 . diff --git a/locale/ro/knowledge/HTTP/servers/how-to-handle-multipart-form-data.md b/locale/ro/knowledge/HTTP/servers/how-to-handle-multipart-form-data.md new file mode 100644 index 000000000000..3121840d80c6 --- /dev/null +++ b/locale/ro/knowledge/HTTP/servers/how-to-handle-multipart-form-data.md @@ -0,0 +1,65 @@ +--- +title: How to handle multipart form data +date: '2011-09-09T10:08:50.000Z' +tags: + - http + - forms + - multipart + - uploads +difficulty: 3 +layout: knowledge-post.hbs +--- + +Handling form data and file uploads properly is an important and complex problem in HTTP servers. Doing it by hand would involve parsing streaming binary data, writing it to the file system, parsing out other form data, and several other complex concerns - luckily, only a very few people will need to worry about it on that deep level. Felix Geisendorfer, one of the Node.js core committers, wrote a library called `node-formidable` that handles all the hard parts for you. With its friendly API, you can be parsing forms and receiving file uploads in no time. + +This example is taken directly from the `node-formidable` GitHub page, with some additional explanation added. + +```javascript +var formidable = require('formidable'), + http = require('http'), + util = require('util'); + +http.createServer(function(req, res) { + + // This if statement is here to catch form submissions, and initiate multipart form data parsing. + + if (req.url == '/upload' && req.method.toLowerCase() == 'post') { + + // Instantiate a new formidable form for processing. + + var form = new formidable.IncomingForm(); + + // form.parse analyzes the incoming stream data, picking apart the different fields and files for you. 
+ + form.parse(req, function(err, fields, files) { + if (err) { + + // Check for and handle any errors here. + + console.error(err.message); + return; + } + res.writeHead(200, {'content-type': 'text/plain'}); + res.write('received upload:\n\n'); + + // This last line responds to the form submission with a list of the parsed data and files. + + res.end(util.inspect({fields: fields, files: files})); + }); + return; + } + + // If this is a regular request, and not a form submission, then send the form. + + res.writeHead(200, {'content-type': 'text/html'}); + res.end( + '
'+ + '
'+ + '
'+ + ''+ + '
' + ); +}).listen(8080); +``` + +Try it out for yourself - it's definitely the simpler solution, and `node-formidable` is a battle-hardened, production-ready library. Let userland solve problems like this for you, so that you can get back to writing the rest of your code! diff --git a/locale/ro/knowledge/HTTP/servers/how-to-read-POST-data.md b/locale/ro/knowledge/HTTP/servers/how-to-read-POST-data.md new file mode 100644 index 000000000000..90d4defb6e54 --- /dev/null +++ b/locale/ro/knowledge/HTTP/servers/how-to-read-POST-data.md @@ -0,0 +1,45 @@ +--- +title: How can I read POST data? +date: '2011-08-26T10:08:50.000Z' +tags: + - http +difficulty: 1 +layout: knowledge-post.hbs +--- + +Reading the data from a POST request (i.e. a form submission) can be a little bit of a pitfall in Node.js, so we're going to go through an example of how to do it properly. The first step, obviously, is to listen for incoming data - the trick is to wait for the data to finish, so that you can process all the form data without losing anything. + +Here is a quick script that shows you how to do exactly that: + +```javascript +var http = require('http'); +var postHTML = + 'Post Example' + + '' + + '
' + + 'Input 1:
' + + 'Input 2:
' + + '' + + '
' + + ''; + +http.createServer(function (req, res) { + var body = ""; + req.on('data', function (chunk) { + body += chunk; + }); + req.on('end', function () { + console.log('POSTed: ' + body); + res.writeHead(200); + res.end(postHTML); + }); +}).listen(8080); +``` + +The variable `postHTML` is a static string containing the HTML for two input boxes and a submit box - this HTML is provided so that you can `POST` example data. This is NOT the right way to serve static HTML - please see [How to Serve Static Files](/en/knowledge/HTTP/servers/how-to-serve-static-files/) for a more proper example. + +With the HTML out of the way, we [create a server](/en/knowledge/HTTP/servers/how-to-create-a-HTTP-server/) to listen for requests. It is important to note, when listening for POST data, that the `req` object is also an [Event Emitter](/en/knowledge/getting-started/control-flow/what-are-event-emitters/). `req`, therefore, will emit a `data` event whenever a 'chunk' of incoming data is received; when there is no more incoming data, the `end` event is emitted. So, in our case, we listen for `data` events. Once all the data is received, we log the data to the console and send the response. + +Something important to note is that the event listeners are being added immediately after the request object is received. If you don't immediately set them, then there is a possibility of missing some of the events. If, for example, an event listener was attached from inside a callback, then the `data` and `end` events might be fired in the meantime with no listeners attached! + +You can save this script to `server.js` and run it with `node server.js`. Once you run it you will notice that occasionally you will see lines with no data, e.g. `POSTed:`. This happens because regular `GET` requests go through the same codepath. In a more 'real-world' application, it would be proper practice to check the type of request and handle the different request types differently. 
diff --git a/locale/ro/knowledge/HTTP/servers/how-to-serve-static-files.md b/locale/ro/knowledge/HTTP/servers/how-to-serve-static-files.md
new file mode 100644
index 000000000000..327118288865
--- /dev/null
+++ b/locale/ro/knowledge/HTTP/servers/how-to-serve-static-files.md
@@ -0,0 +1,46 @@
+---
+title: How to serve static files
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - http
+difficulty: 1
+layout: knowledge-post.hbs
+---
+
+A basic necessity for most [http servers](/en/knowledge/HTTP/servers/how-to-create-a-HTTPS-server/) is to be able to serve static files. Thankfully, it is not that hard to do in Node.js. First you [read the file](/en/knowledge/file-system/how-to-read-files-in-nodejs/), then you serve the file. Here is an example of a script that will serve the files in the current directory:
+
+```javascript
+var fs = require('fs'),
+    http = require('http');
+
+http.createServer(function (req, res) {
+  fs.readFile(__dirname + req.url, function (err,data) {
+    if (err) {
+      res.writeHead(404);
+      res.end(JSON.stringify(err));
+      return;
+    }
+    res.writeHead(200);
+    res.end(data);
+  });
+}).listen(8080);
+```
+
+This example takes the path requested and it serves that path, relative to the local directory. This works fine as a quick solution; however, there are a few problems with this approach. First, this code does not correctly handle mime types. Additionally, a proper static file server should really be taking advantage of client side caching, and should send a "Not Modified" response if nothing has changed. Furthermore, there are security bugs that can enable a malicious user to break out of the current directory. (for example, `GET /../../../`).
+
+Each of these can be addressed individually without much difficulty. You can send the proper mime type header. You can figure out how to utilize the client caches. You can take advantage of `path.normalize` to make sure that requests don't break out of the current directory. 
But why write all that code when you can just use someone else's library? + +There is a good static file server called [node-static](https://github.com/cloudhead/node-static) written by Alexis Sellier which you can leverage. Here is a script which functions similarly to the previous one: + +```javascript +var static = require('node-static'); +var http = require('http'); + +var file = new(static.Server)(); + +http.createServer(function (req, res) { + file.serve(req, res); +}).listen(8080); +``` + +This is a fully functional file server that doesn't have any of the bugs previously mentioned. This is just the most basic set up, there are more things you can do if you look at [the api](https://github.com/cloudhead/node-static). Also since it is an open source project, you can always modify it to your needs (and feel free to contribute back to the project!). diff --git a/locale/ro/knowledge/REPL/how-to-create-a-custom-repl.md b/locale/ro/knowledge/REPL/how-to-create-a-custom-repl.md new file mode 100644 index 000000000000..a57195f66605 --- /dev/null +++ b/locale/ro/knowledge/REPL/how-to-create-a-custom-repl.md @@ -0,0 +1,105 @@ +--- +title: How to create and use a custom REPL +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - repl +difficulty: 2 +layout: knowledge-post.hbs +--- + +Node.js allows users to create their own REPLs with the [repl module](https://nodejs.org/api/repl.html). Its basic use looks like this: + +```js +var repl = require('repl') + +repl.start(prompt, stream); +``` + +Above, `prompt` is a string that's used for the prompt of your REPL (which defaults to "> ") and `stream` is the stream that the repl listens on, defaulting to `process.stdin`. When you run the standalone `node` REPL from the command prompt, what it's doing in the background is running `repl.start()` to give you the standard REPL. + +However, the repl is pretty flexible. 
Here's an example that shows this off: + +```js +#!/usr/bin/env node + +var net = require("net"); +var repl = require("repl"); + +var mood = function () { + var m = [ "^__^", "-___-;", ">.<", "<_>" ]; + return m[Math.floor(Math.random()*m.length)]; +}; + +//A remote node repl that you can telnet to! +net.createServer(function (socket) { + var remote = repl.start("node::remote> ", socket); + //Adding "mood" and "bonus" to the remote REPL's context. + remote.context.mood = mood; + remote.context.bonus = "UNLOCKED"; +}).listen(5001); + +console.log("Remote REPL started on port 5001."); + +//A "local" node repl with a custom prompt +var local = repl.start("node::local> "); + +// Exposing the function "mood" to the local REPL's context. +local.context.mood = mood; +``` + +This script creates *two* REPLs: One is normal excepting for its custom prompt, but the *other* is exposed via the net module so you can telnet to it! In addition, it uses the `context` property to expose the function "mood" to both REPLs, and the "bonus" string to the remote REPL only. As you will see, this approach of trying to expose objects to one REPL and not the other *doesn't really work*. + +In addition, all objects in the global scope will also be accessible to your REPLs. + +Here's what happens when you run the script: + +```shell +$ node repl.js +Remote REPL started on port 5001. +node::local> .exit +# -C + +$ node repl.js +Remote REPL started on port 5001. +node::local> mood() +'^__^' +node::local> bonus +ReferenceError: bonus is not defined +``` + +As may be seen, the `mood` function is usable within the local REPL, but the `bonus` string is not. This is as expected. + +Now, here's what happens when you try to telnet to port 5001: + +```shell +$ telnet localhost 5001 +Trying ::1... +Trying 127.0.0.1... +Connected to localhost. +Escape character is '^]'. +node::remote> mood() +'>.<' +node::remote> bonus +'UNLOCKED' +``` + +As you can see, the `mood` function is *also* available over telnet! 
In addition, so is "bonus". + +As an interesting consequence of my actions, bonus is now also defined on the local REPL: + +```shell +node::local> bonus +'UNLOCKED' +``` + +It seems we "unlocked" the `bonus` string on the local REPL as well. As it turns out, any variables created in one REPL are also available to the other: + +```shell +node::local> var node = "AWESOME!" + +node::remote> node +'AWESOME!' +``` + +As you can see, the node REPL is powerful and flexible. diff --git a/locale/ro/knowledge/REPL/how-to-use-nodejs-repl.md b/locale/ro/knowledge/REPL/how-to-use-nodejs-repl.md new file mode 100644 index 000000000000..a05589df85b7 --- /dev/null +++ b/locale/ro/knowledge/REPL/how-to-use-nodejs-repl.md @@ -0,0 +1,119 @@ +--- +title: "How do I use node's REPL?" +date: '2011-08-26T10:08:50.000Z' +tags: + - cli + - repl +difficulty: 1 +layout: knowledge-post.hbs +--- + +# Learn to use the REPL + +Node.js ships with a Read-Eval-Print Loop, also known as a REPL. It is the Node.js interactive shell; any valid JavaScript which can be written in a script can be passed to the REPL. It can be extremely useful for experimenting with Node.js, debugging code, and figuring out some of JavaScript's more eccentric behaviors. + +Node.js has a standalone REPL accessible from the command line, and a built in REPL module you can use to [create your own custom REPLs](https://nodejs.org/api/repl.html#repl_repl). We are going to learn about the basics of the standalone REPL. + +## How to Start the REPL + +Starting the REPL is simple - just run node on the command line without a filename. + +```shell +node +``` + +It then drops you into a simple prompt ('>') where you can type any JavaScript command you wish. As in most shells, you can press the up and down arrow keys to scroll through your command history and modify previous commands. + +```shell +$ node +> var x = "Hello, World!" +undefined +> x +"Hello, World!" 
+> .exit +``` + +You can also use the `Tab` key to autocomplete some commands. When multiple autocomplete options are available, hit `Tab` again to cycle through them. + +## Special Commands and Exiting the REPL + +The following special commands are supported by all REPL instances (from [Node.js REPL docs](https://nodejs.org/api/repl.html#repl_commands_and_special_keys): + +* `.exit` - Close the I/O stream, causing the REPL to exit. +* `.break` - When in the process of inputting a multi-line expression, entering the `.break` command (or pressing the `-C` key combination) will abort further input or processing of that expression. +* `.clear` - Resets the REPL `context` to an empty object and clears any multi-line expression currently being input. +* `.help` - Show this list of special commands. +* `.save` - Save the current REPL session to a file: `> .save ./file/to/save.js` +* `.load` - Load a file into the current REPL session. `> .load ./file/to/load.js` +* `.editor` - Enter editor mode (`-D` to finish, `-C` to cancel). + +```shell +> .editor +# Entering editor mode (-D to finish, -C to cancel) +function welcome(name) { + return `Hello ${name}!`; +} + +welcome('Node.js User'); + +# -D +'Hello Node.js User!' +> +``` + +The following key combinations in the REPL have these special effects: + +* `-C` - When pressed once, has the same effect as the `.break` command. When pressed twice on a blank line, has the same effect as the `.exit` command. +* `-D` - Has the same effect as the `.exit` command. +* `` - When pressed on a blank line, displays global and local (scope) variables. When pressed while entering other input, displays relevant autocompletion options. + +## Return Values + +Whenever you type a command, it will print the return value of the command. If you want to reuse the previous return value, you can use the special `_` variable. 
+ +For example: + +```shell +$ node +> 1+1 +2 +> _+1 +3 +``` + +One thing worth noting where REPL return values are concerned: + +```shell +> x = 10 +10 +> var y = 5 +> x +10 +> y +5 +``` + +When the `var` keyword is used, the value of the expression is stored, but *NOT* returned. When a bare identifier is used, the value is also returned, as well as stored. + +## Accessing Modules + +If you need to access any of the builtin modules, or any third party modules, they can be accessed with `require`, just like in the rest of Node. + +For example: + +```shell +$ node +> path = require('path') +{ resolve: [Function], + normalize: [Function], + join: [Function], + dirname: [Function], + basename: [Function], + extname: [Function], + exists: [Function], + existsSync: [Function] } +> path.basename("/a/b/c.txt") +'c.txt' +``` + +Note once again that without the `var` keyword, the contents of the object are returned immediately and displayed to `stdout`. diff --git a/locale/ro/knowledge/advanced/buffers/how-to-use-buffers.md b/locale/ro/knowledge/advanced/buffers/how-to-use-buffers.md new file mode 100644 index 000000000000..e6e56ef33cc9 --- /dev/null +++ b/locale/ro/knowledge/advanced/buffers/how-to-use-buffers.md @@ -0,0 +1,188 @@ +--- +title: How to Use Buffers in Node.js +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - Buffer + - buffer + - buffers + - binary +difficulty: 3 +layout: knowledge-post.hbs +--- + +## Why Buffers? + +Pure JavaScript, while great with unicode-encoded strings, does not handle straight binary data very well. This is fine on the browser, where most data is in the form of strings. However, Node.js servers have to also deal with TCP streams and reading and writing to the filesystem, both of which make it necessary to deal with purely binary streams of data. + +One way to handle this problem is to just use strings *anyway*, which is exactly what Node.js did at first. 
However, this approach is extremely problematic to work with; It's slow, makes you work with an API designed for strings and not binary data, and has a tendency to break in strange and mysterious ways. + +Don't use binary strings. Use *buffers* instead! + +## What Are Buffers? + +The `Buffer` class in Node.js is designed to handle raw binary data. Each buffer corresponds to some raw memory allocated outside V8. Buffers act somewhat like arrays of integers, but aren't resizable and have a whole bunch of methods specifically for binary data. The integers in a buffer each represent a byte and so are limited to values from 0 to 255 inclusive. When using `console.log()` to print the `Buffer` instance, you'll get a chain of values in hexadecimal values. + +## Where You See Buffers: + +In the wild, buffers are usually seen in the context of binary data coming from streams, such as `fs.createReadStream`. + +## Usage: + +### Creating Buffers: + +There are a few ways to create new buffers: + +```js +var buffer = Buffer.alloc(8); +// This will print out 8 bytes of zero: +// +``` + +This buffer is initialized and contains 8 bytes of zero. + +```js +var buffer = Buffer.from([ 8, 6, 7, 5, 3, 0, 9]); +// This will print out 8 bytes of certain values: +// +``` + +This initializes the buffer to the contents of this array. Keep in mind that the contents of the array are integers representing bytes. + +```js +var buffer = Buffer.from("I'm a string!", "utf-8"); +// This will print out a chain of values in utf-8: +// +``` + +This initializes the buffer to a binary encoding of the first string as specified by the second argument (in this case, `'utf-8'`). `'utf-8'` is by far the most common encoding used with Node.js, but `Buffer` also supports others. See [Supported Encodings](https://nodejs.org/dist/latest/docs/api/buffer.html#buffer_buffers_and_character_encodings) for more details. 
+ +### Writing to Buffers + +Given that there is already a buffer created: + +``` +> var buffer = Buffer.alloc(16) +``` + +we can start writing strings to it: + +``` +> buffer.write("Hello", "utf-8") +5 +``` + +The first argument to `buffer.write` is the string to write to the buffer, and the second argument is the string encoding. It happens to default to utf-8 so this argument is extraneous. + +`buffer.write` returned 5. This means that we wrote to five bytes of the buffer. The fact that the string "Hello" is also 5 characters long is coincidental, since each character *just happened* to be 8 bits apiece. This is useful if you want to complete the message: + +``` +> buffer.write(" world!", 5, "utf-8") +7 +``` + +When `buffer.write` has 3 arguments, the second argument indicates an offset, or the index of the buffer to start writing at. + +### Reading from Buffers: + +#### toString: + +Probably the most common way to read buffers is to use the `toString` method, since many buffers contain text: + +``` +> buffer.toString('utf-8') +'Hello world!\u0000�k\t' +``` + +Again, the first argument is the encoding. In this case, it can be seen that not the entire buffer was used! Luckily, because we know how many bytes we've written to the buffer, we can simply add more arguments to "stringify" the slice that's actually interesting: + +``` +> buffer.toString("utf-8", 0, 12) +'Hello world!' +``` + +#### Individual octets: + +You can also set individual bytes by using an array-like syntax: + +``` +> buffer[12] = buffer[11]; +33 +> buffer[13] = "1".charCodeAt(); +49 +> buffer[14] = buffer[13]; +49 +> buffer[15] = 33 +33 +> buffer.toString("utf-8") +'Hello world!!11!' +``` + +In this example, I set the remaining bytes, by hand, such that they represent utf-8 encoded "!" and "1" characters. + +### More Fun With Buffers + +#### Buffer.isBuffer(object) + +This method checks to see if `object` is a buffer, similar to `Array.isArray`. 
+ +#### Buffer.byteLength(string, encoding) + +With this function, you can check the number of bytes required to encode a string with a given encoding (which defaults to utf-8). This length is *not* the same as string length, since many characters require more bytes to encode. For example: + +``` +> var snowman = "☃"; +> snowman.length +1 +> Buffer.byteLength(snowman) +3 +``` + +The unicode snowman is only one character, but takes 3 entire bytes to encode! + +#### buffer.length + +This is the length of your buffer, and represents how much memory is allocated. It is not the same as the size of the buffer's contents, since a buffer may be half-filled. For example: + +``` +> var buffer = Buffer.alloc(16) +> buffer.write(snowman) +3 +> buffer.length +16 +``` + +In this example, the contents written to the buffer only consist of three groups (since they represent the single-character snowman), but the buffer's length is still 16, as it was initialized. + +#### buffer.copy(target, targetStart=0, sourceStart=0, sourceEnd=buffer.length) + +`buffer.copy` allows one to copy the contents of one buffer onto another. The first argument is the target buffer on which to copy the contents of `buffer`, and the rest of the arguments allow for copying only a subsection of the source buffer to somewhere in the middle of the target buffer. For example: + +``` +> var frosty = Buffer.alloc(24) +> var snowman = Buffer.from("☃", "utf-8") +> frosty.write("Happy birthday! ", "utf-8") +16 +> snowman.copy(frosty, 16) +3 +> frosty.toString("utf-8", 0, 19) +'Happy birthday! ☃' +``` + +In this example, I copied the "snowman" buffer, which contains a 3 byte long character, to the "frosty" buffer, to which I had written to the first 16 bytes. Because the snowman character is 3 bytes long, the result takes up 19 bytes of the buffer. 
+
+#### buffer.slice(start, end=buffer.length)
+
+This method's API is generally the same as that of `Array.prototype.slice`, but with one very important difference: The slice is **not** a new buffer and merely references a subset of the memory space. *Modifying the slice will also modify the original buffer*! For example:
+
+```
+> var puddle = frosty.slice(16, 19)
+> puddle.toString()
+'☃'
+> puddle.write("___")
+3
+> frosty.toString("utf-8", 0, 19)
+'Happy birthday! ___'
+```
+
+Now Frosty has been turned into a puddle of underscores. Bummer.
diff --git a/locale/ro/knowledge/advanced/streams/how-to-use-fs-create-read-stream.md b/locale/ro/knowledge/advanced/streams/how-to-use-fs-create-read-stream.md
new file mode 100644
index 000000000000..de859af4e768
--- /dev/null
+++ b/locale/ro/knowledge/advanced/streams/how-to-use-fs-create-read-stream.md
@@ -0,0 +1,36 @@
+---
+title: How to use fs.createReadStream?
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - core
+  - streams
+  - fs
+difficulty: 3
+layout: knowledge-post.hbs
+---
+
+The function `fs.createReadStream()` allows you to open up a readable stream in a very simple manner. All you have to do is pass the path of the file to start streaming in. It turns out that the response (as well as the request) objects are streams. So we will use this fact to create a http server that streams the files to the client. Since the code is simple enough, it is pretty easy just to read through it and comment why each line is necessary. 
+
+```javascript
+var http = require('http');
+var fs = require('fs');
+
+http.createServer(function(req, res) {
+  // The filename is simply the local directory and tacks on the requested url
+  var filename = __dirname+req.url;
+
+  // This line opens the file as a readable stream
+  var readStream = fs.createReadStream(filename);
+
+  // This will wait until we know the readable stream is actually valid before piping
+  readStream.on('open', function () {
+    // This just pipes the read stream to the response object (which goes to the client)
+    readStream.pipe(res);
+  });
+
+  // This catches any errors that happen while creating the readable stream (usually invalid names)
+  readStream.on('error', function(err) {
+    res.end(err);
+  });
+}).listen(8080);
+```
diff --git a/locale/ro/knowledge/advanced/streams/how-to-use-fs-create-write-stream.md b/locale/ro/knowledge/advanced/streams/how-to-use-fs-create-write-stream.md
new file mode 100644
index 000000000000..6c41a122da68
--- /dev/null
+++ b/locale/ro/knowledge/advanced/streams/how-to-use-fs-create-write-stream.md
@@ -0,0 +1,36 @@
+---
+title: How to use fs.createWriteStream?
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - core
+  - streams
+  - fs
+difficulty: 3
+layout: knowledge-post.hbs
+---
+
+The function `fs.createWriteStream()` creates a writable stream in a very simple manner. After a call to `fs.createWriteStream()` with the filepath, you have a writeable stream to work with. It turns out that the response (as well as the request) objects are streams. So we will stream the `POST` data to the file `output`. Since the code is simple enough, it is pretty easy just to read through it and comment why each line is necessary. 
+ +```javascript +var http = require('http'); +var fs = require('fs'); + +http.createServer(function(req, res) { + // This opens up the writeable stream to `output` + var writeStream = fs.createWriteStream('./output'); + + // This pipes the POST data to the file + req.pipe(writeStream); + + // After all the data is saved, respond with a simple html form so they can post more data + req.on('end', function () { + res.writeHead(200, {"content-type":"text/html"}); + res.end('
');
+  });
+
+  // This is here in case any errors occur
+  writeStream.on('error', function (err) {
+    console.log(err);
+  });
+}).listen(8080);
+```
diff --git a/locale/ro/knowledge/advanced/streams/how-to-use-stream-pipe.md b/locale/ro/knowledge/advanced/streams/how-to-use-stream-pipe.md
new file mode 100644
index 000000000000..b357ce7efb42
--- /dev/null
+++ b/locale/ro/knowledge/advanced/streams/how-to-use-stream-pipe.md
@@ -0,0 +1,94 @@
+---
+title: How to use stream.pipe
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - core
+  - streams
+difficulty: 2
+layout: knowledge-post.hbs
+---
+
+If you've been using Node.js for a while, you've definitely run into streams. HTTP connections are streams, open files are streams; stdin, stdout, and stderr are all streams as well. A 'stream' is node's I/O abstraction - if you feel like you still need to understand them better, you can read more about them [here](https://nodejs.org/api/stream.html#stream_stream).
+
+Streams make for quite a handy abstraction, and there's a lot you can do with them - as an example, let's take a look at `stream.pipe()`, the method used to take a readable stream and connect it to a writeable stream. Suppose we want to spawn a `node` child process and pipe our stdout and stdin to its corresponding stdout and stdin.
+
+```javascript
+#!/usr/bin/env node

+var child = require('child_process');
+
+var myREPL = child.spawn('node');
+
+myREPL.stdout.pipe(process.stdout, { end: false });
+
+process.stdin.resume();
+
+process.stdin.pipe(myREPL.stdin, { end: false });
+
+myREPL.stdin.on('end', function() {
+  process.stdout.write('REPL stream ended.');
+});
+
+myREPL.on('exit', function (code) {
+  process.exit(code);
+});
+```
+
+There you have it - spawn the Node.js REPL as a child process, and pipe your stdin and stdout to its stdin and stdout. Make sure to listen for the child's 'exit' event, too, or else your program will just hang there when the REPL exits.
+
+Another use for `stream.pipe()` is file streams. 
In Node.js, `fs.createReadStream()` and `fs.createWriteStream()` are used to create a stream to an open file descriptor. Now let's look at how one might use `stream.pipe()` to write to a file. You'll probably recognize most of the code: + +```javascript +#!/usr/bin/env node + +var child = require('child_process'), + fs = require('fs'); + +var myREPL = child.spawn('node'), + myFile = fs.createWriteStream('myOutput.txt'); + +myREPL.stdout.pipe(process.stdout, { end: false }); +myREPL.stdout.pipe(myFile); + +process.stdin.resume(); + +process.stdin.pipe(myREPL.stdin, { end: false }); +process.stdin.pipe(myFile); + +myREPL.stdin.on("end", function() { + process.stdout.write("REPL stream ended."); +}); + +myREPL.on('exit', function (code) { + process.exit(code); +}); +``` + +With those small additions, your stdin and the stdout from your REPL will both be piped to the writeable file stream you opened to 'myOutput.txt'. It's that simple - you can pipe streams to as many places as you want. + +Another very important use case for `stream.pipe()` is with HTTP request and response objects. Here we have the very simplest kind of proxy: + +```javascript +#!/usr/bin/env node + +var http = require('http'); + +http.createServer(function(request, response) { + var proxy = http.createClient(9000, 'localhost') + var proxyRequest = proxy.request(request.method, request.url, request.headers); + proxyRequest.on('response', function (proxyResponse) { + proxyResponse.pipe(response); + }); + request.pipe(proxyRequest); +}).listen(8080); + +http.createServer(function (req, res) { + res.writeHead(200, { 'Content-Type': 'text/plain' }); + res.write('request successfully proxied to port 9000!' + '\n' + JSON.stringify(req.headers, true, 2)); + res.end(); +}).listen(9000); +``` + +One could also use `stream.pipe()` to send incoming requests to a file for logging, or to a child process, or any one of a number of other things. 
+ +Hopefully this has shown you the basics of using `stream.pipe()` to easily pass your data streams around. It's truly a powerful little trick in Node.js, and its uses are yours to explore. Happy coding, and try not to cross your streams! diff --git a/locale/ro/knowledge/advanced/streams/what-are-streams.md b/locale/ro/knowledge/advanced/streams/what-are-streams.md new file mode 100644 index 000000000000..ab5d98cbfd0a --- /dev/null +++ b/locale/ro/knowledge/advanced/streams/what-are-streams.md @@ -0,0 +1,46 @@ +--- +title: What are streams? +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - streams +difficulty: 3 +layout: knowledge-post.hbs +--- + +Streams are another basic construct in Node.js that encourages asynchronous coding. Streams allow you to process the data as it is generated or retrieved. Streams can be readable, writeable, or both. + +In other words, Streams use events to deal with data as it happens, rather than only with a callback at the end. Readable streams emit the event `data` for each chunk of data that comes in, and an `end` event, which is emitted when there is no more data. Writeable streams can be written to with the `write()` function, and closed with the `end()` function. All types of streams emit `error` events when errors arise. + +As a quick example, we can write a simple version of `cp` (the Unix utility that copies files). We could do that by reading the whole file with standard filesystem calls and then writing it out to a file. Unfortunately, that requires that the whole file be read in before it can be written. In this case, writing the file isn't faster, but if we were streaming over a network or doing CPU processing on the data, then there could be measurable performance improvements. + +Run this script with arguments like `node cp.js src.txt dest.txt`. This would mean, in the code below, that `process.argv[2]` is `src.txt` and `process.argv[3]` is `desc.txt`. 
+ +```javascript +var fs = require('fs'); +console.log(process.argv[2], '->', process.argv[3]); + +var readStream = fs.createReadStream(process.argv[2]); +var writeStream = fs.createWriteStream(process.argv[3]); + +readStream.on('data', function (chunk) { + writeStream.write(chunk); +}); + +readStream.on('end', function () { + writeStream.end(); +}); + +//Some basic error handling +readStream.on('error', function (err) { + console.log("ERROR", err); +}); + +writeStream.on('error', function (err) { + console.log("ERROR", err); +}); +``` + +This sets up a readable stream from the source file and a writable stream to the destination file. Then whenever the readable stream gets data, it gets written to the writeable stream. Then finally it closes the writable stream when the readable stream is finished. + +It would have been better to use [pipe](/en/knowledge/advanced/streams/how-to-use-stream-pipe/) like `readStream.pipe(writeStream);`, however, to show how streams work, we have done things the long way. diff --git a/locale/ro/knowledge/child-processes/how-to-spawn-a-child-process.md b/locale/ro/knowledge/child-processes/how-to-spawn-a-child-process.md new file mode 100644 index 000000000000..245d5d8f7479 --- /dev/null +++ b/locale/ro/knowledge/child-processes/how-to-spawn-a-child-process.md @@ -0,0 +1,60 @@ +--- +title: How to spawn a child process - the basics +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - child_process +difficulty: 2 +layout: knowledge-post.hbs +--- + +If you find yourself wishing you could have your Node.js process start another program for you, then look no further than the `child_process` module. + +The simplest way is the "fire, forget, and buffer" method using `child_process.exec`. It runs your process, buffers its output (up to a default maximum of 200kb), and lets you access it from a callback when it is finished. + +The examples you will see in this article are all Linux-based. 
On Windows, you need to switch these commands with their Windows alternatives. + +Take a look at an example: + +```js +const { exec } = require('child_process'); + +const ls = exec('ls -l', function (error, stdout, stderr) { + if (error) { + console.log(error.stack); + console.log('Error code: '+error.code); + console.log('Signal received: '+error.signal); + } + console.log('Child Process STDOUT: '+stdout); + console.log('Child Process STDERR: '+stderr); +}); + +ls.on('exit', function (code) { + console.log('Child process exited with exit code '+code); +}); +``` + +`error.stack` is a stack trace to the point that the [Error object](/en/knowledge/errors/what-is-the-error-object/) was created. + +The `stderr` of a given process is not exclusively reserved for error messages. Many programs use it as a channel for secondary data instead. As such, when trying to work with a program that you have not previously spawned as a child process, it can be helpful to start out dumping both `stdout` and `stderr`, as shown above, to avoid any surprises. + +While `child_process.exec` buffers the output of the child process for you, it also returns a `ChildProcess` object, which wraps a still-running process. In the example above, since we are using `ls`, a program that will exit immediately regardless, the only part of the `ChildProcess` object worth worrying about is the `on exit` handler. It is not necessary here - the process will still exit and the error code will still be shown on errors. + +**Buffering the Output** means that the output of the command is loaded into the memory before sending to `stdout` or `stderr` and as mentioned above a default of 200KB can be buffered into the memory. This feature has both pros and cons: + +Pros: + +* You can pipe the output of one command as the input to another (just like you could in Linux). Example `ls -al | grep '^package'` will show the list of all the sub-directories in the current directory that begin with the word `'package'`. 
+ +Cons: + +* Buffering the entire data into memory will affect the process performance. +* Only a set maximum size of data can be buffered. + +There are other very useful spawning functions like: `.spawn()`, `.fork()`, `.execFile()`. + +* `child_process.spawn()`: The spawn function launches a command in a new process and you can use it to pass that command any arguments. It's the most generic spawning function and all other functions are built over it [[docs]](https://nodejs.org/api/child_process.html#child_process_child_process). +* `child_process.execFile()`: The execFile function is similar to `child_process.exec(`) except that it spawns the command directly without first spawning a shell by default [[docs]](https://nodejs.org/api/child_process.html#child_process_child_process_execfile_file_args_options_callback). +* `child_process.fork()`: The fork function spawns a new Node.js process and invokes a specified module with an IPC communication channel established that allows sending messages between parent and child [[docs]](https://nodejs.org/api/child_process.html#child_process_child_process_fork_modulepath_args_options). + +The functions `.exec()`, `.spawn()` and `.execFile()` do have their synchronous blocking versions that will wait until the child process exits namely `.execSync()`, `.spawnSync()` and `.execFileSync()` respectively. 
These blocking versions are particularly useful for one-time startup processing tasks.
+ +Notice, also, from the second pair of `console.log` statements, that once set, a color value persists as part of the string. This is because under the hood, the proper ANSI color tags have been prepended and appended as necessary - anywhere the string gets passed where ANSI color codes are also supported, the color will remain. + +The last pair of `console.log` statements are probably the most important. Because of the way `colors.js` and ANSI color codes work, if more than one color property is set on a string, **only the first color property to be set on the string takes effect.** This is because the colors function as 'state shifts' rather than as tags. + +Let's look at a more explicit example. If you set the following properties with `colors.js`: + +```js +myString.red.blue.green +``` + +You can think of your terminal saying to itself, "Make this green. No, make this blue. No, make this red. No more color codes now? Red it is, then." The codes are read in the reverse order, and the last/'innermost' is applied. This can be extremely useful if you're using a library that sets its own default colors that you don't like - if you set a color code yourself on the string you pass in to the library, it will supersede the other author's color code(s). + +The last thing to note is the final line of the example script. While a color code was set previously, a 'bold' code was not, so the example was made bold, but not given a different color. + +### Using `colors` without changing `String.prototype` +Now an instance of `colors` can also be used. Though this approach is slightly less nifty but is beginner friendly and is specially useful if you don't want to touch `String.prototype`. 
Some example of this are: + +```js +const colors = require('colors'); + +const stringOne = 'This is a plain string.'; +const stringTwo = 'This string is red.'; +const stringThree = 'This string is blue.'; +const today = new Date().toLocaleDateString(); // returns today's date in mm/dd/yyyy format + +console.log(colors.bgMagenta.black(stringOne)); +console.log(colors.bold.bgRed.yellow(stringOne)); +console.log(colors.bgGreen.black(`Today is: ${today}`)); + +console.log(colors.red(stringTwo)); +console.log(colors.blue(stringThree)); + +console.log(colors.magenta.red(stringTwo)); +console.log(colors.bold.grey.black.blue(stringThree)); +``` + +Unlike the `String.prototype` approach, the chained methods on the `colors` instance are executed left to right i.e., the method closest to the string is finally applied. In the last `console.log` you can think of your terminal saying to itself, "Make this grey. Now, make this black. Now, make this blue. No more coloring methods now? Blue it is, then." + +With the latest version of `colors.js` you can also define **[Custom Themes](https://www.npmjs.com/package/colors#custom-themes)** in `color.js`, which makes our code more Robust and allows better Encapsulation of data. A nice use case of this maybe: + +```js +var colors = require('colors'); + +colors.setTheme({ + info: 'bgGreen', + help: 'cyan', + warn: 'yellow', + success: 'bgBlue', + error: 'red' +}); + +// outputs red text +console.log("this is an error".error); + +// outputs text on blue background +console.log("this is a success message".success); +``` + +One last thing: the colors can look quite different in different terminals - sometimes, `bold` is bold, sometimes it's just a different color. Try it out and see for yourself! + +For reference, here's the full list of available `colors.js` properties. 
+ +### text colors + +* black +* red +* green +* yellow +* blue +* magenta +* cyan +* white +* gray +* grey + +### background colors + +* bgBlack +* bgRed +* bgGreen +* bgYellow +* bgBlue +* bgMagenta +* bgCyan +* bgWhite + +### styles + +* reset +* bold +* dim +* italic +* underline +* inverse +* hidden +* strikethrough + +### extras + +* rainbow +* zebra +* america +* trap +* random diff --git a/locale/ro/knowledge/command-line/how-to-parse-command-line-arguments.md b/locale/ro/knowledge/command-line/how-to-parse-command-line-arguments.md new file mode 100644 index 000000000000..611304f04106 --- /dev/null +++ b/locale/ro/knowledge/command-line/how-to-parse-command-line-arguments.md @@ -0,0 +1,127 @@ +--- +title: How to parse command line arguments +date: '2011-08-26T10:08:50.000Z' +tags: + - cli +difficulty: 1 +layout: knowledge-post.hbs +--- + +Passing in arguments via the command line is an extremely basic programming task, and a necessity for anyone trying to write a simple Command-Line Interface (CLI). In Node.js, as in C and many related environments, all command-line arguments received by the shell are given to the process in an array called `argv` (short for 'argument values'). + +Node.js exposes this array for every running process in the form of `process.argv` - let's take a look at an example. Make a file called `argv.js` and add this line: + +```js +console.log(process.argv); +``` + +Now save it, and try the following in your shell: + +```bash +$ node argv.js one two three four five +[ 'node', + '/home/avian/argvdemo/argv.js', + 'one', + 'two', + 'three', + 'four', + 'five' ] +``` + +There you have it - an array containing any arguments you passed in. Notice the first two elements - `node` and the path to your script. These will always be present - even if your program takes no arguments of its own, your script's interpreter and path are still considered arguments to the shell you're using. 
+ +Where everyday CLI arguments are concerned, you'll want to skip the first two. Now try this in `argv.js`: + +```js +var myArgs = process.argv.slice(2); +console.log('myArgs: ', myArgs); +``` + +This yields: + +```bash +$ node argv.js one two three four +myArgs: [ 'one', 'two', 'three', 'four' ] +``` + +Now let's actually do something with the args: + +```js +var myArgs = process.argv.slice(2); +console.log('myArgs: ', myArgs); + +switch (myArgs[0]) { +case 'insult': + console.log(myArgs[1], 'smells quite badly.'); + break; +case 'compliment': + console.log(myArgs[1], 'is really cool.'); + break; +default: + console.log('Sorry, that is not something I know how to do.'); +} +``` + +JS PRO TIP: Remember to `break` after each `case` - otherwise you'll run the next case too! + +Referring to your command-line arguments by array index isn't very clean, and can quickly turn into a nightmare when you start working with flags and the like - imagine you made a server, and it needed a lot of arguments. Imagine having to deal with something like `myapp -h host -p port -r -v -b --quiet -x -o outfile` - some flags need to know about what comes next, some don't, and most CLIs let users specify arguments in any order they want. Sound like a fun string to parse? + +Luckily, there are many third party modules that makes all of this trivial - one of which is [yargs](https://www.npmjs.com/package/yargs). It's available via `npm`. Use this command from your app's base path: + +``` +npm i yargs +``` + +Once you have it, give it a try - it can really be a life-saver. 
Let's test it with a fun little leap year checker and current time teller:
* **help()**: This method is used to display a help dialogue when the `--help` option is encountered; it contains a description of all the `commands` and `options` available.
", function(country) { + console.log(`${name}, is a citizen of ${country}`); + rl.close(); + }); +}); + +rl.on("close", function() { + console.log("\nBYE BYE !!!"); + process.exit(0); +}); +``` + +In the above code `readline.createInterface()` is used for creating an instance of `readline` by configuring the readable and the writable streams. The `input` key takes a readable stream like `process.stdin` or `fs.createReadStream('file.txt')` and the `output` key takes a writable stream like `process.stdout` or `process.stderr`. + +The `rl.question()` method displays the query by writing it to the `output`, waits for user input to be provided on `input`, then invokes the `callback` function passing the provided input as the first argument. + +NODE PRO TIP: Do remember to use `rl.close()` to close the transmitting otherwise the process will be left in the `idle` state. + +The last part of the code uses `rl.on()` method to add an event listener to the `close` event which simply `console.log` to the output stream and exits the process. This part is completely optional and can be removed at will. For more in-depth details and usage refer to the docs [here](https://nodejs.org/api/readline.html). + +If all of this sounds complicated, or if you want a higher-level interface to this sort of thing, don't worry - as usual, the Node.js community has come to the rescue. One particularly friendly module to use for this is `prompt`, available on `npm`: + +```bash +npm install prompt +``` + +Prompt is built to be easy - if your eyes started to glaze over as soon as you saw `Readable Stream`, then this is the section for you. 
Compare the following to the example above: + +```js +const prompt = require('prompt'); + +prompt.start(); + +prompt.get(['username', 'email'], function (err, result) { + if (err) { return onErr(err); } + console.log('Command-line input received:'); + console.log(' Username: ' + result.username); + console.log(' Email: ' + result.email); +}); + +function onErr(err) { + console.log(err); + return 1; +} +``` + +NODE PRO TIP: This short script also demonstrates proper error handling in node - errors are a callback's first argument, and `return` is used with the error handler so that the rest of the function doesn't execute when errors happen. + +Prompt also makes it trivial to handle a certain set of recurring properties that one might want to attach. + +```js +const prompt = require('prompt'); + +const properties = [ + { + name: 'username', + validator: /^[a-zA-Z\s\-]+$/, + warning: 'Username must be only letters, spaces, or dashes' + }, + { + name: 'password', + hidden: true + } +]; + +prompt.start(); + +prompt.get(properties, function (err, result) { + if (err) { return onErr(err); } + console.log('Command-line input received:'); + console.log(' Username: ' + result.username); + console.log(' Password: ' + result.password); +}); + +function onErr(err) { + console.log(err); + return 1; +} +``` + +For more information on Prompt, please see [the project's GitHub page](https://github.com/flatiron/prompt). 
diff --git a/locale/ro/knowledge/cryptography/how-to-use-crypto-module.md b/locale/ro/knowledge/cryptography/how-to-use-crypto-module.md new file mode 100644 index 000000000000..9811383edd6e --- /dev/null +++ b/locale/ro/knowledge/cryptography/how-to-use-crypto-module.md @@ -0,0 +1,163 @@ +--- +title: How to use the crypto module +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - crypto +difficulty: 3 +layout: knowledge-post.hbs +--- + +The [crypto](https://nodejs.org/api/crypto.html) module is a wrapper for [OpenSSL](https://en.wikipedia.org/wiki/Openssl) cryptographic functions. It supports calculating hashes, authentication with HMAC, ciphers, and more! + +The crypto module is mostly useful as a tool for implementing [cryptographic protocols](https://en.wikipedia.org/wiki/Cryptographic_protocol) such as [TLS](https://en.wikipedia.org/wiki/Transport_Layer_Security) and [https](https://en.wikipedia.org/wiki/Https). For most users, the built-in [tls module](https://nodejs.org/api/tls.html) and [https module](https://nodejs.org/api/https.html) should more than suffice. However, for the user that only wants to use small parts of what's needed for full-scale cryptography or is crazy/desperate enough to implement a protocol using OpenSSL and Node.js: Read on. + +## Hashes + +### What Is A Hash? + +A hash is a fixed-length string of bits that is procedurally and deterministically generated from some arbitrary block of source data. Some important properties of these hashes (the type useful for cryptography) include: + +* **Fixed length:** This means that, no matter what the input, the length of the hash is the same. For example, SHA-256 hashes are always 256 bits long whether the input data is a few bits or a few gigabytes. + +* **Deterministic:** For the same input, you should expect to be able to calculate exactly the same hash. This makes hashes useful for checksums. 
+ +* **Collision-Resistant:** A collision is when the same hash is generated for two different input blocks of data. Hash algorithms are designed to be extremely unlikely to have collisions -- just how unlikely is a property of the hash algorithm. The importance of this property depends on the use case. + +* **Unidirectional:** A good hash algorithm is easy to apply, but hard to undo. This means that, given a hash, there isn't any reasonable way to find out what the original piece of data was. + +### Hash Algorithms That Work With Crypto + +The hashes that work with crypto are dependent on what your version of OpenSSL supports. If you have a new enough version of OpenSSL, you can get a list of hash types your OpenSSL supports by typing `openssl list-message-digest-algorithms` into the command line. For older versions, simply type `openssl list-message-digest-commands` instead! + +One of the most common hash algorithms is [SHA-256](https://en.wikipedia.org/wiki/SHA-2). Older popular types like **[SHA-1](https://en.wikipedia.org/wiki/Sha1) or [MD5](https://en.wikipedia.org/wiki/MD5#Security) are not secure any more** and should not be used. + +### How To Calculate Hashes with Crypto + +Crypto has a method called `createHash` which allows you to calculate a hash. Its only argument is a string representing the hash This example finds the SHA-256 hash for the string, "Man oh man do I love node!": + +```js +require("crypto") + .createHash("sha256") + .update("Man oh man do I love node!") + .digest("hex"); +``` + +The `update` method is used to push data to later be turned into a hash with the `digest` method. `update` can be invoked multiple times to ingest streaming data, such as buffers from a file read stream. The argument for `digest` represents the output format, and may either be "binary", "hex" or "base64". It defaults to binary. 
+ +## HMAC + +HMAC stands for Hash-based Message Authentication Code, and is a process for applying a hash algorithm to both data and a secret key that results in a single final hash. Its use is similar to that of a vanilla hash, but also allows to check the *authenticity* of data as *well* as the integrity of said data (as you can using SHA-256 checksums). + +The API for hmacs is very similar to that of `createHash`, except that the method is called `createHmac` and it takes a key as a second argument: + +```js +require("crypto").createHmac("sha256", "password") + .update("If you love node so much why don't you marry it?") + .digest("hex"); +``` + +The resulting SHA-256 hash is unique to both the input data and the key. + +## Ciphers + +Ciphers allow you to encode and decode messages given a password. + +### Cipher Algorithms That Work With Crypto + +Like crypto's hash algorithms, the cyphers that work with crypto are dependent on what your version of OpenSSL supports. You can get a list of hash types your OpenSSL supports by typing `openssl list-cipher-commands` into the command line for older versions, or `openssl list-cipher-algorithms` for newer versions of OpenSSL. OpenSSL supports *many* ciphers; A good and popular one is [AES_256](https://en.wikipedia.org/wiki/Advanced_Encryption_Standard). + +### How To Use Cipher Algorithms with Crypto: + +Crypto comes with two methods for ciphering and deciphering: + +* `crypto.createCipheriv(algorithm, key, iv)` +* `crypto.createDecipheriv(algorithm, key, iv)` + +Both of these methods take arguments similarly to `createHmac`. They also both have analogous `update` functions. However, each use of `update` returns a chunk of the encoded/decoded data instead of requiring one to call `digest` to get the result. Moreover, after encoding (or decoding) your data, you will likely have to call the `final` method to get the last chunk of encoded information. 
Another important argument to the cipher methods is the `iv`, or
A reference to common algorithms and their `key` and `iv` size is given below: + +| Algorithm | Key | iv | +| ----------- | ------------------ | ------------------ | +| aes128 | 16 byte (128 bits) | 16 byte (128 bits) | +| aes-128-cbc | 16 byte (128 bits) | 16 byte (128 bits) | +| aes192 | 24 byte (192 bits) | 16 byte (128 bits) | +| aes256 | 32 byte (256 bits) | 16 byte (128 bits) | + +In the code above The user entered `key` is hashed using `SHA-256 encryption` which produces a 32 byte buffer by default, this buffered key is then used as the [cryptographic key](https://en.wikipedia.org/wiki/Key_(cryptography)) in the `crypto.createCipheriv()` and `crypto.createDecipheriv()` methods. The `iv` is also hashed with `SHA-256 encryption` and is 32 byte in size but all AES (CBC mode and CFB mode) take `iv` of exactly 16 byte (128 bits) therefor another Buffer `resizedIV` is used which contains the first 16 byte of original 32 byte `iv`. + +Using this script to encode a message looks like this: + +```bash +$ node ./secretmsg.js -e --key="popcorn" "My treasure is buried behind Carl's Jr. on Telegraph." +c8c78895fd91da17cca9cf0d28e742c6077fb5a89ef5cdc23d9c37c96c5fb7f321d7f52c06e73c46633783d9535e2aa5cc07f2ad1803d73614c4e6882026bfd9 +``` + +Now, if I gave somebody the same script, my encoded message and the key, they can decode the message and find out where I buried my treasure: + +```bash +$ node ./secretmsg.js -d --key="popcorn" c8c78895fd91da17cca9cf0d28e742c6077fb5a89ef5cdc23d9c37c96c5fb7f321d7f52c06e73c46633783d9535e2aa5cc07f2ad1803d73614c4e6882026bfd9 +My treasure is buried behind Carl's Jr. on Telegraph. +``` + +You should know that what I buried behind Carl's Jr was just a cigarette butt, and that this script is obviously not for serious use. 
+ +## Signing and Verification + +Crypto has other methods used for dealing with certificates and credentials, as used for TLS: + +* `crypto.createCredentials` +* `crypto.createSign` +* `crypto.createVerify` + +These methods supply the last building blocks for a complete cryptographic protocol, and require an advanced knowledge of real-world cryptographic protocols to be useful. Again, it is recommended that developers use either the [tls](https://nodejs.org/api/tls.html) module or the [https](https://nodejs.org/api/https.html) module if applicable. diff --git a/locale/ro/knowledge/index.md b/locale/ro/knowledge/index.md new file mode 100644 index 000000000000..e6ee74d2e977 --- /dev/null +++ b/locale/ro/knowledge/index.md @@ -0,0 +1,6 @@ +--- +title: Knowledge Base +layout: knowledge-base-index.hbs +--- + +# Knowledge Base diff --git a/locale/ro/knowledge/javascript-conventions/what-are-the-built-in-timer-functions.md b/locale/ro/knowledge/javascript-conventions/what-are-the-built-in-timer-functions.md new file mode 100644 index 000000000000..52e993be2293 --- /dev/null +++ b/locale/ro/knowledge/javascript-conventions/what-are-the-built-in-timer-functions.md @@ -0,0 +1,103 @@ +--- +title: What are the built-in timer functions? +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - builtin + - globals +difficulty: 1 +layout: knowledge-post.hbs +--- + +There are two most common built-in timer functions, `setTimeout` and `setInterval`, which can be used to call a function at a later time. For an example usage: + +```js +setTimeout(function() { console.log("setTimeout: It's been one second!"); }, 1000); +setInterval(function() { console.log("setInterval: It's been one second!"); }, 1000); +``` + +An example output is: + +```bash +setTimeout: It's been one second! +setInterval: It's been one second! +setInterval: It's been one second! +setInterval: It's been one second! +setInterval: It's been one second! +... +``` + +As you can see the parameters to both are the same. 
The second parameter says how long in milliseconds to wait before calling the function passed in as the first parameter. The difference between the two functions is that `setTimeout` calls the callback only once while `setInterval` will call it over and over again.
+
+Typically you want to be careful with `setInterval` because it can cause some undesirable effects. If, for example, you wanted to make sure your server was up by pinging it every second, you might think to try something like this:
+
+```js
+setInterval(ping, 1000);
+```
+
+This can cause problems, however, if your server is slow and it takes, for example, 3 seconds to respond to the first request. In the time it takes to get back the response, you would have sent off 3 more requests - not exactly desirable! Overall, this doesn't have a large impact when serving small static files. But if you're doing an expensive operation, such as a database query or any complex computation, this can have undesirable results. A common solution looks like this:
+
+```js
+const recursive = function () {
+    console.log("It has been one second!");
+    setTimeout(recursive,1000);
+}
+recursive();
+```
+
+As you can see, it makes a call to the `recursive` function which, as it completes, makes a call to `setTimeout(recursive, 1000)` which makes it call `recursive` again in 1 second - thus having near the same effect as setInterval while being resilient to the unintended errors that can pile up.
+
+You can clear the timers you set with `clearTimeout` and `clearInterval`. Their usage is very simple:
+
+```js
+function never_call () {
+  console.log("You should never call this function");
+}
+
+const id1 = setTimeout(never_call,1000);
+const id2 = setInterval(never_call,1000);
+
+clearTimeout(id1);
+clearInterval(id2);
+```
+
+So if you keep track of the return values of the timers, you can easily unhook the timers.
+
+The final trick for the timer objects is that you can pass parameters to the callback by passing more parameters to setTimeout and setInterval:
+
+```js
+setTimeout(console.log, 1000, "This", "has", 4, "parameters");
+setInterval(console.log, 1000, "This only has one");
+```
+
+The output is:
+
+```bash
+This has 4 parameters
+This only has one
+This only has one
+This only has one
+This only has one
+This only has one
+...
+```
+
+#### setImmediate()
+
+`setImmediate()` is another built-in timer function which, as the name suggests, runs immediately after the first iteration of the event loop is completed. In other words, `setImmediate()` is similar to a `setTimeout()` function with a `0ms` delay. The `setImmediate()` function can also take extra parameters that are passed when the callback is called:
+
+```js
+console.log("This will be printed first");
+setImmediate(console.log, "This is an extra parameter");
+console.log("This will be printed second");
+```
+
+The output is:
+
+```bash
+This will be printed first
+This will be printed second
+This is an extra parameter
+```
+
+Remember that though `setImmediate()` has no delay (i.e., 0ms) this doesn't mean that the code will run synchronously. It simply means that there will be no delay (i.e., 0ms) after the first iteration of the event loop is completed, i.e., all synchronous commands have been executed.
diff --git a/locale/ro/knowledge/javascript-conventions/what-is-the-arguments-object.md b/locale/ro/knowledge/javascript-conventions/what-is-the-arguments-object.md
new file mode 100644
index 000000000000..dc85f2e98bed
--- /dev/null
+++ b/locale/ro/knowledge/javascript-conventions/what-is-the-arguments-object.md
@@ -0,0 +1,65 @@
+---
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - truthy
+  - falsy
+  - types
+  - coercion
+title: What is the arguments object?
+difficulty: 4
+layout: knowledge-post.hbs
+---
+
+The `arguments` object is a special construct available inside all function calls.
It represents the list of arguments that were passed in when invoking the function. Since JavaScript allows functions to be called with any number of arguments, we need a way to dynamically discover and access them.
+
+The `arguments` object is an array-like object. It has a length property that corresponds to the number of arguments passed into the function. You can access these values by indexing into the array, e.g. `arguments[0]` is the first argument. The only other property of `arguments` is callee, which ES5 forbids in `strict mode`; more about it can be found [here](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Functions/arguments/callee). Here's an example that illustrates the properties of `arguments`.
+
+```js
+const myfunc = function(one) {
+  arguments[0] === one;
+  arguments[1] === 2;
+  arguments.length === 3;
+}
+
+myfunc(1, 2, 3);
+```
+
+This construct is very useful and gives JavaScript functions a lot of flexibility. But there is an important gotcha. The `arguments` object behaves like an array, but it is not an actual array. It does not have Array in its prototype chain and it does not respond to any array methods, e.g. `arguments.sort()` raises a TypeError. Instead, you need to copy the values into a true array first. With the advent of the ES6 `Array.from()` method, this is quite straightforward.
+
+```js
+const myfunc = function(a, b, c) {
+  const args = Array.from(arguments);
+  console.log(args) // [1, 2, 3]
+}
+
+myfunc(1, 2, 3);
+```
+
+NOTE: For ES5 and below, a normal `for` loop can do the trick.
+
+In certain cases you can still treat `arguments` as an array. You can use `arguments` in dynamic function invocations using apply. And most native Array methods will also accept `arguments` when dynamically invoked using call or apply. This technique also suggests another way to convert `arguments` into a true array using the `Array.slice` method.
+
+```js
+myfunc.apply(obj, arguments).
+ +// concat arguments onto the +Array.prototype.concat.apply([1,2,3], arguments); + +// turn arguments into a true array +const args = Array.prototype.slice.call(arguments); + +// cut out first argument +args = Array.prototype.slice.call(arguments, 1); +``` + +### Arguments object in arrow function + +The `arrow functions` were added in the ECMAScript 2015 (ES6) specification as a syntactically compact alternative to a regular function expression. A drawback to this new alternative is the lack of `arguments object` (and `this`, `super`, and `new.target` keywords). A workaround for such cases is the use of `rest parameter`. The `rest parameter` allows you to represent an indefinite number of arguments as an array. For more details read [here](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Functions/rest_parameters). + +```js +const myfunc = (...args) => { + console.log('first parameter is ', args[0]); +} + +myfunc(1, 2, 3); +``` diff --git a/locale/ro/security.md b/locale/ro/security.md new file mode 100644 index 000000000000..09cf6202c549 --- /dev/null +++ b/locale/ro/security.md @@ -0,0 +1,51 @@ +--- +layout: security.hbs +title: Security +--- + +# Security + +## Reporting a Bug in Node.js + +Report security bugs in Node.js via [HackerOne](https://hackerone.com/nodejs). + +Your report will be acknowledged within 24 hours, and you’ll receive a more detailed response to your report within 48 hours indicating the next steps in handling your submission. + +After the initial reply to your report, the security team will endeavor to keep you informed of the progress being made towards a fix and full announcement, and may ask for additional information or guidance surrounding the reported issue. These updates will be sent at least every five days; in practice, this is more likely to be every 24-48 hours. 
+
+### Node.js Bug Bounty Program
+
+The Node.js project engages in an official bug bounty program for security researchers and responsible public disclosures. The program is managed through the HackerOne platform. See [HackerOne](https://hackerone.com/nodejs) for further details.
+
+## Reporting a Bug in a third party module
+
+Security bugs in third party modules should be reported to their respective maintainers and should also be coordinated through the Node.js Ecosystem Security Team via [HackerOne](https://hackerone.com/nodejs-ecosystem).
+
+Details regarding this process can be found in the [Security Working Group repository](https://github.com/nodejs/security-wg/blob/master/processes/third_party_vuln_process.md).
+
+Thank you for improving the security of Node.js and its ecosystem. Your efforts and responsible disclosure are greatly appreciated and will be acknowledged.
+
+## Disclosure Policy
+
+Here is the security disclosure policy for Node.js.
+
+* The security report is received and is assigned a primary handler. This person will coordinate the fix and release process. The problem is confirmed and a list of all affected versions is determined. Code is audited to find any potential similar problems. Fixes are prepared for all releases which are still under maintenance. These fixes are not committed to the public repository but rather held locally pending the announcement.
+
+* A suggested embargo date for this vulnerability is chosen and a CVE (Common Vulnerabilities and Exposures (CVE®)) is requested for the vulnerability.
+
+* On the embargo date, the Node.js security mailing list is sent a copy of the announcement. The changes are pushed to the public repository and new builds are deployed to nodejs.org. Within 6 hours of the mailing list being notified, a copy of the advisory will be published on the Node.js blog.
+
+* Typically the embargo date will be set 72 hours from the time the CVE is issued.
However, this may vary depending on the severity of the bug or difficulty in applying a fix. + +* This process can take some time, especially when coordination is required with maintainers of other projects. Every effort will be made to handle the bug in as timely a manner as possible; however, it’s important that we follow the release process above to ensure that the disclosure is handled in a consistent manner. + +## Receiving Security Updates + +Security notifications will be distributed via the following methods. + +* +* + +## Comments on this Policy + +If you have suggestions on how this process could be improved please submit a [pull request](https://github.com/nodejs/nodejs.org) or [file an issue](https://github.com/nodejs/security-wg/issues/new) to discuss. diff --git a/locale/ro/site.json b/locale/ro/site.json new file mode 100644 index 000000000000..8a41eec71f3d --- /dev/null +++ b/locale/ro/site.json @@ -0,0 +1,155 @@ +{ + "title": "Node.js", + "author": "Node.js", + "url": "https://nodejs.org/en/", + "locale": "en", + "language": "English", + "languageEnglishVersion": "English", + "scrollToTop": "Scroll to top", + "reportNodeIssue": "Report Node.js issue", + "reportWebsiteIssue": "Report website issue", + "getHelpIssue": "Get Help", + "by": "by", + "all-downloads": "All download options", + "nightly": "Nightly builds", + "chakracore-nightly": "Node-ChakraCore Nightly builds", + "unofficial-builds": "Unofficial builds", + "previous": "Previous", + "next": "Next", + "feeds": [ + { + "link": "feed/blog.xml", + "text": "Node.js Blog" + }, + { + "link": "feed/releases.xml", + "text": "Node.js Blog: Releases" + }, + { + "link": "feed/vulnerability.xml", + "text": "Node.js Blog: Vulnerability Reports" + } + ], + "home": { + "text": "Home" + }, + "about": { + "link": "about", + "text": "About", + "governance": { + "link": "about/governance", + "text": "Governance" + }, + "community": { + "link": "about/community", + "text": "Community" + }, + 
"workinggroups": { + "link": "about/working-groups", + "text": "Working Groups" + }, + "releases": { + "link": "about/releases", + "text": "Releases" + }, + "resources": { + "link": "about/resources", + "text": "Resources" + }, + "trademark": { + "link": "about/trademark", + "text": "Trademark" + }, + "privacy": { + "link": "about/privacy", + "text": "Privacy Policy" + } + }, + "download": { + "link": "download", + "text": "Downloads", + "releases": { + "link": "download/releases", + "text": "Previous Releases" + }, + "package-manager": { + "link": "download/package-manager", + "text": "Installing Node.js via package manager" + }, + "shasums": { + "link": "SHASUMS256.txt.asc", + "text": "Signed SHASUMS for release files", + "verify-link": "https://github.com/nodejs/node#verifying-binaries", + "verify-text": "How to verify" + }, + "install-on-linux": { + "text": "Installing Node.js via binary archive" + } + }, + "docs": { + "link": "docs", + "text": "Docs", + "es6": { + "link": "docs/es6", + "text": "ES6 and beyond" + }, + "api-lts": { + "link": "https://nodejs.org/dist/latest-%ver-major%/docs/api/", + "subtext": "LTS", + "text": "%ver% API" + }, + "api-current": { + "link": "https://nodejs.org/dist/latest-%ver-major%/docs/api/", + "text": "%ver% API" + }, + "guides": { + "link": "docs/guides", + "text": "Guides" + }, + "dependencies": { + "link": "docs/meta/topics/dependencies", + "text": "Dependencies" + } + }, + "getinvolved": { + "link": "get-involved", + "text": "Get Involved", + "code-and-learn": { + "link": "get-involved/code-and-learn", + "text": "Code + Learn" + }, + "collab-summit": { + "link": "get-involved/collab-summit", + "text": "Collab Summit" + }, + "contribute": { + "link": "get-involved/contribute", + "text": "Contribute" + }, + "conduct": { + "link": "https://github.com/nodejs/node/blob/master/doc/guides/contributing/coc.md#code-of-conduct", + "text": "Code of Conduct" + }, + "node-meetups": { + "link": "get-involved/node-meetups", + 
"text": "Node.js Meetups" + } + }, + "security": { + "link": "security", + "text": "Security" + }, + "blog": { + "link": "blog", + "text": "News" + }, + "releases": { + "title": "Release History", + "downloads": "Downloads" + }, + "links": { + "pages": { + "changelog": "Changelog" + } + } +} diff --git a/locale/ru/index.md b/locale/ru/index.md index 6c234850957a..2040fa1f0262 100644 --- a/locale/ru/index.md +++ b/locale/ru/index.md @@ -15,6 +15,9 @@ labels: api: Документация version-schedule-prompt: Или смотрите на version-schedule-prompt-link-text: график LTS. + newsletter: true + newsletter-prefix: Подпишитесь на + newsletter-postfix: ", официальный ежемесячный информационный бюллетень Node.js." --- Node.js® — это JavaScript-окружение построенное на движке [Chrome V8](https://v8.dev/).